diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 2a895b44c0b..17b0b1fbb11 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -20,7 +20,7 @@ MASTER_SIZE=n1-standard-1
 MINION_SIZE=n1-standard-1
 NUM_MINIONS=4
 # TODO(dchen1107): Filed an internal issue to create an alias
-# for containervm image, so that gcloud/gcutil will expand this
+# for containervm image, so that gcloud will expand this
 # to the latest supported image.
 IMAGE=container-vm-v20141016
 IMAGE_PROJECT=google-containers
@@ -31,7 +31,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
 MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
-MINION_SCOPES="storage-ro,compute-rw"
+MINION_SCOPES=(storage-ro compute-rw)
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 PORTAL_NET="10.0.0.0/16"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 54dbcd8efa5..c2939d8f45b 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -20,7 +20,7 @@ MASTER_SIZE=g1-small
 MINION_SIZE=g1-small
 NUM_MINIONS=2
 # TODO(dchen1107): Filed an internal issue to create an alias
-# for containervm image, so that gcloud/gcutil will expand this
+# for containervm image, so that gcloud will expand this
 # to the latest supported image.
 IMAGE=container-vm-v20141016
 IMAGE_PROJECT=google-containers
@@ -31,7 +31,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
 MINION_IP_RANGES=($(eval echo "10.245.{1..${NUM_MINIONS}}.0/24"))
-MINION_SCOPES="storage-ro,compute-rw"
+MINION_SCOPES=("storage-ro" "compute-rw")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 PORTAL_NET="10.0.0.0/16"
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index 8bcbec39bc1..2f0c3bba935 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -24,7 +24,7 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
 # Verify prereqs
 function verify-prereqs {
   local cmd
-  for cmd in gcloud gcutil gsutil; do
+  for cmd in gcloud gsutil; do
    which "${cmd}" >/dev/null || {
      echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
      echo "SDK can be downloaded from https://cloud.google.com/sdk/."
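
The `MINION_SCOPES` change above (comma-separated string to bash array) is what lets the patch pass scopes to `gcloud` as separate arguments later on. A minimal standalone sketch of that array-to-flags pattern, mirroring the `scope_flags` logic the patch adds to kube-up (the `echo` and instance name are illustrative, not part of the patch):

```shell
# Sketch: expanding an array of scopes into gcloud flags.
MINION_SCOPES=(storage-ro compute-rw)

scope_flags=()
if (( ${#MINION_SCOPES[@]} > 0 )); then
  scope_flags=(--scopes "${MINION_SCOPES[@]}")  # --scopes storage-ro compute-rw
else
  scope_flags=(--no-scopes)                     # explicitly request no scopes
fi

# Illustrative only: print the command instead of running it.
echo gcloud compute instances create example-node "${scope_flags[@]}"
```
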
@@ -143,10 +143,9 @@ function upload-server-tars() {
 function detect-minions () {
   KUBE_MINION_IP_ADDRESSES=()
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-    # gcutil will print the "external-ip" column header even if no instances are found
-    local minion_ip=$(gcutil listinstances --format=csv --sort=external-ip \
-      --columns=external-ip --zone ${ZONE} --filter="name eq ${MINION_NAMES[$i]}" \
-      | tail -n '+2' | tail -n 1)
+    local minion_ip=$(gcloud compute instances describe --zone "${ZONE}" \
+      "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
+      --format=text | awk '{ print $2 }')
    if [[ -z "${minion_ip-}" ]] ; then
      echo "Did not find ${MINION_NAMES[$i]}" >&2
    else
@@ -171,10 +170,9 @@ function detect-minions () {
 function detect-master () {
   KUBE_MASTER=${MASTER_NAME}
   if [[ -z "${KUBE_MASTER_IP-}" ]]; then
-    # gcutil will print the "external-ip" column header even if no instances are found
-    KUBE_MASTER_IP=$(gcutil listinstances --format=csv --sort=external-ip \
-      --columns=external-ip --zone ${ZONE} --filter="name eq ${MASTER_NAME}" \
-      | tail -n '+2' | tail -n 1)
+    KUBE_MASTER_IP=$(gcloud compute instances describe --zone "${ZONE}" \
+      "${MASTER_NAME}" --fields networkInterfaces[0].accessConfigs[0].natIP \
+      --format=text | awk '{ print $2 }')
   fi
   if [[ -z "${KUBE_MASTER_IP-}" ]]; then
     echo "Could not detect Kubernetes master node.  Make sure you've launched a cluster with 'kube-up.sh'" >&2
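
Both `detect-` functions above rely on the key/value shape of `--format=text` output. Roughly, assuming a real instance (the IP shown is a placeholder):

```shell
$ gcloud compute instances describe --zone us-central1-b kubernetes-minion-1 \
    --fields networkInterfaces[0].accessConfigs[0].natIP --format=text
networkInterfaces[0].accessConfigs[0].natIP: 1.2.3.4
```

`awk '{ print $2 }'` then keeps only the address; unlike the old `gcutil` CSV output, there is no header row to strip.
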
/bin/bash" @@ -312,18 +304,16 @@ function kube-up { MINION_SCOPES="${MINION_SCOPES}, https://www.googleapis.com/auth/logging.write" fi - gcutil addinstance "${MASTER_NAME}" \ + gcloud compute instances create "${MASTER_NAME}" \ --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ --zone "${ZONE}" \ - --machine_type "${MASTER_SIZE}" \ - --image "projects/${IMAGE_PROJECT}/global/images/${IMAGE}" \ + --machine-type "${MASTER_SIZE}" \ + --image-project="${IMAGE_PROJECT}" \ + --image "${IMAGE}" \ --tags "${MASTER_TAG}" \ --network "${NETWORK}" \ - --service_account_scopes="storage-ro,compute-rw" \ - --automatic_restart \ - --metadata_from_file "startup-script:${KUBE_TEMP}/master-start.sh" & + --scopes "storage-ro" "compute-rw" \ + --metadata-from-file "startup-script=${KUBE_TEMP}/master-start.sh" & for (( i=0; i<${#MINION_NAMES[@]}; i++)); do ( @@ -336,34 +326,36 @@ function kube-up { grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-minion.sh" ) > "${KUBE_TEMP}/minion-start-${i}.sh" - gcutil addfirewall "${MINION_NAMES[$i]}-all" \ + gcloud compute firewall-rules create "${MINION_NAMES[$i]}-all" \ --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ --network "${NETWORK}" \ - --allowed_ip_sources "${MINION_IP_RANGES[$i]}" \ - --allowed "tcp,udp,icmp,esp,ah,sctp" & + --source-ranges "${MINION_IP_RANGES[$i]}" \ + --allow tcp udp icmp esp ah sctp & - gcutil addinstance ${MINION_NAMES[$i]} \ + local -a scope_flags=() + if (( "${#MINION_SCOPES[@]}" > 0 )); then + scope_flags=("--scopes" "${MINION_SCOPES[@]}") + else + scope_flags=("--no-scopes") + fi + gcloud compute instances create ${MINION_NAMES[$i]} \ --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ --zone "${ZONE}" \ - --machine_type "${MINION_SIZE}" \ - --image "projects/${IMAGE_PROJECT}/global/images/${IMAGE}" \ + --machine-type "${MINION_SIZE}" \ + --image-project="${IMAGE_PROJECT}" \ + --image "${IMAGE}" \ --tags "${MINION_TAG}" \ --network "${NETWORK}" \ - --service_account_scopes "${MINION_SCOPES}" \ - --automatic_restart \ - --can_ip_forward \ - --metadata_from_file "startup-script:${KUBE_TEMP}/minion-start-${i}.sh" & + "${scope_flags[@]}" \ + --can-ip-forward \ + --metadata-from-file "startup-script=${KUBE_TEMP}/minion-start-${i}.sh" & - gcutil addroute "${MINION_NAMES[$i]}" "${MINION_IP_RANGES[$i]}" \ + gcloud compute routes create "${MINION_NAMES[$i]}" \ --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ + --destination-range "${MINION_IP_RANGES[$i]}" \ --network "${NETWORK}" \ - --next_hop_instance "${ZONE}/instances/${MINION_NAMES[$i]}" & + --next-hop-instance "${MINION_NAMES[$i]}" \ + --next-hop-instance-zone "${ZONE}" & done local fail=0 @@ -376,7 +368,7 @@ function kube-up { exit 2 fi - detect-master > /dev/null + detect-master echo "Waiting for cluster initialization." echo @@ -401,7 +393,7 @@ function kube-up { local rc # Capture return code without exiting because of errexit bash option for (( i=0; i<${#MINION_NAMES[@]}; i++)); do # Make sure docker is installed - gcutil ssh "${MINION_NAMES[$i]}" which docker >/dev/null || { + gcloud compute ssh --zone "$ZONE" "${MINION_NAMES[$i]}" --command "which docker" >/dev/null || { echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 echo "cluster. 
(sorry!)" >&2 @@ -424,9 +416,9 @@ function kube-up { # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's # config file. Distribute the same way the htpasswd is done. (umask 077 - gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null - gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null - gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + gcloud compute ssh --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.crt" >"${HOME}/${kube_cert}" 2>/dev/null + gcloud compute ssh --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.key" >"${HOME}/${kube_key}" 2>/dev/null + gcloud compute ssh --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/ca.crt" >"${HOME}/${ca_cert}" 2>/dev/null cat << EOF > ~/.kubernetes_auth { @@ -449,47 +441,34 @@ function kube-down { detect-project echo "Bringing down cluster" - gcutil deletefirewall \ + gcloud compute firewall-rules delete \ --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ - --force \ + --quiet \ "${MASTER_NAME}-https" & - gcutil deleteinstance \ - --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ - --force \ - --delete_boot_pd \ - --zone "${ZONE}" \ - "${MASTER_NAME}" & + local minion + for minion in "${MINION_NAMES[@]}"; do + gcloud compute firewall-rules delete \ + --project "${PROJECT}" \ + --quiet \ + "${minion}-all" & - gcutil deletefirewall \ - --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ - --force \ - "${MINION_NAMES[@]/%/-all}" & + gcloud compute routes delete \ + --project "${PROJECT}" \ + --quiet \ + "${minion}" & + done - gcutil deleteinstance \ - --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ - --force \ - --delete_boot_pd \ - --zone "${ZONE}" \ - "${MINION_NAMES[@]}" & - - gcutil deleteroute \ - --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ - --force \ - "${MINION_NAMES[@]}" & + for minion in "${MASTER_NAME}" "${MINION_NAMES[@]}"; do + gcloud compute instances delete \ + --project "${PROJECT}" \ + --quiet \ + --delete-disks all \ + --zone "${ZONE}" \ + "${minion}" & + done wait - } # Update a kubernetes cluster with latest source @@ -512,7 +491,7 @@ function kube-push { echo "echo Executing configuration" echo "sudo salt '*' mine.update" echo "sudo salt --force-color '*' state.highstate" - ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash + ) | gcloud compute ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" --command "sudo bash" get-password @@ -551,12 +530,10 @@ function test-setup { detect-project # Open up port 80 & 8080 so common containers on minions can be reached - gcutil addfirewall \ + gcloud compute firewall-rules create \ --project "${PROJECT}" \ - --norespect_terminal_width \ - --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \ - --target_tags "${MINION_TAG}" \ - --allowed tcp:80,tcp:8080 \ + --target-tags "${MINION_TAG}" \ + --allow tcp:80 tcp:8080 \ --network "${NETWORK}" \ "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" } @@ -568,20 +545,18 @@ function test-setup { # PROJECT function test-teardown { echo "Shutting down test cluster in background." 
@@ -568,20 +545,18 @@ # PROJECT
 function test-teardown {
   echo "Shutting down test cluster in background."
-  gcutil deletefirewall \
+  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
-    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true > /dev/null
-  "${KUBE_ROOT}/cluster/kube-down.sh" > /dev/null
+    --quiet \
+    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true
+  "${KUBE_ROOT}/cluster/kube-down.sh"
 }
 
 # SSH to a node by name ($1) and run a command ($2).
 function ssh-to-node {
   local node="$1"
   local cmd="$2"
-  gcutil --log_level=WARNING ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}"
+  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --zone="${ZONE}" "${node}" --command "${cmd}"
 }
 
 # Restart the kube-proxy on a node ($1)
@@ -592,41 +567,39 @@ function restart-kube-proxy {
 # Setup monitoring using heapster and InfluxDB
 function setup-monitoring {
   if [[ "${MONITORING}" == "true" ]]; then
-  echo "Setting up cluster monitoring using Heapster."
+    echo "Setting up cluster monitoring using Heapster."
 
-  if ! gcutil getfirewall monitoring-heapster &> /dev/null; then
-    if ! gcutil addfirewall monitoring-heapster \
-      --project "${PROJECT}" \
-      --norespect_terminal_width \
-      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-      --target_tags="${MINION_TAG}" \
-      --allowed "tcp:80,tcp:8083,tcp:8086,tcp:9200" &> /dev/null; then
-      echo "Failed to set up firewall for monitoring" && false
-    fi
-  fi
+    if ! gcloud compute firewall-rules describe monitoring-heapster &>/dev/null; then
+      if ! gcloud compute firewall-rules create monitoring-heapster \
+        --project "${PROJECT}" \
+        --target-tags="${MINION_TAG}" \
+        --allow tcp:80 tcp:8083 tcp:8086 tcp:9200; then
+        echo "Failed to set up firewall for monitoring" && false
+      fi
+    fi
 
-  # Re-use master auth for Grafana
-  get-password
+    # Re-use master auth for Grafana
+    get-password
    ensure-temp-dir
    cp "${KUBE_ROOT}/examples/monitoring/influx-grafana-pod.json" "${KUBE_TEMP}/influx-grafana-pod.0.json"
-  sed "s/HTTP_USER, \"value\": \"[^\"]*\"/HTTP_USER, \"value\": \"$KUBE_USER\"/g" \
+    sed "s/HTTP_USER, \"value\": \"[^\"]*\"/HTTP_USER, \"value\": \"$KUBE_USER\"/g" \
      "${KUBE_TEMP}/influx-grafana-pod.0.json" > "${KUBE_TEMP}/influx-grafana-pod.1.json"
-  sed "s/HTTP_PASS, \"value\": \"[^\"]*\"/HTTP_PASS, \"value\": \"$KUBE_PASSWORD\"/g" \
+    sed "s/HTTP_PASS, \"value\": \"[^\"]*\"/HTTP_PASS, \"value\": \"$KUBE_PASSWORD\"/g" \
      "${KUBE_TEMP}/influx-grafana-pod.1.json" > "${KUBE_TEMP}/influx-grafana-pod.2.json"
    local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-  if "${kubectl}" create -f "${KUBE_TEMP}/influx-grafana-pod.2.json" &> /dev/null \
-    && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-service.json" &> /dev/null \
-    && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/heapster-pod.json" &> /dev/null; then
-    local dashboard_url="http://$(${kubectl} get -o json pod influx-grafana | grep hostIP | awk '{print $2}' | sed 's/[,|\"]//g')"
-    echo
-    echo "Grafana dashboard will be available at $dashboard_url. Wait for the monitoring dashboard to be online."
-    echo "Use the master user name and password for the dashboard."
-    echo
-  else
-    echo "Failed to Setup Monitoring"
-    teardown-monitoring
-  fi
+    if "${kubectl}" create -f "${KUBE_TEMP}/influx-grafana-pod.2.json" &> /dev/null \
+      && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-service.json" &> /dev/null \
+      && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/heapster-pod.json" &> /dev/null; then
+      local dashboard_url="http://$(${kubectl} get -o json pod influx-grafana | grep hostIP | awk '{print $2}' | sed 's/[,|\"]//g')"
+      echo
+      echo "Grafana dashboard will be available at $dashboard_url. Wait for the monitoring dashboard to be online."
+      echo "Use the master user name and password for the dashboard."
+      echo
+    else
+      echo "Failed to Setup Monitoring"
+      teardown-monitoring
+    fi
   fi
 }
 
@@ -638,13 +611,11 @@ function teardown-monitoring {
    "${kubectl}" delete pods heapster &> /dev/null || true
    "${kubectl}" delete pods influx-grafana &> /dev/null || true
    "${kubectl}" delete services influx-master &> /dev/null || true
-    if gcutil getfirewall monitoring-heapster &> /dev/null; then
-      gcutil deletefirewall \
-        --project "${PROJECT}" \
-        --norespect_terminal_width \
-        --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-        --force \
-        monitoring-heapster &> /dev/null || true
+    if gcloud compute firewall-rules describe monitoring-heapster &> /dev/null; then
+      gcloud compute firewall-rules delete \
+        --project "${PROJECT}" \
+        --quiet \
+        monitoring-heapster &> /dev/null || true
    fi
   fi
 }
diff --git a/docs/design/networking.md b/docs/design/networking.md
index 167b7382458..3f52d388287 100644
--- a/docs/design/networking.md
+++ b/docs/design/networking.md
@@ -40,11 +40,12 @@ We set up this bridge on each node with SaltStack, in [container_bridge.py](clus
 We make these addresses routable in GCE:
 
-    gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \
-      --norespect_terminal_width \
-      --project ${PROJECT} \
-      --network ${NETWORK} \
-      --next_hop_instance ${ZONE}/instances/${MINION_NAMES[$i]} &
+    gcloud compute routes create "${MINION_NAMES[$i]}" \
+      --project "${PROJECT}" \
+      --destination-range "${MINION_IP_RANGES[$i]}" \
+      --network "${NETWORK}" \
+      --next-hop-instance "${MINION_NAMES[$i]}" \
+      --next-hop-instance-zone "${ZONE}" &
 
 The minion IP ranges are /24s in the 10-dot space.
diff --git a/docs/getting-started-guides/gce.md b/docs/getting-started-guides/gce.md
index 65aeeba9360..0e35a846f5e 100644
--- a/docs/getting-started-guides/gce.md
+++ b/docs/getting-started-guides/gce.md
@@ -9,14 +9,14 @@ The example below creates a Kubernetes cluster with 4 worker node Virtual Machin
 2. Make sure you can start up a GCE VM.  At least make sure you can do the [Create an instance](https://developers.google.com/compute/docs/quickstart#addvm) part of the GCE Quickstart.
 3. Make sure you can ssh into the VM without interactive prompts.
    * Your GCE SSH key must either have no passcode or you need to be using `ssh-agent`.
-   * Ensure the GCE firewall isn't blocking port 22 to your VMs.  By default, this should work but if you have edited firewall rules or created a new non-default network, you'll need to expose it: `gcutil addfirewall --network=<network-name> --description "SSH allowed from anywhere" --allowed=tcp:22 default-ssh`
+   * Ensure the GCE firewall isn't blocking port 22 to your VMs.  By default, this should work but if you have edited firewall rules or created a new non-default network, you'll need to expose it: `gcloud compute firewall-rules create --network=<network-name> --description "SSH allowed from anywhere" --allow tcp:22 default-ssh`
 4. You need to have the Google Cloud Storage API, and the Google Cloud Storage JSON API enabled. This can be done in the Google Cloud Console.
 
 ### Prerequisites for your workstation
 
 1. Be running a Linux or Mac OS X.
-2. You must have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed. This will get you `gcloud`, `gcutil` and `gsutil`.
+2. You must have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed. This will get you `gcloud` and `gsutil`.
 3. Ensure that your `gcloud` components are up-to-date by running `gcloud components update`.
 4. If you want to build your own release, you need to have [Docker installed](https://docs.docker.com/installation/).  On Mac OS X you can use
diff --git a/docs/volumes.md b/docs/volumes.md
index 056b066b4d9..70203edee44 100644
--- a/docs/volumes.md
+++ b/docs/volumes.md
@@ -59,10 +59,15 @@ Before you can use a GCE PD with a pod, you need to create it and format it.
 __We are actively working on making this more streamlined.__
 
 ```sh
-gcutil adddisk <disk-name> --size_gb=<size> --zone=<zone>
-gcutil attachdisk --disk <disk-name> kubernetes-master
-gcutil ssh kubernetes-master sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-test2 /mnt/tmp
-gcutil detachdisk --device_name google-<disk-name> kubernetes-master
+DISK_NAME=my-data-disk
+DISK_SIZE=500GB
+ZONE=us-central1-a
+
+gcloud compute disks create --size=$DISK_SIZE --zone=$ZONE $DISK_NAME
+gcloud compute instances attach-disk --zone=$ZONE --disk=$DISK_NAME --device-name temp-data kubernetes-master
+gcloud compute ssh --zone=$ZONE kubernetes-master \
+  --command "sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-temp-data /mnt/tmp"
+gcloud compute instances detach-disk --zone=$ZONE --disk $DISK_NAME kubernetes-master
 ```
 
 #### GCE PD Example configuration:
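
Between creating and attaching the disk in the flow above, you can sanity-check it first; a small optional step, assuming the same `$DISK_NAME` and `$ZONE` variables (`gcloud compute disks describe` is a standard read-only command, not part of this patch):

```shell
# Optional: confirm the disk exists and inspect its size/status before attaching.
gcloud compute disks describe --zone=$ZONE $DISK_NAME
```
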
diff --git a/examples/guestbook-go/README.md b/examples/guestbook-go/README.md
index 4db6355ee69..9b4842b9ffe 100644
--- a/examples/guestbook-go/README.md
+++ b/examples/guestbook-go/README.md
@@ -39,8 +39,7 @@ redis-master-pod gurpartap/redis kubernetes-minion-3.c.thockin-dev.intern
 If you ssh to that machine, you can run `docker ps` to see the actual pod:
 
 ```shell
-$ gcutil ssh --zone us-central1-b kubernetes-minion-3
-$ sudo docker ps
+me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-3
 
 me@kubernetes-minion-3:~$ sudo docker ps
 CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS
@@ -165,33 +164,33 @@ redis-slave name=redis,role=slave name=redis,role=slave 10.0.0.2
 guestbook name=guestbook 10.0.0.3 3000
 
-To play with the service itself, find the external IP of the load balancer from the [Google Cloud Console][cloud-console] or the `gcutil` tool, and visit `http://<external-ip>:3000`.
+To play with the service itself, find the external IP of the load balancer from the [Google Cloud Console][cloud-console] or the `gcloud` tool, and visit `http://<external-ip>:3000`.
 
 ```shell
-$ gcutil getforwardingrule guestbook
-+---------------+-----------------------------------+
-| name          | guestbook                         |
-| description   |                                   |
-| creation-time | 2014-10-15T19:07:24.837-07:00     |
-| region        | us-central1                       |
-| ip            | 12.34.56.78                       |
-| protocol      | TCP                               |
-| port-range    | 3000-3000                         |
-| target        | us-central1/targetPools/guestbook |
-+---------------+-----------------------------------+
+$ gcloud compute forwarding-rules describe --region=us-central1 guestbook
+IPAddress: 11.22.33.44
+IPProtocol: TCP
+creationTimestamp: '2014-11-24T16:08:15.327-08:00'
+id: '17594840560849468061'
+kind: compute#forwardingRule
+name: guestbook
+portRange: 1-65535
+region: https://www.googleapis.com/compute/v1/projects/jbeda-prod/regions/us-central1
+selfLink: https://www.googleapis.com/compute/v1/projects/jbeda-prod/regions/us-central1/forwardingRules/guestbook
+target: https://www.googleapis.com/compute/v1/projects/jbeda-prod/regions/us-central1/targetPools/guestbook
 ```
 
-You may need to open the firewall for port 3000 using the [console][cloud-console] or the `gcutil` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
+You may need to open the firewall for port 3000 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
 
 ```shell
-$ gcutil addfirewall --allowed=tcp:3000 --target_tags=kubernetes-minion kubernetes-minion-3000
+$ gcloud compute firewall-rules create --allow=tcp:3000 --target-tags=kubernetes-minion kubernetes-minion-3000
 ```
 
 If you are running Kubernetes locally, you can just visit http://localhost:3000
 
-For details about limiting traffic to specific sources, see the [gcutil documentation][gcutil-docs]
+For details about limiting traffic to specific sources, see the [GCE firewall documentation][gce-firewall-docs].
 
 [cloud-console]: https://console.developer.google.com
-[gcutil-docs]: https://developers.google.com/compute/docs/gcutil/reference/firewall#addfirewall
+[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls
 
 ### Step Seven: Cleanup
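
If you only need the external IP rather than the full `describe` dump above, a one-liner in the same `--fields`/`--format=text` style the cluster scripts use should work; whether `forwarding-rules describe` accepts `--fields` is an assumption here, and the IP is the document's sample value:

```shell
$ gcloud compute forwarding-rules describe --region=us-central1 guestbook \
    --fields IPAddress --format=text | awk '{ print $2 }'
11.22.33.44
```
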
diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md
index 01b2cd6cffa..80ad4311075 100644
--- a/examples/guestbook/README.md
+++ b/examples/guestbook/README.md
@@ -66,8 +66,7 @@ redis-master dockerfile/redis kubernetes-minion-3.c.briandpe-api.inter
 If you ssh to that machine, you can run `docker ps` to see the actual pod:
 
 ```shell
-$ gcutil ssh --zone us-central1-b kubernetes-minion-3
-$ sudo docker ps
+me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-3
 
 me@kubernetes-minion-3:~$ sudo docker ps
 CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
@@ -308,23 +307,23 @@ if (isset($_GET['cmd']) === true) {
 }
 ?>
 ```
-To play with the service itself, find the name of a frontend, grab the external IP of that host from the [Google Cloud Console][cloud-console] or the `gcutil` tool, and visit `http://<host-ip>:8000`.
+To play with the service itself, find the name of a frontend, grab the external IP of that host from the [Google Cloud Console][cloud-console] or the `gcloud` tool, and visit `http://<host-ip>:8000`.
 
 ```shell
-$ gcutil listinstances
+$ gcloud compute instances list
 ```
 
-You may need to open the firewall for port 8000 using the [console][cloud-console] or the `gcutil` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
+You may need to open the firewall for port 8000 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
 
 ```shell
-$ gcutil addfirewall --allowed=tcp:8000 --target_tags=kubernetes-minion kubernetes-minion-8000
+$ gcloud compute firewall-rules create --allow=tcp:8000 --target-tags=kubernetes-minion kubernetes-minion-8000
 ```
 
 If you are running Kubernetes locally, you can just visit http://localhost:8000.
 
-For details about limiting traffic to specific sources, see the [gcutil documentation][gcutil-docs].
+For details about limiting traffic to specific sources, see the [GCE firewall documentation][gce-firewall-docs].
 
 [cloud-console]: https://console.developer.google.com
-[gcutil-docs]: https://developers.google.com/compute/docs/gcutil/reference/firewall#addfirewall
+[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls
 
 ### Step Six: Cleanup
diff --git a/examples/update-demo/README.md b/examples/update-demo/README.md
index 8fa76d79127..6ee896db8f3 100644
--- a/examples/update-demo/README.md
+++ b/examples/update-demo/README.md
@@ -33,10 +33,12 @@ It also assumes that `$DOCKER_HUB_USER` is set to your Docker user id.  We use t
 $ export DOCKER_HUB_USER=my-docker-id
 ```
 
-You may need to open the firewall for port 8080 using the [console][cloud-console] or the `gcutil` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
+You may need to open the firewall for port 8080 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
 
 ```bash
-$ gcutil addfirewall --allowed=tcp:8080 --target_tags=kubernetes-minion kubernetes-minion-8080
+$ gcloud compute firewall-rules create \
+    --allow tcp:8080 --target-tags=kubernetes-minion \
+    kubernetes-minion-8080
 ```
 
 ### Step Zero: Build the Docker images
diff --git a/hack/e2e-suite/pd.sh b/hack/e2e-suite/pd.sh
index f85de597af6..c62074ac69e 100755
--- a/hack/e2e-suite/pd.sh
+++ b/hack/e2e-suite/pd.sh
@@ -17,8 +17,6 @@
 # Launches a container and verifies it can be reached. Assumes that
 # we're being called by hack/e2e-test.sh (we use some env vars it sets up).
 
-# TODO: Convert uses of gcutil to gcloud
-
 set -o errexit
 set -o nounset
 set -o pipefail
@@ -41,7 +39,7 @@ function teardown() {
   rm -rf ${config}
   echo "Waiting for disk to become unmounted"
   sleep 20
-  gcutil deletedisk -f --zone=${ZONE} ${disk_name}
+  gcloud compute disks delete --quiet --zone="${ZONE}" "${disk_name}"
 }
 
 trap "teardown" EXIT
@@ -49,13 +47,14 @@
 perl -p -e "s/%.*%/${disk_name}/g" ${KUBE_ROOT}/examples/gce-pd/testpd.yaml > ${config}
 
 # Create and mount the disk.
-gcutil adddisk --size_gb=10 --zone=${ZONE} ${disk_name}
-gcutil attachdisk --disk ${disk_name} ${MASTER_NAME}
-gcutil ssh ${MASTER_NAME} sudo rm -rf /mnt/tmp
-gcutil ssh ${MASTER_NAME} sudo mkdir /mnt/tmp
-gcutil ssh ${MASTER_NAME} sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-${disk_name} /mnt/tmp
-gcutil ssh ${MASTER_NAME} sudo umount /mnt/tmp
-gcloud compute instances detach-disk --disk ${disk_name} --zone ${ZONE} ${MASTER_NAME}
+gcloud compute disks create --zone="${ZONE}" --size=10GB "${disk_name}"
+gcloud compute instances attach-disk --zone="${ZONE}" --disk="${disk_name}" \
+  --device-name temp-data "${MASTER_NAME}"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo rm -rf /mnt/tmp"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo mkdir -p /mnt/tmp"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-temp-data /mnt/tmp"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo umount /mnt/tmp"
+gcloud compute instances detach-disk --zone="${ZONE}" --disk "${disk_name}" "${MASTER_NAME}"
 
 ${KUBECFG} -c ${config} create pods
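
If you are stepping through this pd.sh flow by hand rather than letting the `teardown` trap run, the cleanup is symmetric with the setup; a minimal sketch using the same variables and the same two commands the patch itself introduces:

```shell
# Undo the attach/create above: detach the disk, then delete it without prompting.
gcloud compute instances detach-disk --zone="${ZONE}" --disk "${disk_name}" "${MASTER_NAME}"
gcloud compute disks delete --quiet --zone="${ZONE}" "${disk_name}"
```
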