From 6b91b45eff378b1da481cdf96cea8948a9787d28 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 18:59:56 -0800 Subject: [PATCH 01/14] Minion->Node rename: IAM_PROFILE_NODE, KUBE_ENABLE_NODE_PUBLIC_IP, etc KUBE_GCE_NODE_IMAGE, KUBE_GCE_NODE_PROJECT, KUBEMARK_NUM_NODES --- cluster/aws/config-default.sh | 4 ++-- cluster/aws/config-test.sh | 4 ++-- cluster/aws/options.md | 2 +- cluster/aws/util.sh | 8 +++---- cluster/gce/config-default.sh | 4 ++-- cluster/gce/config-test.sh | 4 ++-- docs/getting-started-guides/rkt/README.md | 4 ++-- hack/jenkins/e2e.sh | 28 +++++++++++------------ 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 7013eec1873..5435239d554 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -56,7 +56,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-kubernetes}" CLUSTER_ID=${INSTANCE_PREFIX} AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa} IAM_PROFILE_MASTER="kubernetes-master" -IAM_PROFILE_MINION="kubernetes-minion" +IAM_PROFILE_NODE="kubernetes-minion" LOG="/dev/null" @@ -130,7 +130,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! -ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true} +ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} # OS options for minions KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}" diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 0a38279de5c..45aa58816eb 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -54,7 +54,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-e2e-test-${USER}}" CLUSTER_ID=${INSTANCE_PREFIX} AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa} IAM_PROFILE_MASTER="kubernetes-master" -IAM_PROFILE_MINION="kubernetes-minion" +IAM_PROFILE_NODE="kubernetes-minion" LOG="/dev/null" @@ -126,7 +126,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! -ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true} +ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} # OS options for minions KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}" diff --git a/cluster/aws/options.md b/cluster/aws/options.md index 9db6e60af01..952b05a92d6 100644 --- a/cluster/aws/options.md +++ b/cluster/aws/options.md @@ -46,7 +46,7 @@ Please note: `kube-up` utilizes ephemeral storage available on instances for doc support ephemeral storage and will default to docker storage on the root disk which is usually only 8GB. EBS-only instance types include `t2`, `c4`, and `m4`. -**KUBE_ENABLE_MINION_PUBLIC_IP** +**KUBE_ENABLE_NODE_PUBLIC_IP** Should a public IP automatically assigned to the minions? 
"true" or "false" Defaults to: "true" diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 0c3f3a3ca85..8154897278c 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -609,9 +609,9 @@ function ensure-iam-profiles { echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}" create-iam-profile ${IAM_PROFILE_MASTER} } - aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MINION} || { - echo "Creating minion IAM profile: ${IAM_PROFILE_MINION}" - create-iam-profile ${IAM_PROFILE_MINION} + aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_NODE} || { + echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}" + create-iam-profile ${IAM_PROFILE_NODE} } } @@ -998,7 +998,7 @@ function start-minions() { ${AWS_ASG_CMD} create-launch-configuration \ --launch-configuration-name ${ASG_NAME} \ --image-id $KUBE_MINION_IMAGE \ - --iam-instance-profile ${IAM_PROFILE_MINION} \ + --iam-instance-profile ${IAM_PROFILE_NODE} \ --instance-type $MINION_SIZE \ --key-name ${AWS_SSH_KEY_NAME} \ --security-groups ${MINION_SG_ID} \ diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 8fd2fa8422f..c641ccec5f1 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -31,8 +31,8 @@ PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers} -MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-"${MASTER_IMAGE}"} -MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-"${MASTER_IMAGE_PROJECT}"} +MINION_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"} +MINION_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"} CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker} RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 8eca5118d70..ca2e015af86 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -32,8 +32,8 @@ PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers} -MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-"${MASTER_IMAGE}"} -MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-"${MASTER_IMAGE_PROJECT}"} +MINION_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"} +MINION_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"} CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker} RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5} diff --git a/docs/getting-started-guides/rkt/README.md b/docs/getting-started-guides/rkt/README.md index cae3b48d6fa..c9c2208bd29 100644 --- a/docs/getting-started-guides/rkt/README.md +++ b/docs/getting-started-guides/rkt/README.md @@ -72,8 +72,8 @@ To use rkt as the container runtime for your CoreOS cluster on GCE, you need to ```console $ export KUBE_OS_DISTRIBUTION=coreos -$ export KUBE_GCE_MINION_IMAGE= -$ export KUBE_GCE_MINION_PROJECT=coreos-cloud +$ export KUBE_GCE_NODE_IMAGE= +$ export KUBE_GCE_NODE_PROJECT=coreos-cloud $ export KUBE_CONTAINER_RUNTIME=rkt ``` diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 33a9519788f..e1559d0e926 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -553,8 +553,8 @@ case ${JOB_NAME} in )"} : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"} : ${PROJECT:="kubekins-e2e-gce-trusty-rls"} - : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} - : 
${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} + : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} + : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} : ${KUBE_OS_DISTRIBUTION:="trusty"} : ${ENABLE_CLUSTER_REGISTRY:=false} : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"} @@ -571,8 +571,8 @@ case ${JOB_NAME} in )"} : ${KUBE_GCE_INSTANCE_PREFIX="e2e-trusty-slow"} : ${PROJECT:="k8s-e2e-gce-trusty-slow"} - : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} - : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} + : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} + : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} : ${KUBE_OS_DISTRIBUTION:="trusty"} : ${ENABLE_CLUSTER_REGISTRY:=false} : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"} @@ -594,8 +594,8 @@ case ${JOB_NAME} in )"} : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"} : ${PROJECT:="k8s-e2e-gce-trusty-beta"} - : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} - : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} + : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} + : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} : ${KUBE_OS_DISTRIBUTION:="trusty"} : ${ENABLE_CLUSTER_REGISTRY:=false} : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"} @@ -613,8 +613,8 @@ case ${JOB_NAME} in )"} : ${KUBE_GCE_INSTANCE_PREFIX="e2e-trusty-beta-slow"} : ${PROJECT:="k8s-e2e-gce-trusty-beta-slow"} - : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} - : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} + : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"} + : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"} : ${KUBE_OS_DISTRIBUTION:="trusty"} : ${ENABLE_CLUSTER_REGISTRY:=false} : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"} @@ -1419,7 +1419,7 @@ case ${JOB_NAME} in MASTER_SIZE="n1-standard-2" MINION_SIZE="n1-standard-1" KUBEMARK_MASTER_SIZE="n1-standard-4" - KUBEMARK_NUM_MINIONS="100" + KUBEMARK_NUM_NODES="100" ;; # Run Kubemark test on a fake 500 node cluster to test for regressions on @@ -1439,7 +1439,7 @@ case ${JOB_NAME} in KUBE_GCE_INSTANCE_PREFIX="kubemark500" E2E_ZONE="asia-east1-a" KUBEMARK_MASTER_SIZE="n1-standard-16" - KUBEMARK_NUM_MINIONS="500" + KUBEMARK_NUM_NODES="500" ;; # Run big Kubemark test, this currently means a 1000 node cluster and 16 core master @@ -1461,7 +1461,7 @@ case ${JOB_NAME} in KUBE_GCE_INSTANCE_PREFIX="kubemark1000" E2E_ZONE="asia-east1-a" KUBEMARK_MASTER_SIZE="n1-standard-16" - KUBEMARK_NUM_MINIONS="1000" + KUBEMARK_NUM_NODES="1000" ;; esac @@ -1475,8 +1475,8 @@ export KUBE_GCE_ZONE=${E2E_ZONE} export KUBE_GCE_NETWORK=${E2E_NETWORK} export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-} export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-} -export KUBE_GCE_MINION_PROJECT=${KUBE_GCE_MINION_PROJECT:-} -export KUBE_GCE_MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-} +export KUBE_GCE_NODE_PROJECT=${KUBE_GCE_NODE_PROJECT:-} +export KUBE_GCE_NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-} export KUBE_OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-} # GKE variables @@ -1718,7 +1718,7 @@ if [[ "${USE_KUBEMARK:-}" == "true" ]]; then NUM_MINIONS_BKP=${NUM_MINIONS} MASTER_SIZE_BKP=${MASTER_SIZE} ./test/kubemark/stop-kubemark.sh - NUM_MINIONS=${KUBEMARK_NUM_MINIONS:-$NUM_MINIONS} + NUM_MINIONS=${KUBEMARK_NUM_NODES:-$NUM_MINIONS} MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE} ./test/kubemark/start-kubemark.sh ./test/kubemark/run-e2e-tests.sh 
--ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false" From 68539ae8a4af482ad72ecc3bf40b425a895636de Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:00:37 -0800 Subject: [PATCH 02/14] Minion->Node rename: KUBE_NODE_IMAGE --- cluster/aws/config-default.sh | 2 +- cluster/aws/config-test.sh | 2 +- cluster/aws/coreos/util.sh | 8 ++++---- cluster/aws/trusty/common.sh | 4 ++-- cluster/aws/util.sh | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 5435239d554..be08d2a787b 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -134,7 +134,7 @@ ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} # OS options for minions KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}" -KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}" +KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}" COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}" CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}" RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}" diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 45aa58816eb..5c47ef4c09e 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -130,7 +130,7 @@ ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} # OS options for minions KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}" -KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}" +KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}" COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}" CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}" RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}" diff --git a/cluster/aws/coreos/util.sh b/cluster/aws/coreos/util.sh index 70763751feb..c58144bb02c 100644 --- a/cluster/aws/coreos/util.sh +++ b/cluster/aws/coreos/util.sh @@ -19,11 +19,11 @@ SSH_USER=core function detect-minion-image (){ - if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then - KUBE_MINION_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']") + if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then + KUBE_NODE_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']") fi - if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then - echo "unable to determine KUBE_MINION_IMAGE" + if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then + echo "unable to determine KUBE_NODE_IMAGE" exit 2 fi } diff --git a/cluster/aws/trusty/common.sh b/cluster/aws/trusty/common.sh index e8b827f4f8c..bfc98b1882b 100644 --- a/cluster/aws/trusty/common.sh +++ b/cluster/aws/trusty/common.sh @@ -18,9 +18,9 @@ # A library of common helper functions for Ubuntus & Debians. 
function detect-minion-image() { - if [[ -z "${KUBE_MINION_IMAGE=-}" ]]; then + if [[ -z "${KUBE_NODE_IMAGE=-}" ]]; then detect-image - KUBE_MINION_IMAGE=$AWS_IMAGE + KUBE_NODE_IMAGE=$AWS_IMAGE fi } diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 8154897278c..e790e8d9e1b 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -997,7 +997,7 @@ function start-minions() { fi ${AWS_ASG_CMD} create-launch-configuration \ --launch-configuration-name ${ASG_NAME} \ - --image-id $KUBE_MINION_IMAGE \ + --image-id $KUBE_NODE_IMAGE \ --iam-instance-profile ${IAM_PROFILE_NODE} \ --instance-type $MINION_SIZE \ --key-name ${AWS_SSH_KEY_NAME} \ From ae314ad24636a36f7418b432657d108865793035 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:00:46 -0800 Subject: [PATCH 03/14] Minion->Node rename: KUBE_NODE_IP_ADDRESSES, KUBE_NODE_IP_ADDRESS --- cluster/aws/util.sh | 12 ++++++------ cluster/gce/util.sh | 8 ++++---- cluster/juju/util.sh | 6 +++--- cluster/kube-util.sh | 4 ++-- cluster/mesos/docker/util.sh | 6 +++--- cluster/vagrant/util.sh | 4 ++-- cluster/vsphere/util.sh | 12 ++++++------ 7 files changed, 26 insertions(+), 26 deletions(-) diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index e790e8d9e1b..ed586dbe4bb 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -199,8 +199,8 @@ function find-running-minions () { function detect-minions () { find-running-minions - # This is inefficient, but we want MINION_NAMES / MINION_IDS to be ordered the same as KUBE_MINION_IP_ADDRESSES - KUBE_MINION_IP_ADDRESSES=() + # This is inefficient, but we want MINION_NAMES / MINION_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES + KUBE_NODE_IP_ADDRESSES=() for (( i=0; i<${#MINION_NAMES[@]}; i++)); do local minion_ip if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then @@ -209,10 +209,10 @@ function detect-minions () { minion_ip=$(get_instance_private_ip ${MINION_NAMES[$i]}) fi echo "Found minion ${i}: ${MINION_NAMES[$i]} @ ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") done - if [[ -z "$KUBE_MINION_IP_ADDRESSES" ]]; then + if [[ -z "$KUBE_NODE_IP_ADDRESSES" ]]; then echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" exit 1 fi @@ -1113,11 +1113,11 @@ function check-cluster() { # Basic sanity checking # TODO(justinsb): This is really not needed any more local rc # Capture return code without exiting because of errexit bash option - for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do + for (( i=0; i<${#KUBE_NODE_IP_ADDRESSES[@]}; i++)); do # Make sure docker is installed and working. local attempt=0 while true; do - local minion_ip=${KUBE_MINION_IP_ADDRESSES[$i]} + local minion_ip=${KUBE_NODE_IP_ADDRESSES[$i]} echo -n "Attempt $(($attempt+1)) to check Docker on node @ ${minion_ip} ..." 
local output=`check-minion ${minion_ip}` echo $output diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 655221c9d16..65595268b14 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -229,11 +229,11 @@ function detect-minion-names { # ZONE # Vars set: # MINION_NAMES -# KUBE_MINION_IP_ADDRESSES (array) +# KUBE_NODE_IP_ADDRESSES (array) function detect-minions () { detect-project detect-minion-names - KUBE_MINION_IP_ADDRESSES=() + KUBE_NODE_IP_ADDRESSES=() for (( i=0; i<${#MINION_NAMES[@]}; i++)); do local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \ "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \ @@ -242,10 +242,10 @@ function detect-minions () { echo "Did not find ${MINION_NAMES[$i]}" >&2 else echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") fi done - if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then + if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2 exit 1 fi diff --git a/cluster/juju/util.sh b/cluster/juju/util.sh index 2ed2dd70aba..faa9c4e20ac 100755 --- a/cluster/juju/util.sh +++ b/cluster/juju/util.sh @@ -69,9 +69,9 @@ function detect-minions() { # ] # Strip out the IP addresses - export KUBE_MINION_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}")) - # echo "Kubernetes minions: " ${KUBE_MINION_IP_ADDRESSES[@]} 1>&2 - export NUM_MINIONS=${#KUBE_MINION_IP_ADDRESSES[@]} + export KUBE_NODE_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}")) + # echo "Kubernetes minions: " ${KUBE_NODE_IP_ADDRESSES[@]} 1>&2 + export NUM_MINIONS=${#KUBE_NODE_IP_ADDRESSES[@]} } function get-password() { diff --git a/cluster/kube-util.sh b/cluster/kube-util.sh index 8bbb3b97ca2..396ccccf619 100644 --- a/cluster/kube-util.sh +++ b/cluster/kube-util.sh @@ -29,9 +29,9 @@ function detect-minion-names { echo "MINION_NAMES: [${MINION_NAMES[*]}]" 1>&2 } -# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[] function detect-minions { - echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2 + echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2 } # Verify prereqs on host machine diff --git a/cluster/mesos/docker/util.sh b/cluster/mesos/docker/util.sh index 838936f9ec3..20f8f32df44 100644 --- a/cluster/mesos/docker/util.sh +++ b/cluster/mesos/docker/util.sh @@ -204,7 +204,7 @@ function detect-master { echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" 1>&2 } -# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[] # These Mesos slaves MAY host Kublets, # but might not have a Kublet running unless a kubernetes task has been scheduled on them. 
function detect-minions { @@ -215,9 +215,9 @@ function detect-minions { fi while read -r docker_id; do local minion_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" "${docker_id}") - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") done <<< "$docker_ids" - echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2 + echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2 } # Verify prereqs on host machine diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index b4ddc99fbb8..4de4e05a4ba 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -25,10 +25,10 @@ function detect-master () { echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2 } -# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[] function detect-minions { echo "Minions already detected" 1>&2 - KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}") + KUBE_NODE_IP_ADDRESSES=("${MINION_IPS[@]}") } # Verify prereqs on host machine Also sets exports USING_KUBE_SCRIPTS=true so diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index c14452fbfb2..13847d152a6 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -47,19 +47,19 @@ function detect-master { # Assumed vars: # MINION_NAMES # Vars set: -# KUBE_MINION_IP_ADDRESS (array) +# KUBE_NODE_IP_ADDRESS (array) function detect-minions { - KUBE_MINION_IP_ADDRESSES=() + KUBE_NODE_IP_ADDRESSES=() for (( i=0; i<${#MINION_NAMES[@]}; i++)); do local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]}) if [[ -z "${minion_ip-}" ]] ; then echo "Did not find ${MINION_NAMES[$i]}" >&2 else echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") fi done - if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then + if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2 exit 1 fi @@ -315,7 +315,7 @@ function kube-up { for (( i=0; i<${#MINION_NAMES[@]}; i++)); do printf "Waiting for ${MINION_NAMES[$i]} to become available..." until curl --max-time 5 \ - --fail --output /dev/null --silent "http://${KUBE_MINION_IP_ADDRESSES[$i]}:10250/healthz"; do + --fail --output /dev/null --silent "http://${KUBE_NODE_IP_ADDRESSES[$i]}:10250/healthz"; do printf "." sleep 2 done @@ -349,7 +349,7 @@ function kube-up { local i for (( i=0; i<${#MINION_NAMES[@]}; i++)); do # Make sure docker is installed - kube-ssh "${KUBE_MINION_IP_ADDRESSES[$i]}" which docker > /dev/null || { + kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || { echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 echo "cluster. 
(sorry!)" >&2 From a36d3390bf3565a1d7ac9298ee302465b56ee10f Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:01:03 -0800 Subject: [PATCH 04/14] Minion->Node rename: KUBERNETES_NODE_MEMORY, VAGRANT_NODE_NAMES, etc ENABLE_NODE_PUBLIC_IP NODE_ADDRESS NODE_BLOCK_DEVICE_MAPPINGS NODE_CONTAINER_ADDRS NODE_CONTAINER_NETMASKS NODE_CONTAINER_SUBNET_BASE NODE_CONTAINER_SUBNETS NODE_CPU --- Vagrantfile | 2 +- cluster/aws/config-default.sh | 2 +- cluster/aws/config-test.sh | 2 +- cluster/aws/util.sh | 8 ++++---- cluster/centos/node/scripts/kubelet.sh | 4 ++-- cluster/vagrant/config-default.sh | 16 ++++++++-------- cluster/vagrant/util.sh | 14 +++++++------- cluster/vsphere/config-default.sh | 2 +- cluster/vsphere/config-test.sh | 2 +- cluster/vsphere/util.sh | 2 +- docs/devel/developer-guides/vagrant.md | 2 +- docs/getting-started-guides/vagrant.md | 2 +- 12 files changed, 29 insertions(+), 29 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 709799dbe9f..bcedbb3698e 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -105,7 +105,7 @@ end # When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens. # This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.) $vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i -$vm_minion_mem = (ENV['KUBERNETES_MINION_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i +$vm_minion_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| def setvmboxandurl(config, provider) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index be08d2a787b..5a74d1e4a19 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -130,7 +130,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! -ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} +ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} # OS options for minions KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}" diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 5c47ef4c09e..11d164d87ea 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -126,7 +126,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! -ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} +ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} # OS options for minions KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}" diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index ed586dbe4bb..0aa6e0b232b 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -86,7 +86,7 @@ MINION_SG_NAME="kubernetes-minion-${CLUSTER_ID}" # only affects the big storage instance types, which aren't a typical use case right now. 
BLOCK_DEVICE_MAPPINGS_BASE="{\"DeviceName\": \"/dev/sdc\",\"VirtualName\":\"ephemeral0\"},{\"DeviceName\": \"/dev/sdd\",\"VirtualName\":\"ephemeral1\"},{\"DeviceName\": \"/dev/sde\",\"VirtualName\":\"ephemeral2\"},{\"DeviceName\": \"/dev/sdf\",\"VirtualName\":\"ephemeral3\"}" MASTER_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MASTER_ROOT_DISK_SIZE},\"VolumeType\":\"${MASTER_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]" -MINION_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MINION_ROOT_DISK_SIZE},\"VolumeType\":\"${MINION_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]" +NODE_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MINION_ROOT_DISK_SIZE},\"VolumeType\":\"${MINION_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]" # TODO (bburns) Parameterize this for multiple cluster per project @@ -203,7 +203,7 @@ function detect-minions () { KUBE_NODE_IP_ADDRESSES=() for (( i=0; i<${#MINION_NAMES[@]}; i++)); do local minion_ip - if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then + if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then minion_ip=$(get_instance_public_ip ${MINION_NAMES[$i]}) else minion_ip=$(get_instance_private_ip ${MINION_NAMES[$i]}) @@ -990,7 +990,7 @@ function start-minions() { echo "Creating minion configuration" generate-minion-user-data > "${KUBE_TEMP}/minion-user-data" local public_ip_option - if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then + if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then public_ip_option="--associate-public-ip-address" else public_ip_option="--no-associate-public-ip-address" @@ -1003,7 +1003,7 @@ function start-minions() { --key-name ${AWS_SSH_KEY_NAME} \ --security-groups ${MINION_SG_ID} \ ${public_ip_option} \ - --block-device-mappings "${MINION_BLOCK_DEVICE_MAPPINGS}" \ + --block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \ --user-data "file://${KUBE_TEMP}/minion-user-data" echo "Creating autoscaling group" diff --git a/cluster/centos/node/scripts/kubelet.sh b/cluster/centos/node/scripts/kubelet.sh index e104c434060..6491aa468fa 100755 --- a/cluster/centos/node/scripts/kubelet.sh +++ b/cluster/centos/node/scripts/kubelet.sh @@ -27,7 +27,7 @@ KUBE_LOGTOSTDERR="--logtostderr=true" KUBE_LOG_LEVEL="--v=4" # --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces) -MINION_ADDRESS="--address=${NODE_ADDRESS}" +NODE_ADDRESS="--address=${NODE_ADDRESS}" # --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag. 
MINION_PORT="--port=10250" @@ -48,7 +48,7 @@ EOF KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\ \${KUBE_LOG_LEVEL} \\ - \${MINION_ADDRESS} \\ + \${NODE_ADDRESS} \\ \${MINION_PORT} \\ \${MINION_HOSTNAME} \\ \${KUBELET_API_SERVER} \\ diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index 2a8609b6a05..6208ffbed3e 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -32,18 +32,18 @@ REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} # Map out the IPs, names and container subnets of each minion export MINION_IP_BASE=${MINION_IP_BASE-"10.245.1."} -MINION_CONTAINER_SUBNET_BASE="10.246" +NODE_CONTAINER_SUBNET_BASE="10.246" MASTER_CONTAINER_NETMASK="255.255.255.0" -MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1" -MASTER_CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.1/24" -CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16" +MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" +MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" +CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" for ((i=0; i < NUM_MINIONS; i++)) do MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))" MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))" - MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" - MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1" - MINION_CONTAINER_NETMASKS[$i]="255.255.255.0" - VAGRANT_MINION_NAMES[$i]="minion-$((i+1))" + NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" + NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" + NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" + VAGRANT_NODE_NAMES[$i]="minion-$((i+1))" done SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index 4de4e05a4ba..a4ed79fced8 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -131,8 +131,8 @@ function create-provision-scripts { echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'" echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" - echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'" - echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" + echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'" + echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})" echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "MASTER_USER='${MASTER_USER}'" echo "MASTER_PASSWD='${MASTER_PASSWD}'" @@ -175,9 +175,9 @@ function create-provision-scripts { echo "MINION_ID='$i'" echo "NODE_IP='${MINION_IPS[$i]}'" echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" - echo "CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'" - echo "CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'" - echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" + echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'" + echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'" + echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})" echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'" echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'" @@ -223,8 +223,8 @@ function verify-cluster { # verify each minion has all required daemons local i for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - echo "Validating ${VAGRANT_MINION_NAMES[$i]}" - local machine=${VAGRANT_MINION_NAMES[$i]} + echo "Validating ${VAGRANT_NODE_NAMES[$i]}" + local 
machine=${VAGRANT_NODE_NAMES[$i]} local -a required_daemon=("salt-minion" "kubelet" "docker") local validated="1" until [[ "$validated" == "0" ]]; do diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index e039f4b71f8..8ef419e5022 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -29,7 +29,7 @@ MASTER_CPU=1 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_MEMORY_MB=2048 -MINION_CPU=1 +NODE_CPU=1 SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 88740908d0d..bf9c68b05e7 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -29,7 +29,7 @@ MASTER_CPU=1 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_MEMORY_MB=1024 -MINION_CPU=1 +NODE_CPU=1 SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index 13847d152a6..e28608137f1 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -278,7 +278,7 @@ function kube-up { ) > "${KUBE_TEMP}/minion-start-${i}.sh" ( - kube-up-vm "${MINION_NAMES[$i]}" -c ${MINION_CPU-1} -m ${MINION_MEMORY_MB-1024} + kube-up-vm "${MINION_NAMES[$i]}" -c ${NODE_CPU-1} -m ${MINION_MEMORY_MB-1024} kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" ) & done diff --git a/docs/devel/developer-guides/vagrant.md b/docs/devel/developer-guides/vagrant.md index 61560db7f48..291b85bc37c 100644 --- a/docs/devel/developer-guides/vagrant.md +++ b/docs/devel/developer-guides/vagrant.md @@ -369,7 +369,7 @@ If you need more granular control, you can set the amount of memory for the mast ```sh export KUBERNETES_MASTER_MEMORY=1536 -export KUBERNETES_MINION_MEMORY=2048 +export KUBERNETES_NODE_MEMORY=2048 ``` #### I ran vagrant suspend and nothing works! diff --git a/docs/getting-started-guides/vagrant.md b/docs/getting-started-guides/vagrant.md index e103369b2c3..5e48b7b1fc4 100644 --- a/docs/getting-started-guides/vagrant.md +++ b/docs/getting-started-guides/vagrant.md @@ -408,7 +408,7 @@ If you need more granular control, you can set the amount of memory for the mast ```sh export KUBERNETES_MASTER_MEMORY=1536 -export KUBERNETES_MINION_MEMORY=2048 +export KUBERNETES_NODE_MEMORY=2048 ``` #### I ran vagrant suspend and nothing works! 
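Patches 01–04 above rename configuration variables in place, with no fallback: once a script reads only the new name, an environment that still exports the legacy `MINION_*` spelling silently falls back to defaults. As a rough illustration of how a deployment wrapper could bridge the cut-over — a hypothetical shim, not part of this series — the legacy names can be mapped onto their `NODE_*` replacements before the cluster scripts run:

```sh
#!/usr/bin/env bash
# Hypothetical compatibility shim (not part of these patches): copy legacy
# MINION_* values into the new NODE_* names when the new names are unset,
# so existing environments keep working across the rename.
map_legacy_vars() {
  local pair old new
  for pair in \
    KUBE_ENABLE_MINION_PUBLIC_IP:KUBE_ENABLE_NODE_PUBLIC_IP \
    KUBE_GCE_MINION_IMAGE:KUBE_GCE_NODE_IMAGE \
    KUBE_GCE_MINION_PROJECT:KUBE_GCE_NODE_PROJECT \
    KUBE_MINION_IMAGE:KUBE_NODE_IMAGE \
    KUBERNETES_MINION_MEMORY:KUBERNETES_NODE_MEMORY; do
    old="${pair%%:*}"
    new="${pair##*:}"
    # ${!name} is bash indirect expansion: read the variable whose name is
    # stored in $name. Only fall back when the new name is empty or unset
    # and the legacy name carries a value.
    if [[ -z "${!new:-}" && -n "${!old:-}" ]]; then
      export "${new}=${!old}"
    fi
  done
}

map_legacy_vars
```

Sourcing such a shim ahead of `kube-up.sh` would be one way to stage the migration; the patches themselves simply cut over to the new names.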
From 83ed2fa22e80b9cc66c5bc18b64bddbaa54cfe5e Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:02:38 -0800 Subject: [PATCH 05/14] Minion->Node rename: NODE_DISK_SIZE, NODE_DISK_TYPE, NODE_HOSTNAME, etc NODE_IDS NODE_ID NODE_IMAGE_PROJECT NODE_IMAGE --- cluster/aws/util.sh | 12 ++++++------ cluster/centos/node/scripts/kubelet.sh | 4 ++-- cluster/gce/config-default.sh | 8 ++++---- cluster/gce/config-test.sh | 8 ++++---- cluster/gce/util.sh | 8 ++++---- cluster/vagrant/util.sh | 2 +- hack/jenkins/e2e.sh | 6 +++--- 7 files changed, 24 insertions(+), 24 deletions(-) diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 0aa6e0b232b..fe365f59193 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -186,10 +186,10 @@ function query-running-minions () { } function find-running-minions () { - MINION_IDS=() + NODE_IDS=() MINION_NAMES=() for id in $(query-running-minions "Reservations[].Instances[].InstanceId"); do - MINION_IDS+=("${id}") + NODE_IDS+=("${id}") # We use the minion ids as the name MINION_NAMES+=("${id}") @@ -199,7 +199,7 @@ function find-running-minions () { function detect-minions () { find-running-minions - # This is inefficient, but we want MINION_NAMES / MINION_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES + # This is inefficient, but we want MINION_NAMES / NODE_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES KUBE_NODE_IP_ADDRESSES=() for (( i=0; i<${#MINION_NAMES[@]}; i++)); do local minion_ip @@ -1022,8 +1022,8 @@ function start-minions() { attempt=0 while true; do find-running-minions > $LOG - if [[ ${#MINION_IDS[@]} == ${NUM_MINIONS} ]]; then - echo -e " ${color_green}${#MINION_IDS[@]} minions started; ready${color_norm}" + if [[ ${#NODE_IDS[@]} == ${NUM_MINIONS} ]]; then + echo -e " ${color_green}${#NODE_IDS[@]} minions started; ready${color_norm}" break fi @@ -1037,7 +1037,7 @@ function start-minions() { exit 1 fi - echo -e " ${color_yellow}${#MINION_IDS[@]} minions started; waiting${color_norm}" + echo -e " ${color_yellow}${#NODE_IDS[@]} minions started; waiting${color_norm}" attempt=$(($attempt+1)) sleep 10 done diff --git a/cluster/centos/node/scripts/kubelet.sh b/cluster/centos/node/scripts/kubelet.sh index 6491aa468fa..71d780d5433 100755 --- a/cluster/centos/node/scripts/kubelet.sh +++ b/cluster/centos/node/scripts/kubelet.sh @@ -33,7 +33,7 @@ NODE_ADDRESS="--address=${NODE_ADDRESS}" MINION_PORT="--port=10250" # --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname. -MINION_HOSTNAME="--hostname-override=${NODE_ADDRESS}" +NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}" # --api-servers=[]: List of Kubernetes API servers for publishing events, # and reading pods and services. (ip:port), comma separated. 
@@ -50,7 +50,7 @@ KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\ \${KUBE_LOG_LEVEL} \\ \${NODE_ADDRESS} \\ \${MINION_PORT} \\ - \${MINION_HOSTNAME} \\ + \${NODE_HOSTNAME} \\ \${KUBELET_API_SERVER} \\ \${KUBE_ALLOW_PRIV} \\ \${KUBELET_ARGS}" diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index c641ccec5f1..84478c4375d 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -23,16 +23,16 @@ MINION_SIZE=${MINION_SIZE:-n1-standard-2} NUM_MINIONS=${NUM_MINIONS:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} -MINION_DISK_TYPE=${MINION_DISK_TYPE:-pd-standard} -MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB} +NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} +NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true} PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers} -MINION_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"} -MINION_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"} +NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"} +NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"} CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker} RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index ca2e015af86..f981334a7e9 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -23,8 +23,8 @@ MINION_SIZE=${MINION_SIZE:-n1-standard-2} NUM_MINIONS=${NUM_MINIONS:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} -MINION_DISK_TYPE=${MINION_DISK_TYPE:-pd-standard} -MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB} +NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} +NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} KUBE_APISERVER_REQUEST_TIMEOUT=300 PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} @@ -32,8 +32,8 @@ PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers} -MINION_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"} -MINION_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"} +NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"} +NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"} CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker} RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5} diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 65595268b14..9845cbb64e0 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -378,10 +378,10 @@ function create-node-template { if ! 
gcloud compute instance-templates create "$template_name" \ --project "${PROJECT}" \ --machine-type "${MINION_SIZE}" \ - --boot-disk-type "${MINION_DISK_TYPE}" \ - --boot-disk-size "${MINION_DISK_SIZE}" \ - --image-project="${MINION_IMAGE_PROJECT}" \ - --image "${MINION_IMAGE}" \ + --boot-disk-type "${NODE_DISK_TYPE}" \ + --boot-disk-size "${NODE_DISK_SIZE}" \ + --image-project="${NODE_IMAGE_PROJECT}" \ + --image "${NODE_IMAGE}" \ --tags "${MINION_TAG}" \ --network "${NETWORK}" \ ${preemptible_minions} \ diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index a4ed79fced8..b6085138ec1 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -172,7 +172,7 @@ function create-provision-scripts { echo "MINION_NAME=(${MINION_NAMES[$i]})" echo "MINION_IPS=(${MINION_IPS[@]})" echo "MINION_IP='${MINION_IPS[$i]}'" - echo "MINION_ID='$i'" + echo "NODE_ID='$i'" echo "NODE_IP='${MINION_IPS[$i]}'" echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'" diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index e1559d0e926..017cdffd37a 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -431,7 +431,7 @@ case ${JOB_NAME} in # Override GCE defaults. MASTER_SIZE="n1-standard-4" MINION_SIZE="n1-standard-2" - MINION_DISK_SIZE="50GB" + NODE_DISK_SIZE="50GB" NUM_MINIONS="100" # Reduce logs verbosity TEST_CLUSTER_LOG_LEVEL="--v=2" @@ -453,7 +453,7 @@ case ${JOB_NAME} in E2E_ZONE="us-east1-b" MASTER_SIZE="n1-standard-4" MINION_SIZE="n1-standard-2" - MINION_DISK_SIZE="50GB" + NODE_DISK_SIZE="50GB" NUM_MINIONS="100" # Reduce logs verbosity TEST_CLUSTER_LOG_LEVEL="--v=2" @@ -1501,7 +1501,7 @@ export KUBE_ENABLE_DEPLOYMENTS=${ENABLE_DEPLOYMENTS:-} export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-} export MASTER_SIZE=${MASTER_SIZE:-} export MINION_SIZE=${MINION_SIZE:-} -export MINION_DISK_SIZE=${MINION_DISK_SIZE:-} +export NODE_DISK_SIZE=${NODE_DISK_SIZE:-} export NUM_MINIONS=${NUM_MINIONS:-} export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-} export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-} From 6fe68a737e320022e32f45d5f4fd80e7cae9bffd Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:03:44 -0800 Subject: [PATCH 06/14] Minion->Node rename: NODE_IP_BASE, NODE_IP_RANGES, NODE_IP_RANGE, etc NODE_IPS NODE_IP NODE_MEMORY_MB --- Vagrantfile | 2 +- cluster/vagrant/config-default.sh | 4 ++-- cluster/vagrant/provision-master.sh | 2 +- cluster/vagrant/provision-minion.sh | 8 ++++---- cluster/vagrant/util.sh | 18 +++++++++--------- cluster/vsphere/config-default.sh | 4 ++-- cluster/vsphere/config-test.sh | 4 ++-- cluster/vsphere/templates/salt-minion.sh | 2 +- cluster/vsphere/util.sh | 4 ++-- docs/design/networking.md | 2 +- 10 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index bcedbb3698e..514179fe73c 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -22,7 +22,7 @@ $num_minion = (ENV['NUM_MINIONS'] || 1).to_i # ip configuration $master_ip = ENV['MASTER_IP'] -$minion_ip_base = ENV['MINION_IP_BASE'] || "" +$minion_ip_base = ENV['NODE_IP_BASE'] || "" $minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" } # Determine the OS platform to use diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index 6208ffbed3e..c0f69bc0257 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -31,14 +31,14 @@ export MASTER_NAME="${INSTANCE_PREFIX}-master" 
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} # Map out the IPs, names and container subnets of each minion -export MINION_IP_BASE=${MINION_IP_BASE-"10.245.1."} +export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."} NODE_CONTAINER_SUBNET_BASE="10.246" MASTER_CONTAINER_NETMASK="255.255.255.0" MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" for ((i=0; i < NUM_MINIONS; i++)) do - MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))" + NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))" MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))" NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index da94a72cf72..fc5176dcb9e 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -70,7 +70,7 @@ fi # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver for (( i=0; i<${#MINION_NAMES[@]}; i++)); do minion=${MINION_NAMES[$i]} - ip=${MINION_IPS[$i]} + ip=${NODE_IPS[$i]} if [ ! "$(cat /etc/hosts | grep $minion)" ]; then echo "Adding $minion to hosts file" echo "$ip $minion" >> /etc/hosts diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh index f0cebe264ae..d8befaa7026 100755 --- a/cluster/vagrant/provision-minion.sh +++ b/cluster/vagrant/provision-minion.sh @@ -94,12 +94,12 @@ if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then echo "Adding $MASTER_NAME to hosts file" echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts fi -echo "$MINION_IP $MINION_NAME" >> /etc/hosts +echo "$NODE_IP $MINION_NAME" >> /etc/hosts # Setup hosts file to support ping by hostname to each minion in the cluster for (( i=0; i<${#MINION_NAMES[@]}; i++)); do minion=${MINION_NAMES[$i]} - ip=${MINION_IPS[$i]} + ip=${NODE_IPS[$i]} if [ ! 
"$(cat /etc/hosts | grep $minion)" ]; then echo "Adding $minion to hosts file" echo "$ip $minion" >> /etc/hosts @@ -145,13 +145,13 @@ cat </etc/salt/minion.d/grains.conf grains: cloud: vagrant network_mode: openvswitch - node_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' + node_ip: '$(echo "$NODE_IP" | sed -e "s/'/''/g")' api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")' roles: - kubernetes-pool cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")' - hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' + hostname_override: '$(echo "$NODE_IP" | sed -e "s/'/''/g")' docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")' EOF diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index b6085138ec1..9a6198db147 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -28,7 +28,7 @@ function detect-master () { # Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[] function detect-minions { echo "Minions already detected" 1>&2 - KUBE_NODE_IP_ADDRESSES=("${MINION_IPS[@]}") + KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}") } # Verify prereqs on host machine Also sets exports USING_KUBE_SCRIPTS=true so @@ -125,7 +125,7 @@ function create-provision-scripts { echo "MASTER_NAME='${INSTANCE_PREFIX}-master'" echo "MASTER_IP='${MASTER_IP}'" echo "MINION_NAMES=(${MINION_NAMES[@]})" - echo "MINION_IPS=(${MINION_IPS[@]})" + echo "NODE_IPS=(${NODE_IPS[@]})" echo "NODE_IP='${MASTER_IP}'" echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'" @@ -170,10 +170,10 @@ function create-provision-scripts { echo "MASTER_IP='${MASTER_IP}'" echo "MINION_NAMES=(${MINION_NAMES[@]})" echo "MINION_NAME=(${MINION_NAMES[$i]})" - echo "MINION_IPS=(${MINION_IPS[@]})" - echo "MINION_IP='${MINION_IPS[$i]}'" + echo "NODE_IPS=(${NODE_IPS[@]})" + echo "NODE_IP='${NODE_IPS[$i]}'" echo "NODE_ID='$i'" - echo "NODE_IP='${MINION_IPS[$i]}'" + echo "NODE_IP='${NODE_IPS[$i]}'" echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'" echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'" @@ -242,13 +242,13 @@ function verify-cluster { echo echo "Waiting for each minion to be registered with cloud provider" - for (( i=0; i<${#MINION_IPS[@]}; i++)); do - local machine="${MINION_IPS[$i]}" + for (( i=0; i<${#NODE_IPS[@]}; i++)); do + local machine="${NODE_IPS[$i]}" local count="0" until [[ "$count" == "1" ]]; do local minions minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1) - count=$(echo $minions | grep -c "${MINION_IPS[i]}") || { + count=$(echo $minions | grep -c "${NODE_IPS[i]}") || { printf "." sleep 2 count="0" @@ -339,7 +339,7 @@ function test-teardown { # Find the minion name based on the IP address function find-vagrant-name-by-ip { local ip="$1" - local ip_pattern="${MINION_IP_BASE}(.*)" + local ip_pattern="${NODE_IP_BASE}(.*)" # This is subtle. We map 10.245.2.2 -> minion-1. We do this by matching a # regexp and using the capture to construct the name. 
diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index 8ef419e5022..43cfbc224f0 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -27,8 +27,8 @@ MASTER_MEMORY_MB=1024 MASTER_CPU=1 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) -MINION_MEMORY_MB=2048 +NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) +NODE_MEMORY_MB=2048 NODE_CPU=1 SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index bf9c68b05e7..39ca55d7233 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -27,8 +27,8 @@ MASTER_MEMORY_MB=1024 MASTER_CPU=1 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) -MINION_MEMORY_MB=1024 +NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) +NODE_MEMORY_MB=1024 NODE_CPU=1 SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/templates/salt-minion.sh b/cluster/vsphere/templates/salt-minion.sh index 2f7dcafc915..3a02aa2bbfc 100755 --- a/cluster/vsphere/templates/salt-minion.sh +++ b/cluster/vsphere/templates/salt-minion.sh @@ -41,7 +41,7 @@ grains: roles: - kubernetes-pool - kubernetes-pool-vsphere - cbr-cidr: $MINION_IP_RANGE + cbr-cidr: $NODE_IP_RANGE EOF # Install Salt diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index e28608137f1..30616366679 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -273,12 +273,12 @@ function kube-up { grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" echo "KUBE_MASTER=${KUBE_MASTER}" echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" - echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" + echo "NODE_IP_RANGE=${NODE_IP_RANGES[$i]}" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh" ) > "${KUBE_TEMP}/minion-start-${i}.sh" ( - kube-up-vm "${MINION_NAMES[$i]}" -c ${NODE_CPU-1} -m ${MINION_MEMORY_MB-1024} + kube-up-vm "${MINION_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024} kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" ) & done diff --git a/docs/design/networking.md b/docs/design/networking.md index 56009d5b85b..3259a83a67f 100644 --- a/docs/design/networking.md +++ b/docs/design/networking.md @@ -134,7 +134,7 @@ Example of GCE's advanced routing rules: ```sh gcloud compute routes add "${MINION_NAMES[$i]}" \ --project "${PROJECT}" \ - --destination-range "${MINION_IP_RANGES[$i]}" \ + --destination-range "${NODE_IP_RANGES[$i]}" \ --network "${NETWORK}" \ --next-hop-instance "${MINION_NAMES[$i]}" \ --next-hop-instance-zone "${ZONE}" & From fc04b550883aac3ce9319f9b80a35229f89e5548 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:04:40 -0800 Subject: [PATCH 07/14] Minion->Node rename: NODE_NAMES, NODE_NAME, NODE_PORT --- cluster/aws/util.sh | 14 +++++----- cluster/centos/master/scripts/apiserver.sh | 4 +-- cluster/centos/node/scripts/kubelet.sh | 4 +-- cluster/gce/upgrade.sh | 6 ++--- cluster/gce/util.sh | 20 +++++++-------- cluster/gke/util.sh | 10 ++++---- cluster/kube-util.sh | 2 +- cluster/mesos/docker/config-default.sh | 2 +- cluster/vagrant/config-default.sh | 2 +- cluster/vagrant/provision-master.sh | 4 +-- cluster/vagrant/provision-minion.sh | 8 +++--- cluster/vagrant/util.sh | 10 ++++---- cluster/vsphere/config-default.sh 
| 2 +- cluster/vsphere/config-test.sh | 2 +- cluster/vsphere/util.sh | 30 +++++++++++----------- docs/design/networking.md | 4 +-- hack/kube-dump.sh | 2 +- 17 files changed, 63 insertions(+), 63 deletions(-) diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index fe365f59193..1a381f63287 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -187,28 +187,28 @@ function query-running-minions () { function find-running-minions () { NODE_IDS=() - MINION_NAMES=() + NODE_NAMES=() for id in $(query-running-minions "Reservations[].Instances[].InstanceId"); do NODE_IDS+=("${id}") # We use the minion ids as the name - MINION_NAMES+=("${id}") + NODE_NAMES+=("${id}") done } function detect-minions () { find-running-minions - # This is inefficient, but we want MINION_NAMES / NODE_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES + # This is inefficient, but we want NODE_NAMES / NODE_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES KUBE_NODE_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do local minion_ip if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then - minion_ip=$(get_instance_public_ip ${MINION_NAMES[$i]}) + minion_ip=$(get_instance_public_ip ${NODE_NAMES[$i]}) else - minion_ip=$(get_instance_private_ip ${MINION_NAMES[$i]}) + minion_ip=$(get_instance_private_ip ${NODE_NAMES[$i]}) fi - echo "Found minion ${i}: ${MINION_NAMES[$i]} @ ${minion_ip}" + echo "Found minion ${i}: ${NODE_NAMES[$i]} @ ${minion_ip}" KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") done diff --git a/cluster/centos/master/scripts/apiserver.sh b/cluster/centos/master/scripts/apiserver.sh index aebe85964c7..abd8cbc860c 100755 --- a/cluster/centos/master/scripts/apiserver.sh +++ b/cluster/centos/master/scripts/apiserver.sh @@ -38,7 +38,7 @@ KUBE_API_ADDRESS="--address=${MASTER_ADDRESS}" KUBE_API_PORT="--port=8080" # --kubelet-port=10250: Kubelet port -MINION_PORT="--kubelet-port=10250" +NODE_PORT="--kubelet-port=10250" # --allow-privileged=false: If true, allow privileged containers. KUBE_ALLOW_PRIV="--allow-privileged=false" @@ -75,7 +75,7 @@ KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\ \${KUBE_ETCD_SERVERS} \\ \${KUBE_API_ADDRESS} \\ \${KUBE_API_PORT} \\ - \${MINION_PORT} \\ + \${NODE_PORT} \\ \${KUBE_ALLOW_PRIV} \\ \${KUBE_SERVICE_ADDRESSES} \\ \${KUBE_ADMISSION_CONTROL} \\ diff --git a/cluster/centos/node/scripts/kubelet.sh b/cluster/centos/node/scripts/kubelet.sh index 71d780d5433..41192390eb0 100755 --- a/cluster/centos/node/scripts/kubelet.sh +++ b/cluster/centos/node/scripts/kubelet.sh @@ -30,7 +30,7 @@ KUBE_LOG_LEVEL="--v=4" NODE_ADDRESS="--address=${NODE_ADDRESS}" # --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag. -MINION_PORT="--port=10250" +NODE_PORT="--port=10250" # --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname. NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}" @@ -49,7 +49,7 @@ EOF KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\ \${KUBE_LOG_LEVEL} \\ \${NODE_ADDRESS} \\ - \${MINION_PORT} \\ + \${NODE_PORT} \\ \${NODE_HOSTNAME} \\ \${KUBELET_API_SERVER} \\ \${KUBE_ALLOW_PRIV} \\ diff --git a/cluster/gce/upgrade.sh b/cluster/gce/upgrade.sh index 89ad920d390..bed8a8beb9c 100755 --- a/cluster/gce/upgrade.sh +++ b/cluster/gce/upgrade.sh @@ -121,15 +121,15 @@ function prepare-upgrade() { } -# Reads kube-env metadata from first node in MINION_NAMES. +# Reads kube-env metadata from first node in NODE_NAMES. 
# # Assumed vars: -# MINION_NAMES +# NODE_NAMES # PROJECT # ZONE function get-node-env() { # TODO(zmerlynn): Make this more reliable with retries. - gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${MINION_NAMES[0]} --command \ + gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \ "curl --fail --silent -H 'Metadata-Flavor: Google' \ 'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null } diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 9845cbb64e0..a0af03d95da 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -214,13 +214,13 @@ function upload-server-tars() { # Assumed vars: # NODE_INSTANCE_PREFIX # Vars set: -# MINION_NAMES +# NODE_NAMES function detect-minion-names { detect-project - MINION_NAMES=($(gcloud compute instance-groups managed list-instances \ + NODE_NAMES=($(gcloud compute instance-groups managed list-instances \ "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}" \ --format=yaml | grep instance: | cut -d ' ' -f 2)) - echo "MINION_NAMES=${MINION_NAMES[*]}" >&2 + echo "NODE_NAMES=${NODE_NAMES[*]}" >&2 } # Detect the information about the minions @@ -228,20 +228,20 @@ function detect-minion-names { # Assumed vars: # ZONE # Vars set: -# MINION_NAMES +# NODE_NAMES # KUBE_NODE_IP_ADDRESSES (array) function detect-minions () { detect-project detect-minion-names KUBE_NODE_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \ - "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \ + "${NODE_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \ --format=text | awk '{ print $2 }') if [[ -z "${minion_ip-}" ]] ; then - echo "Did not find ${MINION_NAMES[$i]}" >&2 + echo "Did not find ${NODE_NAMES[$i]}" >&2 else - echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" + echo "Found ${NODE_NAMES[$i]} at ${minion_ip}" KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") fi done @@ -1105,8 +1105,8 @@ function kube-push { push-master - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - push-node "${MINION_NAMES[$i]}" & + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + push-node "${NODE_NAMES[$i]}" & done wait-for-jobs diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index 50feb9b860f..9a0f127f371 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -171,7 +171,7 @@ function test-setup() { detect-minions >&2 # At this point, CLUSTER_NAME should have been used, so its value is final. - MINION_TAG=$($GCLOUD compute instances describe ${MINION_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1) + MINION_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1) OLD_MINION_TAG="k8s-${CLUSTER_NAME}-node" # Open up port 80 & 8080 so common containers on minions can be reached. @@ -209,7 +209,7 @@ function detect-master() { # Assumed vars: # none # Vars set: -# MINION_NAMES +# NODE_NAMES function detect-minions() { echo "... in gke:detect-minions()" >&2 detect-minion-names @@ -220,16 +220,16 @@ function detect-minions() { # Assumed vars: # none # Vars set: -# MINION_NAMES +# NODE_NAMES function detect-minion-names { echo "... 
in gke:detect-minion-names()" >&2 detect-project detect-node-instance-group - MINION_NAMES=($(gcloud compute instance-groups managed list-instances \ + NODE_NAMES=($(gcloud compute instance-groups managed list-instances \ "${NODE_INSTANCE_GROUP}" --zone "${ZONE}" --project "${PROJECT}" \ --format=yaml | grep instance: | cut -d ' ' -f 2)) - echo "MINION_NAMES=${MINION_NAMES[*]}" + echo "NODE_NAMES=${NODE_NAMES[*]}" } # Detect instance group name generated by gke diff --git a/cluster/kube-util.sh b/cluster/kube-util.sh index 396ccccf619..d78622cd255 100644 --- a/cluster/kube-util.sh +++ b/cluster/kube-util.sh @@ -26,7 +26,7 @@ function detect-master { # Get minion names if they are not static. function detect-minion-names { - echo "MINION_NAMES: [${MINION_NAMES[*]}]" 1>&2 + echo "NODE_NAMES: [${NODE_NAMES[*]}]" 1>&2 } # Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[] diff --git a/cluster/mesos/docker/config-default.sh b/cluster/mesos/docker/config-default.sh index ab2482d0917..2fee7df3537 100755 --- a/cluster/mesos/docker/config-default.sh +++ b/cluster/mesos/docker/config-default.sh @@ -19,7 +19,7 @@ NUM_MINIONS=${NUM_MINIONS:-2} INSTANCE_PREFIX="${INSTANCE_PREFIX:-kubernetes}" MASTER_NAME="${INSTANCE_PREFIX}-master" -MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) SERVICE_CLUSTER_IP_RANGE=10.10.10.0/24 diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index c0f69bc0257..86301b7b4b8 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -39,7 +39,7 @@ MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" for ((i=0; i < NUM_MINIONS; i++)) do NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))" - MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))" + NODE_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))" NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index fc5176dcb9e..5a16929087b 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -68,8 +68,8 @@ fi # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver -for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - minion=${MINION_NAMES[$i]} +for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + minion=${NODE_NAMES[$i]} ip=${NODE_IPS[$i]} if [ ! "$(cat /etc/hosts | grep $minion)" ]; then echo "Adding $minion to hosts file" diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh index d8befaa7026..f5d2927c8cc 100755 --- a/cluster/vagrant/provision-minion.sh +++ b/cluster/vagrant/provision-minion.sh @@ -70,7 +70,7 @@ EOF # Set the host name explicitly # See: https://github.com/mitchellh/vagrant/issues/2430 -hostnamectl set-hostname ${MINION_NAME} +hostnamectl set-hostname ${NODE_NAME} if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=21 ]]; then # Workaround to vagrant inability to guess interface naming sequence @@ -94,11 +94,11 @@ if [ ! 
"$(cat /etc/hosts | grep $MASTER_NAME)" ]; then echo "Adding $MASTER_NAME to hosts file" echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts fi -echo "$NODE_IP $MINION_NAME" >> /etc/hosts +echo "$NODE_IP $NODE_NAME" >> /etc/hosts # Setup hosts file to support ping by hostname to each minion in the cluster -for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - minion=${MINION_NAMES[$i]} +for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + minion=${NODE_NAMES[$i]} ip=${NODE_IPS[$i]} if [ ! "$(cat /etc/hosts | grep $minion)" ]; then echo "Adding $minion to hosts file" diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index 9a6198db147..5f2464c4529 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -124,7 +124,7 @@ function create-provision-scripts { echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'" echo "MASTER_NAME='${INSTANCE_PREFIX}-master'" echo "MASTER_IP='${MASTER_IP}'" - echo "MINION_NAMES=(${MINION_NAMES[@]})" + echo "NODE_NAMES=(${NODE_NAMES[@]})" echo "NODE_IPS=(${NODE_IPS[@]})" echo "NODE_IP='${MASTER_IP}'" echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" @@ -163,13 +163,13 @@ function create-provision-scripts { awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh" ) > "${KUBE_TEMP}/master-start.sh" - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do ( echo "#! /bin/bash" echo "MASTER_NAME='${MASTER_NAME}'" echo "MASTER_IP='${MASTER_IP}'" - echo "MINION_NAMES=(${MINION_NAMES[@]})" - echo "MINION_NAME=(${MINION_NAMES[$i]})" + echo "NODE_NAMES=(${NODE_NAMES[@]})" + echo "NODE_NAME=(${NODE_NAMES[$i]})" echo "NODE_IPS=(${NODE_IPS[@]})" echo "NODE_IP='${NODE_IPS[$i]}'" echo "NODE_ID='$i'" @@ -222,7 +222,7 @@ function verify-cluster { # verify each minion has all required daemons local i - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do echo "Validating ${VAGRANT_NODE_NAMES[$i]}" local machine=${VAGRANT_NODE_NAMES[$i]} local -a required_daemon=("salt-minion" "kubelet" "docker") diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index 43cfbc224f0..fb981598f29 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -26,7 +26,7 @@ MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 MASTER_CPU=1 -MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) NODE_MEMORY_MB=2048 NODE_CPU=1 diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 39ca55d7233..18bedebc9b5 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -26,7 +26,7 @@ MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 MASTER_CPU=1 -MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) NODE_MEMORY_MB=1024 NODE_CPU=1 diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index 30616366679..a003f725ba6 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -45,17 +45,17 @@ function detect-master { # Detect the information about the minions # # Assumed vars: -# MINION_NAMES +# NODE_NAMES # Vars set: # KUBE_NODE_IP_ADDRESS (array) function detect-minions { KUBE_NODE_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - local minion_ip=$(govc vm.ip 
${MINION_NAMES[$i]}) + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + local minion_ip=$(govc vm.ip ${NODE_NAMES[$i]}) if [[ -z "${minion_ip-}" ]] ; then - echo "Did not find ${MINION_NAMES[$i]}" >&2 + echo "Did not find ${NODE_NAMES[$i]}" >&2 else - echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" + echo "Found ${NODE_NAMES[$i]} at ${minion_ip}" KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") fi done @@ -266,10 +266,10 @@ function kube-up { echo "Starting minion VMs (this can take a minute)..." - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do ( echo "#! /bin/bash" - echo "readonly MY_NAME=${MINION_NAMES[$i]}" + echo "readonly MY_NAME=${NODE_NAMES[$i]}" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" echo "KUBE_MASTER=${KUBE_MASTER}" echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" @@ -278,8 +278,8 @@ function kube-up { ) > "${KUBE_TEMP}/minion-start-${i}.sh" ( - kube-up-vm "${MINION_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024} - kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" + kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024} + kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" ) & done @@ -312,8 +312,8 @@ function kube-up { printf " OK\n" local i - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - printf "Waiting for ${MINION_NAMES[$i]} to become available..." + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + printf "Waiting for ${NODE_NAMES[$i]} to become available..." until curl --max-time 5 \ --fail --output /dev/null --silent "http://${KUBE_NODE_IP_ADDRESSES[$i]}:10250/healthz"; do printf "." @@ -347,10 +347,10 @@ function kube-up { # Basic sanity checking local i - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do # Make sure docker is installed kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || { - echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 + echo "Docker failed to install on ${NODE_NAMES[$i]}. Your cluster is unlikely" >&2 echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 echo "cluster. (sorry!)" >&2 exit 1 @@ -372,8 +372,8 @@ function kube-up { function kube-down { govc vm.destroy ${MASTER_NAME} & - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - govc vm.destroy ${MINION_NAMES[i]} & + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + govc vm.destroy ${NODE_NAMES[i]} & done wait diff --git a/docs/design/networking.md b/docs/design/networking.md index 3259a83a67f..b110ca75ade 100644 --- a/docs/design/networking.md +++ b/docs/design/networking.md @@ -132,11 +132,11 @@ differentiate it from `docker0`) is set up outside of Docker proper. Example of GCE's advanced routing rules: ```sh -gcloud compute routes add "${MINION_NAMES[$i]}" \ +gcloud compute routes add "${NODE_NAMES[$i]}" \ --project "${PROJECT}" \ --destination-range "${NODE_IP_RANGES[$i]}" \ --network "${NETWORK}" \ - --next-hop-instance "${MINION_NAMES[$i]}" \ + --next-hop-instance "${NODE_NAMES[$i]}" \ --next-hop-instance-zone "${ZONE}" & ``` diff --git a/hack/kube-dump.sh b/hack/kube-dump.sh index fd6321db343..76cc9a5bf86 100755 --- a/hack/kube-dump.sh +++ b/hack/kube-dump.sh @@ -32,7 +32,7 @@ source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" detect-project &> /dev/null echo "kube-dump.sh: Getting docker statuses on all nodes..." 
-ALL_NODES=(${MINION_NAMES[*]} ${MASTER_NAME}) +ALL_NODES=(${NODE_NAMES[*]} ${MASTER_NAME}) for NODE in ${ALL_NODES[*]}; do echo "kube-dump.sh: Node $NODE:" ssh-to-node "${NODE}" ' From 1846cfc12911b8189427722f13985a7015d36a2d Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:05:07 -0800 Subject: [PATCH 08/14] Minion->Node rename: NODE_ROOT_DISK_SIZE, NODE_ROOT_DISK_TYPE, etc NODE_SCOPES --- cluster/aws/config-default.sh | 6 +++--- cluster/aws/config-test.sh | 6 +++--- cluster/aws/util.sh | 2 +- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/gce/upgrade.sh | 8 ++++---- cluster/gce/util.sh | 8 ++++---- cluster/gke/config-common.sh | 2 +- cluster/gke/util.sh | 4 ++-- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 5a74d1e4a19..5e78219c77f 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -66,13 +66,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20} MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}" MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8} # The minions root EBS volume size (used to house Docker images) -MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}" -MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32} +NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}" +NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" MINION_TAG="${INSTANCE_PREFIX}-minion" -MINION_SCOPES="" +NODE_SCOPES="" POLL_SLEEP_INTERVAL=3 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 11d164d87ea..5bb475f6162 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -64,13 +64,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20} MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}" MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8} # The minions root EBS volume size (used to house Docker images) -MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}" -MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32} +NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}" +NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" MINION_TAG="${INSTANCE_PREFIX}-minion" -MINION_SCOPES="" +NODE_SCOPES="" POLL_SLEEP_INTERVAL=3 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}" diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 1a381f63287..08b0035b7e9 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -86,7 +86,7 @@ MINION_SG_NAME="kubernetes-minion-${CLUSTER_ID}" # only affects the big storage instance types, which aren't a typical use case right now. 
BLOCK_DEVICE_MAPPINGS_BASE="{\"DeviceName\": \"/dev/sdc\",\"VirtualName\":\"ephemeral0\"},{\"DeviceName\": \"/dev/sdd\",\"VirtualName\":\"ephemeral1\"},{\"DeviceName\": \"/dev/sde\",\"VirtualName\":\"ephemeral2\"},{\"DeviceName\": \"/dev/sdf\",\"VirtualName\":\"ephemeral3\"}" MASTER_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MASTER_ROOT_DISK_SIZE},\"VolumeType\":\"${MASTER_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]" -NODE_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MINION_ROOT_DISK_SIZE},\"VolumeType\":\"${MINION_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]" +NODE_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${NODE_ROOT_DISK_SIZE},\"VolumeType\":\"${NODE_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]" # TODO (bburns) Parameterize this for multiple cluster per project diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 84478c4375d..5fb089e06ba 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -43,7 +43,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master" MINION_TAG="${INSTANCE_PREFIX}-minion" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" -MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" +NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. POLL_SLEEP_INTERVAL="${POLL_SLEEP_INTERVAL:-3}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index f981334a7e9..957214fb744 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -44,7 +44,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master" MINION_TAG="${INSTANCE_PREFIX}-minion" CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" -MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" +NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100} diff --git a/cluster/gce/upgrade.sh b/cluster/gce/upgrade.sh index bed8a8beb9c..fa6839eb71b 100755 --- a/cluster/gce/upgrade.sh +++ b/cluster/gce/upgrade.sh @@ -145,7 +145,7 @@ function get-env-val() { # Assumed vars: # KUBE_VERSION -# MINION_SCOPES +# NODE_SCOPES # NODE_INSTANCE_PREFIX # PROJECT # ZONE @@ -167,7 +167,7 @@ function upgrade-nodes() { # # Assumed vars: # KUBE_VERSION -# MINION_SCOPES +# NODE_SCOPES # NODE_INSTANCE_PREFIX # PROJECT # ZONE @@ -188,8 +188,8 @@ function prepare-node-upgrade() { # TODO(zmerlynn): Refactor setting scope flags. local scope_flags= - if [ -n "${MINION_SCOPES}" ]; then - scope_flags="--scopes ${MINION_SCOPES}" + if [ -n "${NODE_SCOPES}" ]; then + scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index a0af03d95da..9ef3184e350 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -663,8 +663,8 @@ function kube-up { # TODO(zmerlynn): Refactor setting scope flags. 
local scope_flags= - if [ -n "${MINION_SCOPES}" ]; then - scope_flags="--scopes ${MINION_SCOPES}" + if [ -n "${NODE_SCOPES}" ]; then + scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi @@ -1040,8 +1040,8 @@ function prepare-push() { # TODO(zmerlynn): Refactor setting scope flags. local scope_flags= - if [ -n "${MINION_SCOPES}" ]; then - scope_flags="--scopes ${MINION_SCOPES}" + if [ -n "${NODE_SCOPES}" ]; then + scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi diff --git a/cluster/gke/config-common.sh b/cluster/gke/config-common.sh index c46604165bb..4d1aa16088e 100644 --- a/cluster/gke/config-common.sh +++ b/cluster/gke/config-common.sh @@ -27,7 +27,7 @@ FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}" GCLOUD="${GCLOUD:-gcloud}" CMD_GROUP="${CMD_GROUP:-}" GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}" -MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}" +NODE_SCOPES="${NODE_SCOPES:-"compute-rw,storage-ro"}" MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-2}" # WARNING: any new vars added here must correspond to options that can be diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index 9a0f127f371..c4d12da6283 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -113,7 +113,7 @@ function verify-prereqs() { # ZONE # CLUSTER_API_VERSION (optional) # NUM_MINIONS -# MINION_SCOPES +# NODE_SCOPES # MACHINE_TYPE function kube-up() { echo "... in gke:kube-up()" >&2 @@ -145,7 +145,7 @@ function kube-up() { "--project=${PROJECT}" "--num-nodes=${NUM_MINIONS}" "--network=${NETWORK}" - "--scopes=${MINION_SCOPES}" + "--scopes=${NODE_SCOPES}" "--cluster-version=${CLUSTER_API_VERSION}" "--machine-type=${MACHINE_TYPE}" ) From 8431993a444b0cc19fd43f850ef08ef6d08cb090 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:05:33 -0800 Subject: [PATCH 09/14] Minion->Node rename: NODE_SG_ID, NODE_SG_NAME --- cluster/aws/util.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 08b0035b7e9..52307b83a27 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -79,7 +79,7 @@ if [[ -n "${KUBE_SUBNET_CIDR:-}" ]]; then fi MASTER_SG_NAME="kubernetes-master-${CLUSTER_ID}" -MINION_SG_NAME="kubernetes-minion-${CLUSTER_ID}" +NODE_SG_NAME="kubernetes-minion-${CLUSTER_ID}" # Be sure to map all the ephemeral drives. We can specify more than we actually have. # TODO: Actually mount the correct number (especially if we have more), though this is non-trivial, and @@ -228,13 +228,13 @@ function detect-security-groups { echo "Using master security group: ${MASTER_SG_NAME} ${MASTER_SG_ID}" fi fi - if [[ -z "${MINION_SG_ID-}" ]]; then - MINION_SG_ID=$(get_security_group_id "${MINION_SG_NAME}") - if [[ -z "${MINION_SG_ID}" ]]; then + if [[ -z "${NODE_SG_ID-}" ]]; then + NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}") + if [[ -z "${NODE_SG_ID}" ]]; then echo "Could not detect Kubernetes minion security group. Make sure you've launched a cluster with 'kube-up.sh'" exit 1 else - echo "Using minion security group: ${MINION_SG_NAME} ${MINION_SG_ID}" + echo "Using minion security group: ${NODE_SG_NAME} ${NODE_SG_ID}" fi fi } @@ -768,10 +768,10 @@ function kube-up { echo "Creating master security group." 
create-security-group "${MASTER_SG_NAME}" "Kubernetes security group applied to master nodes" fi - MINION_SG_ID=$(get_security_group_id "${MINION_SG_NAME}") - if [[ -z "${MINION_SG_ID}" ]]; then + NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}") + if [[ -z "${NODE_SG_ID}" ]]; then echo "Creating minion security group." - create-security-group "${MINION_SG_NAME}" "Kubernetes security group applied to minion nodes" + create-security-group "${NODE_SG_NAME}" "Kubernetes security group applied to minion nodes" fi detect-security-groups @@ -780,17 +780,17 @@ function kube-up { authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all" # Minions can talk to minions - authorize-security-group-ingress "${MINION_SG_ID}" "--source-group ${MINION_SG_ID} --protocol all" + authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all" # Masters and minions can talk to each other - authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MINION_SG_ID} --protocol all" - authorize-security-group-ingress "${MINION_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all" + authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all" + authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all" # TODO(justinsb): Would be fairly easy to replace 0.0.0.0/0 in these rules # SSH is open to the world authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0" - authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0" + authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0" # HTTPS to the master is allowed (for API access) authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0" @@ -1001,7 +1001,7 @@ function start-minions() { --iam-instance-profile ${IAM_PROFILE_NODE} \ --instance-type $MINION_SIZE \ --key-name ${AWS_SSH_KEY_NAME} \ - --security-groups ${MINION_SG_ID} \ + --security-groups ${NODE_SG_ID} \ ${public_ip_option} \ --block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \ --user-data "file://${KUBE_TEMP}/minion-user-data" @@ -1330,12 +1330,12 @@ function test-setup { # Open up port 80 & 8080 so common containers on minions can be reached # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts. - authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0" - authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0" + authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0" + authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0" # Open up the NodePort range # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default. 
- authorize-security-group-ingress "${MINION_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0" + authorize-security-group-ingress "${NODE_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0" echo "test-setup complete" } From bd06c19aa8848d0ec1c4a0e6175635fa88626212 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:05:51 -0800 Subject: [PATCH 10/14] Minion->Node rename: NODE_SIZE --- cluster/aws/config-default.sh | 10 +++++----- cluster/aws/config-test.sh | 10 +++++----- cluster/aws/options.md | 4 ++-- cluster/aws/util.sh | 2 +- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/gce/util.sh | 2 +- docs/getting-started-guides/aws.md | 2 +- hack/jenkins/e2e.sh | 20 ++++++++++---------- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 5e78219c77f..ec3644a6e1f 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -16,17 +16,17 @@ ZONE=${KUBE_AWS_ZONE:-us-west-2a} MASTER_SIZE=${MASTER_SIZE:-} -MINION_SIZE=${MINION_SIZE:-} +NODE_SIZE=${NODE_SIZE:-} NUM_MINIONS=${NUM_MINIONS:-4} # Dynamically set node sizes so that Heapster has enough space to run -if [[ -z ${MINION_SIZE} ]]; then +if [[ -z ${NODE_SIZE} ]]; then if (( ${NUM_MINIONS} < 50 )); then - MINION_SIZE="t2.micro" + NODE_SIZE="t2.micro" elif (( ${NUM_MINIONS} < 150 )); then - MINION_SIZE="t2.small" + NODE_SIZE="t2.small" else - MINION_SIZE="t2.medium" + NODE_SIZE="t2.medium" fi fi diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 5bb475f6162..501368ddd6b 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -17,17 +17,17 @@ ZONE=${KUBE_AWS_ZONE:-us-west-2a} MASTER_SIZE=${MASTER_SIZE:-} -MINION_SIZE=${MINION_SIZE:-} +NODE_SIZE=${NODE_SIZE:-} NUM_MINIONS=${NUM_MINIONS:-2} # Dynamically set node sizes so that Heapster has enough space to run -if [[ -z ${MINION_SIZE} ]]; then +if [[ -z ${NODE_SIZE} ]]; then if (( ${NUM_MINIONS} < 50 )); then - MINION_SIZE="t2.micro" + NODE_SIZE="t2.micro" elif (( ${NUM_MINIONS} < 150 )); then - MINION_SIZE="t2.small" + NODE_SIZE="t2.small" else - MINION_SIZE="t2.medium" + NODE_SIZE="t2.medium" fi fi diff --git a/cluster/aws/options.md b/cluster/aws/options.md index 952b05a92d6..139a055efc0 100644 --- a/cluster/aws/options.md +++ b/cluster/aws/options.md @@ -27,7 +27,7 @@ It is not a bad idea to set AWS_S3_BUCKET to something more human friendly. AWS_S3_REGION is useful for people that want to control their data location, because of regulatory restrictions for example. -**MASTER_SIZE**, **MINION_SIZE** +**MASTER_SIZE**, **NODE_SIZE** The instance type to use for creating the master/minion. Defaults to auto-sizing based on the number of nodes (see below). @@ -35,7 +35,7 @@ For production usage, we recommend bigger instances, for example: ``` export MASTER_SIZE=c4.large -export MINION_SIZE=r3.large +export NODE_SIZE=r3.large ``` If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`. 
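For reference, that guess is the `if` chain from `cluster/aws/config-default.sh`, condensed below. This is a sketch of the logic as it stands at this point in the series — `NUM_MINIONS` is still the live name here and is only renamed to `NUM_NODES` in patch 13/14; the thresholds come verbatim from the config hunks above.

```bash
# Sketch of the AWS instance-size guess; an explicit NODE_SIZE always wins.
NODE_SIZE=${NODE_SIZE:-}
NUM_MINIONS=${NUM_MINIONS:-4}
if [[ -z ${NODE_SIZE} ]]; then
  if (( ${NUM_MINIONS} < 50 )); then
    NODE_SIZE="t2.micro"      # small clusters fit on micros
  elif (( ${NUM_MINIONS} < 150 )); then
    NODE_SIZE="t2.small"
  else
    NODE_SIZE="t2.medium"     # larger clusters need headroom for Heapster
  fi
fi
```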
diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 52307b83a27..68c33af3432 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -999,7 +999,7 @@ function start-minions() { --launch-configuration-name ${ASG_NAME} \ --image-id $KUBE_NODE_IMAGE \ --iam-instance-profile ${IAM_PROFILE_NODE} \ - --instance-type $MINION_SIZE \ + --instance-type $NODE_SIZE \ --key-name ${AWS_SSH_KEY_NAME} \ --security-groups ${NODE_SG_ID} \ ${public_ip_option} \ diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 5fb089e06ba..2bf7c436d8a 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -19,7 +19,7 @@ GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} MASTER_SIZE=${MASTER_SIZE:-n1-standard-2} -MINION_SIZE=${MINION_SIZE:-n1-standard-2} +NODE_SIZE=${NODE_SIZE:-n1-standard-2} NUM_MINIONS=${NUM_MINIONS:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 957214fb744..51a7661dad2 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -19,7 +19,7 @@ GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} MASTER_SIZE=${MASTER_SIZE:-n1-standard-2} -MINION_SIZE=${MINION_SIZE:-n1-standard-2} +NODE_SIZE=${NODE_SIZE:-n1-standard-2} NUM_MINIONS=${NUM_MINIONS:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 9ef3184e350..f7f8c82321a 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -377,7 +377,7 @@ function create-node-template { echo "Attempt ${attempt} to create ${1}" >&2 if ! gcloud compute instance-templates create "$template_name" \ --project "${PROJECT}" \ - --machine-type "${MINION_SIZE}" \ + --machine-type "${NODE_SIZE}" \ --boot-disk-type "${NODE_DISK_TYPE}" \ --boot-disk-size "${NODE_DISK_SIZE}" \ --image-project="${NODE_IMAGE_PROJECT}" \ diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md index 3c092c1e439..7c7aab9fb67 100644 --- a/docs/getting-started-guides/aws.md +++ b/docs/getting-started-guides/aws.md @@ -84,7 +84,7 @@ You can override the variables defined in [config-default.sh](http://releases.k8 ```bash export KUBE_AWS_ZONE=eu-west-1c export NUM_MINIONS=2 -export MINION_SIZE=m3.medium +export NODE_SIZE=m3.medium export AWS_S3_REGION=eu-west-1 export AWS_S3_BUCKET=mycompany-kubernetes-artifacts export INSTANCE_PREFIX=k8s diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 017cdffd37a..78fa5c96407 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -106,7 +106,7 @@ if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"} else : ${MASTER_SIZE:="m3.large"} - : ${MINION_SIZE:="m3.large"} + : ${NODE_SIZE:="m3.large"} : ${NUM_MINIONS:="3"} fi fi @@ -430,7 +430,7 @@ case ${JOB_NAME} in : ${PROJECT:="kubernetes-jenkins"} # Override GCE defaults. MASTER_SIZE="n1-standard-4" - MINION_SIZE="n1-standard-2" + NODE_SIZE="n1-standard-2" NODE_DISK_SIZE="50GB" NUM_MINIONS="100" # Reduce logs verbosity @@ -452,7 +452,7 @@ case ${JOB_NAME} in # Override GCE defaults. 
E2E_ZONE="us-east1-b" MASTER_SIZE="n1-standard-4" - MINION_SIZE="n1-standard-2" + NODE_SIZE="n1-standard-2" NODE_DISK_SIZE="50GB" NUM_MINIONS="100" # Reduce logs verbosity @@ -793,7 +793,7 @@ case ${JOB_NAME} in : ${GINKGO_TEST_ARGS:="--ginkgo.focus=GCE\sL7\sLoadBalancer\sController|Job|Horizontal\spod\sautoscaling"} # At least n1-standard-2 nodes are required for the cluster to # have enough cpu/ram to run the Horizontal pod autoscaling tests. - MINION_SIZE="n1-standard-2" + NODE_SIZE="n1-standard-2" ;; # Sets up the GKE soak cluster weekly using the latest CI release. @@ -808,7 +808,7 @@ case ${JOB_NAME} in : ${E2E_UP:="true"} : ${PROJECT:="kubernetes-jenkins"} # Need at least n1-standard-2 nodes to run kubelet_perf tests - MINION_SIZE="n1-standard-2" + NODE_SIZE="n1-standard-2" ;; # Runs tests on GKE soak cluster. @@ -1417,7 +1417,7 @@ case ${JOB_NAME} in KUBE_GCE_INSTANCE_PREFIX="kubemark100" NUM_MINIONS="10" MASTER_SIZE="n1-standard-2" - MINION_SIZE="n1-standard-1" + NODE_SIZE="n1-standard-1" KUBEMARK_MASTER_SIZE="n1-standard-4" KUBEMARK_NUM_NODES="100" ;; @@ -1435,7 +1435,7 @@ case ${JOB_NAME} in # Override defaults to be indpendent from GCE defaults and set kubemark parameters NUM_MINIONS="6" MASTER_SIZE="n1-standard-4" - MINION_SIZE="n1-standard-8" + NODE_SIZE="n1-standard-8" KUBE_GCE_INSTANCE_PREFIX="kubemark500" E2E_ZONE="asia-east1-a" KUBEMARK_MASTER_SIZE="n1-standard-16" @@ -1455,7 +1455,7 @@ case ${JOB_NAME} in # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way. NUM_MINIONS="11" MASTER_SIZE="n1-standard-4" - MINION_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core + NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core # so NUM_MINIONS x cores_per_minion should # be set accordingly. KUBE_GCE_INSTANCE_PREFIX="kubemark1000" @@ -1486,7 +1486,7 @@ export KUBE_GKE_NETWORK=${E2E_NETWORK} export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-} export DOGFOOD_GCLOUD=${DOGFOOD_GCLOUD:-} export CMD_GROUP=${CMD_GROUP:-} -export MACHINE_TYPE=${MINION_SIZE:-} # GKE scripts use MACHINE_TYPE for the node vm size +export MACHINE_TYPE=${NODE_SIZE:-} # GKE scripts use MACHINE_TYPE for the node vm size if [[ ! 
-z "${GKE_API_ENDPOINT:-}" ]]; then export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT} @@ -1500,7 +1500,7 @@ export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER: export KUBE_ENABLE_DEPLOYMENTS=${ENABLE_DEPLOYMENTS:-} export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-} export MASTER_SIZE=${MASTER_SIZE:-} -export MINION_SIZE=${MINION_SIZE:-} +export NODE_SIZE=${NODE_SIZE:-} export NODE_DISK_SIZE=${NODE_DISK_SIZE:-} export NUM_MINIONS=${NUM_MINIONS:-} export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-} From e67be19a5b172ff10198a522b54d5a617ee41c65 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:06:00 -0800 Subject: [PATCH 11/14] Minion->Node rename: OLD_NODE_TAG, NODE_TAG --- cluster/aws/config-default.sh | 2 +- cluster/aws/config-test.sh | 2 +- cluster/aws/util.sh | 4 ++-- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/gce/util.sh | 30 +++++++++++++++--------------- cluster/gke/config-test.sh | 2 +- cluster/gke/util.sh | 10 +++++----- cluster/vsphere/config-default.sh | 2 +- cluster/vsphere/config-test.sh | 2 +- 10 files changed, 29 insertions(+), 29 deletions(-) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index ec3644a6e1f..99cea744bf1 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -71,7 +71,7 @@ NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" NODE_SCOPES="" POLL_SLEEP_INTERVAL=3 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 501368ddd6b..250955b5e6d 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -69,7 +69,7 @@ NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" NODE_SCOPES="" POLL_SLEEP_INTERVAL=3 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 68c33af3432..ed2905e1ffa 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -181,7 +181,7 @@ function query-running-minions () { Name=vpc-id,Values=${VPC_ID} \ Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ Name=tag:aws:autoscaling:groupName,Values=${ASG_NAME} \ - Name=tag:Role,Values=${MINION_TAG} \ + Name=tag:Role,Values=${NODE_TAG} \ --query ${query} } @@ -1014,7 +1014,7 @@ function start-minions() { --max-size ${NUM_MINIONS} \ --vpc-zone-identifier ${SUBNET_ID} \ --tags ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Name,Value=${NODE_INSTANCE_PREFIX} \ - ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${MINION_TAG} \ + ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${NODE_TAG} \ ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=KubernetesCluster,Value=${CLUSTER_ID} # Wait for the minions to be running diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 2bf7c436d8a..d55c106f013 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -40,7 +40,7 @@ NETWORK=${KUBE_GCE_NETWORK:-default} INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" 
-MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 51a7661dad2..5bc3487dce2 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -41,7 +41,7 @@ NETWORK=${KUBE_GCE_NETWORK:-e2e} INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index f7f8c82321a..7a9a8b56e21 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -382,7 +382,7 @@ function create-node-template { --boot-disk-size "${NODE_DISK_SIZE}" \ --image-project="${NODE_IMAGE_PROJECT}" \ --image "${NODE_IMAGE}" \ - --tags "${MINION_TAG}" \ + --tags "${NODE_TAG}" \ --network "${NETWORK}" \ ${preemptible_minions} \ $2 \ @@ -649,7 +649,7 @@ function kube-up { create-master-instance "${MASTER_RESERVED_IP}" & # Create a single firewall rule for all minions. - create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" & + create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" & # Report logging choice (if any). if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then @@ -877,11 +877,11 @@ function kube-down { fi # Delete firewall rule for minions. - if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then + if gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-all" &>/dev/null; then gcloud compute firewall-rules delete \ --project "${PROJECT}" \ --quiet \ - "${MINION_TAG}-all" + "${NODE_TAG}-all" fi # Delete routes. @@ -989,7 +989,7 @@ function check-resources { return 1 fi - if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then + if gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-all" &>/dev/null; then KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-all" return 1 fi @@ -1153,34 +1153,34 @@ function test-setup { local start=`date +%s` gcloud compute firewall-rules create \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG}" \ + --target-tags "${NODE_TAG}" \ --allow tcp:80,tcp:8080 \ --network "${NETWORK}" \ - "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true + "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true # As there is no simple way to wait longer for this operation we need to manually # wait some additional time (20 minutes altogether). - until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] + until gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] do sleep 5 done # Check if the firewall rule exists and fail if it does not. 
- gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" + gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" # Open up the NodePort range # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default. start=`date +%s` gcloud compute firewall-rules create \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG}" \ + --target-tags "${NODE_TAG}" \ --allow tcp:30000-32767,udp:30000-32767 \ --network "${NETWORK}" \ - "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true + "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true # As there is no simple way to wait longer for this operation we need to manually # wait some additional time (20 minutes altogether). - until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] + until gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] do sleep 5 done # Check if the firewall rule exists and fail if it does not. - gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" + gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" } # Execute after running tests to perform any required clean-up. This is called @@ -1191,11 +1191,11 @@ function test-teardown { gcloud compute firewall-rules delete \ --project "${PROJECT}" \ --quiet \ - "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true + "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" || true gcloud compute firewall-rules delete \ --project "${PROJECT}" \ --quiet \ - "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" || true + "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" || true "${KUBE_ROOT}/cluster/kube-down.sh" } diff --git a/cluster/gke/config-test.sh b/cluster/gke/config-test.sh index 92679ecbdf2..d29e2250039 100644 --- a/cluster/gke/config-test.sh +++ b/cluster/gke/config-test.sh @@ -17,7 +17,7 @@ # The following are test-specific settings. CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}" NETWORK=${KUBE_GKE_NETWORK:-e2e} -MINION_TAG="k8s-${CLUSTER_NAME}-node" +NODE_TAG="k8s-${CLUSTER_NAME}-node" # For ease of maintenance, extract any pieces that do not vary between default diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index c4d12da6283..03addc27acd 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -163,7 +163,7 @@ function kube-up() { # GCLOUD # ZONE # Vars set: -# MINION_TAG +# NODE_TAG function test-setup() { echo "... in gke:test-setup()" >&2 # Detect the project into $PROJECT if it isn't set @@ -171,22 +171,22 @@ function test-setup() { detect-minions >&2 # At this point, CLUSTER_NAME should have been used, so its value is final. - MINION_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1) - OLD_MINION_TAG="k8s-${CLUSTER_NAME}-node" + NODE_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1) + OLD_NODE_TAG="k8s-${CLUSTER_NAME}-node" # Open up port 80 & 8080 so common containers on minions can be reached. 
"${GCLOUD}" compute firewall-rules create \ "${CLUSTER_NAME}-http-alt" \ --allow tcp:80,tcp:8080 \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG},${OLD_MINION_TAG}" \ + --target-tags "${NODE_TAG},${OLD_NODE_TAG}" \ --network="${NETWORK}" "${GCLOUD}" compute firewall-rules create \ "${CLUSTER_NAME}-nodeports" \ --allow tcp:30000-32767,udp:30000-32767 \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG},${OLD_MINION_TAG}" \ + --target-tags "${NODE_TAG},${OLD_NODE_TAG}" \ --network="${NETWORK}" } diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index fb981598f29..6ef11a521bf 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -20,7 +20,7 @@ GUEST_ID=debian7_64Guest INSTANCE_PREFIX=kubernetes MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 18bedebc9b5..e2d65854a19 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -20,7 +20,7 @@ GUEST_ID=debian7_64Guest INSTANCE_PREFIX="e2e-test-${USER}" MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 From d1dbeb98a0a7c3dfd70d3c4480883612f750fe06 Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:06:18 -0800 Subject: [PATCH 12/14] Minion->Node rename: NUM_NODES_BKP, NUM_NODES_PARALLEL --- hack/jenkins/e2e.sh | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 78fa5c96407..100258d565a 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -87,7 +87,7 @@ if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then KUBERNETES_PROVIDER="gce" : ${E2E_MIN_STARTUP_PODS:="1"} : ${E2E_ZONE:="us-central1-f"} - : ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel + : ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then KUBERNETES_PROVIDER="gke" @@ -96,7 +96,7 @@ elif [[ ${JOB_NAME} =~ ^kubernetes-.*-aws ]]; then KUBERNETES_PROVIDER="aws" : ${E2E_MIN_STARTUP_PODS:="1"} : ${E2E_ZONE:="us-east-1a"} - : ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel + : ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel fi if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then @@ -354,7 +354,7 @@ case ${JOB_NAME} in : ${PROJECT:="kubernetes-jenkins-pull"} : ${ENABLE_DEPLOYMENTS:=true} # Override GCE defaults - NUM_MINIONS=${NUM_MINIONS_PARALLEL} + NUM_MINIONS=${NUM_NODES_PARALLEL} ;; # Runs all non-flaky tests on GCE in parallel. @@ -373,7 +373,7 @@ case ${JOB_NAME} in : ${PROJECT:="kubernetes-jenkins"} : ${ENABLE_DEPLOYMENTS:=true} # Override GCE defaults - NUM_MINIONS=${NUM_MINIONS_PARALLEL} + NUM_MINIONS=${NUM_NODES_PARALLEL} ;; # Runs all non-flaky tests on AWS in parallel. @@ -390,7 +390,7 @@ case ${JOB_NAME} in )"} : ${ENABLE_DEPLOYMENTS:=true} # Override AWS defaults. - NUM_MINIONS=${NUM_MINIONS_PARALLEL} + NUM_MINIONS=${NUM_NODES_PARALLEL} ;; # Runs the flaky tests on GCE in parallel. @@ -409,7 +409,7 @@ case ${JOB_NAME} in : ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"} : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} # Override GCE defaults. 
- NUM_MINIONS=${NUM_MINIONS_PARALLEL} + NUM_MINIONS=${NUM_NODES_PARALLEL} ;; # Runs only the reboot tests on GCE. @@ -1715,7 +1715,7 @@ fi ### Start Kubemark ### if [[ "${USE_KUBEMARK:-}" == "true" ]]; then export RUN_FROM_DISTRO=true - NUM_MINIONS_BKP=${NUM_MINIONS} + NUM_NODES_BKP=${NUM_MINIONS} MASTER_SIZE_BKP=${MASTER_SIZE} ./test/kubemark/stop-kubemark.sh NUM_MINIONS=${KUBEMARK_NUM_NODES:-$NUM_MINIONS} @@ -1723,10 +1723,10 @@ if [[ "${USE_KUBEMARK:-}" == "true" ]]; then ./test/kubemark/start-kubemark.sh ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false" ./test/kubemark/stop-kubemark.sh - NUM_MINIONS=${NUM_MINIONS_BKP} + NUM_MINIONS=${NUM_NODES_BKP} MASTER_SIZE=${MASTER_SIZE_BKP} unset RUN_FROM_DISTRO - unset NUM_MINIONS_BKP + unset NUM_NODES_BKP unset MASTER_SIZE_BKP fi From 53172a5356c9f3268ae4604745a1aba374c9ea6c Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:06:36 -0800 Subject: [PATCH 13/14] Minion->Node rename: NUM_NODES --- Vagrantfile | 2 +- cluster/aws/config-default.sh | 12 ++-- cluster/aws/config-test.sh | 12 ++-- cluster/aws/options.md | 2 +- .../templates/create-dynamic-salt-files.sh | 2 +- cluster/aws/util.sh | 6 +- cluster/centos/config-default.sh | 4 +- cluster/gce/config-default.sh | 4 +- cluster/gce/config-test.sh | 4 +- cluster/gce/configure-vm.sh | 2 +- cluster/gce/util.sh | 4 +- cluster/gke/config-common.sh | 2 +- cluster/gke/util.sh | 4 +- cluster/juju/config-test.sh | 2 +- cluster/juju/util.sh | 2 +- cluster/kubemark/config-default.sh | 2 +- cluster/libvirt-coreos/config-default.sh | 10 ++-- cluster/libvirt-coreos/util.sh | 16 +++--- cluster/mesos/docker/config-default.sh | 4 +- cluster/mesos/docker/config-test.sh | 4 +- cluster/mesos/docker/util.sh | 4 +- cluster/options.md | 2 +- cluster/rackspace/config-default.sh | 6 +- cluster/ubuntu/config-default.sh | 2 +- cluster/vagrant/config-default.sh | 6 +- cluster/vagrant/config-test.sh | 4 +- cluster/vagrant/pod-ip-test.sh | 2 +- cluster/validate-cluster.sh | 2 +- cluster/vsphere/config-default.sh | 6 +- cluster/vsphere/config-test.sh | 6 +- docs/admin/cluster-large.md | 2 +- docs/design/aws_under_the_hood.md | 2 +- docs/devel/developer-guides/vagrant.md | 6 +- docs/devel/kubemark-guide.md | 2 +- docs/getting-started-guides/aws.md | 4 +- docs/getting-started-guides/libvirt-coreos.md | 4 +- docs/getting-started-guides/rackspace.md | 2 +- docs/getting-started-guides/ubuntu.md | 4 +- docs/getting-started-guides/vagrant.md | 4 +- examples/celery-rabbitmq/README.md | 2 +- examples/runtime-constraints/README.md | 2 +- hack/conformance-test.sh | 6 +- hack/ginkgo-e2e.sh | 2 +- hack/jenkins/e2e.sh | 56 +++++++++---------- test/kubemark/start-kubemark.sh | 4 +- 45 files changed, 121 insertions(+), 121 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 514179fe73c..55612548fb4 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -18,7 +18,7 @@ END end # The number of minions to provision -$num_minion = (ENV['NUM_MINIONS'] || 1).to_i +$num_minion = (ENV['NUM_NODES'] || 1).to_i # ip configuration $master_ip = ENV['MASTER_IP'] diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 99cea744bf1..95f9eb4f9d4 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -17,13 +17,13 @@ ZONE=${KUBE_AWS_ZONE:-us-west-2a} MASTER_SIZE=${MASTER_SIZE:-} NODE_SIZE=${NODE_SIZE:-} -NUM_MINIONS=${NUM_MINIONS:-4} +NUM_NODES=${NUM_NODES:-4} # Dynamically set node sizes so 
that Heapster has enough space to run if [[ -z ${NODE_SIZE} ]]; then - if (( ${NUM_MINIONS} < 50 )); then + if (( ${NUM_NODES} < 50 )); then NODE_SIZE="t2.micro" - elif (( ${NUM_MINIONS} < 150 )); then + elif (( ${NUM_NODES} < 150 )); then NODE_SIZE="t2.small" else NODE_SIZE="t2.medium" @@ -33,9 +33,9 @@ fi # Dynamically set the master size by the number of nodes, these are guesses # TODO: gather some data if [[ -z ${MASTER_SIZE} ]]; then - if (( ${NUM_MINIONS} < 50 )); then + if (( ${NUM_NODES} < 50 )); then MASTER_SIZE="t2.micro" - elif (( ${NUM_MINIONS} < 150 )); then + elif (( ${NUM_NODES} < 150 )); then MASTER_SIZE="t2.small" else MASTER_SIZE="t2.medium" @@ -121,7 +121,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}" if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then # TODO: actually configure ASG or similar AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" - AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}" + AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" fi diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 250955b5e6d..2512c8dbeff 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -18,13 +18,13 @@ ZONE=${KUBE_AWS_ZONE:-us-west-2a} MASTER_SIZE=${MASTER_SIZE:-} NODE_SIZE=${NODE_SIZE:-} -NUM_MINIONS=${NUM_MINIONS:-2} +NUM_NODES=${NUM_NODES:-2} # Dynamically set node sizes so that Heapster has enough space to run if [[ -z ${NODE_SIZE} ]]; then - if (( ${NUM_MINIONS} < 50 )); then + if (( ${NUM_NODES} < 50 )); then NODE_SIZE="t2.micro" - elif (( ${NUM_MINIONS} < 150 )); then + elif (( ${NUM_NODES} < 150 )); then NODE_SIZE="t2.small" else NODE_SIZE="t2.medium" @@ -34,9 +34,9 @@ fi # Dynamically set the master size by the number of nodes, these are guesses # TODO: gather some data if [[ -z ${MASTER_SIZE} ]]; then - if (( ${NUM_MINIONS} < 50 )); then + if (( ${NUM_NODES} < 50 )); then MASTER_SIZE="t2.micro" - elif (( ${NUM_MINIONS} < 150 )); then + elif (( ${NUM_NODES} < 150 )); then MASTER_SIZE="t2.small" else MASTER_SIZE="t2.medium" @@ -117,7 +117,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}" if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then # TODO: actually configure ASG or similar AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" - AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}" + AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" fi diff --git a/cluster/aws/options.md b/cluster/aws/options.md index 139a055efc0..ee1a77db31d 100644 --- a/cluster/aws/options.md +++ b/cluster/aws/options.md @@ -38,7 +38,7 @@ export MASTER_SIZE=c4.large export NODE_SIZE=r3.large ``` -If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`. +If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`. In particular for clusters less than 50 nodes it will use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`. 
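With this patch the operator-facing knobs read consistently alongside the patch 10/14 renames. A hypothetical `kube-up` session using the renamed variables — the exports and values below are illustrative, mirroring the `docs/getting-started-guides/aws.md` example this series updates:

```bash
# All values here are examples, not requirements.
export KUBE_AWS_ZONE=eu-west-1c
export NUM_NODES=2           # was NUM_MINIONS
export NODE_SIZE=m3.medium   # was MINION_SIZE; leave unset to use the size guess
export AWS_S3_REGION=eu-west-1
cluster/kube-up.sh
```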
diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh index 54c81dfc442..e14693019b3 100644 --- a/cluster/aws/templates/create-dynamic-salt-files.sh +++ b/cluster/aws/templates/create-dynamic-salt-files.sh @@ -40,7 +40,7 @@ network_provider: '$(echo "$NETWORK_PROVIDER")' opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG")' opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")' opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")' -num_nodes: $(echo "${NUM_MINIONS}") +num_nodes: $(echo "${NUM_NODES}") e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' EOF diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index ed2905e1ffa..b3dd3ffa421 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -1010,8 +1010,8 @@ function start-minions() { ${AWS_ASG_CMD} create-auto-scaling-group \ --auto-scaling-group-name ${ASG_NAME} \ --launch-configuration-name ${ASG_NAME} \ - --min-size ${NUM_MINIONS} \ - --max-size ${NUM_MINIONS} \ + --min-size ${NUM_NODES} \ + --max-size ${NUM_NODES} \ --vpc-zone-identifier ${SUBNET_ID} \ --tags ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Name,Value=${NODE_INSTANCE_PREFIX} \ ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${NODE_TAG} \ @@ -1022,7 +1022,7 @@ function start-minions() { attempt=0 while true; do find-running-minions > $LOG - if [[ ${#NODE_IDS[@]} == ${NUM_MINIONS} ]]; then + if [[ ${#NODE_IDS[@]} == ${NUM_NODES} ]]; then echo -e " ${color_green}${#NODE_IDS[@]} minions started; ready${color_norm}" break fi diff --git a/cluster/centos/config-default.sh b/cluster/centos/config-default.sh index df04bb4f6bc..116e6f4617c 100755 --- a/cluster/centos/config-default.sh +++ b/cluster/centos/config-default.sh @@ -27,8 +27,8 @@ export NODES=${NODES:-"centos@172.10.0.12 centos@172.10.0.13"} # Number of nodes in your cluster. export NUM_NODES=${NUM_NODES:-2} -# Should be removed when NUM_MINIONS is deprecated in validate-cluster.sh -export NUM_MINIONS=${NUM_NODES} +# Should be removed when NUM_NODES is deprecated in validate-cluster.sh +export NUM_NODES=${NUM_NODES} # By default, the cluster will use the etcd installed on master. 
export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:4001"} diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index d55c106f013..316c177b1dd 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -20,7 +20,7 @@ GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} MASTER_SIZE=${MASTER_SIZE:-n1-standard-2} NODE_SIZE=${NODE_SIZE:-n1-standard-2} -NUM_MINIONS=${NUM_MINIONS:-3} +NUM_NODES=${NUM_NODES:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} @@ -101,7 +101,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}" if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" - AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}" + AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" ENABLE_CLUSTER_MONITORING=googleinfluxdb fi diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 5bc3487dce2..6f3274b534b 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -20,7 +20,7 @@ GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} MASTER_SIZE=${MASTER_SIZE:-n1-standard-2} NODE_SIZE=${NODE_SIZE:-n1-standard-2} -NUM_MINIONS=${NUM_MINIONS:-3} +NUM_NODES=${NUM_NODES:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} @@ -109,7 +109,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}" if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" - AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}" + AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" fi diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index fab6f6527bb..c71f6fae881 100755 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -285,7 +285,7 @@ opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")' enable_manifest_url: '$(echo "$ENABLE_MANIFEST_URL" | sed -e "s/'/''/g")' manifest_url: '$(echo "$MANIFEST_URL" | sed -e "s/'/''/g")' manifest_url_header: '$(echo "$MANIFEST_URL_HEADER" | sed -e "s/'/''/g")' -num_nodes: $(echo "${NUM_MINIONS}") +num_nodes: $(echo "${NUM_NODES}") e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' EOF diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 7a9a8b56e21..aed1742c2cb 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -680,7 +680,7 @@ function kube-up { --project "${PROJECT}" \ --zone "${ZONE}" \ --base-instance-name "${NODE_INSTANCE_PREFIX}" \ - --size "${NUM_MINIONS}" \ + --size "${NUM_NODES}" \ --template "$template_name" || true; gcloud compute instance-groups managed wait-until-stable \ "${NODE_INSTANCE_PREFIX}-group" \ @@ -1333,7 +1333,7 @@ KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-}) ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false}) MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-}) MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-}) -NUM_MINIONS: $(yaml-quote ${NUM_MINIONS}) +NUM_NODES: $(yaml-quote ${NUM_NODES}) EOF if [ -n "${APISERVER_TEST_ARGS:-}" ]; then cat >>$file <&2 - export NUM_MINIONS=${#KUBE_NODE_IP_ADDRESSES[@]} + export 
NUM_NODES=${#KUBE_NODE_IP_ADDRESSES[@]} } function get-password() { diff --git a/cluster/kubemark/config-default.sh b/cluster/kubemark/config-default.sh index babcdd6c4c3..f97df8dfece 100644 --- a/cluster/kubemark/config-default.sh +++ b/cluster/kubemark/config-default.sh @@ -22,7 +22,7 @@ GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} MASTER_SIZE=${MASTER_SIZE:-n1-standard-4} -NUM_MINIONS=${NUM_MINIONS:-100} +NUM_NODES=${NUM_NODES:-100} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} diff --git a/cluster/libvirt-coreos/config-default.sh b/cluster/libvirt-coreos/config-default.sh index d03fbe47067..93197d064fb 100644 --- a/cluster/libvirt-coreos/config-default.sh +++ b/cluster/libvirt-coreos/config-default.sh @@ -17,8 +17,8 @@ ## Contains configuration values for interacting with the libvirt CoreOS cluster # Number of minions in the cluster -NUM_MINIONS=${NUM_MINIONS:-3} -export NUM_MINIONS +NUM_NODES=${NUM_NODES:-3} +export NUM_NODES # The IP of the master export MASTER_IP="192.168.10.1" @@ -33,18 +33,18 @@ MASTER_CONTAINER_NETMASK="255.255.255.0" MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" -if [[ "$NUM_MINIONS" -gt 253 ]]; then +if [[ "$NUM_NODES" -gt 253 ]]; then echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 nodes" exit 1 fi -for ((i=0; i < NUM_MINIONS; i++)) do +for ((i=0; i < NUM_NODES; i++)) do NODE_IPS[$i]="${NODE_IP_BASE}$((i+2))" NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))" NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" done -NODE_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET +NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh index 4a1c74be1b1..76dea254afb 100644 --- a/cluster/libvirt-coreos/util.sh +++ b/cluster/libvirt-coreos/util.sh @@ -167,8 +167,8 @@ function wait-cluster-readiness { local timeout=120 while [[ $timeout -ne 0 ]]; do nb_ready_nodes=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true) - echo "Nb ready nodes: $nb_ready_nodes / $NUM_MINIONS" - if [[ "$nb_ready_nodes" -eq "$NUM_MINIONS" ]]; then + echo "Nb ready nodes: $nb_ready_nodes / $NUM_NODES" + if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then return 0 fi @@ -191,8 +191,8 @@ function kube-up { readonly kubernetes_dir="$POOL_PATH/kubernetes" local i - for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do - if [[ $i -eq $NUM_MINIONS ]]; then + for (( i = 0 ; i <= $NUM_NODES ; i++ )); do + if [[ $i -eq $NUM_NODES ]]; then etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380" else etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380" @@ -201,8 +201,8 @@ function kube-up { etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}") readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}") - for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do - if [[ $i -eq $NUM_MINIONS ]]; then + for (( i = 0 ; i <= $NUM_NODES ; i++ )); do + if [[ $i -eq $NUM_NODES ]]; then type=master name=$MASTER_NAME public_ip=$MASTER_IP @@ -262,7 
+262,7 @@ function upload-server-tars { function kube-push { kube-push-internal ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler" - for ((i=0; i < NUM_MINIONS; i++)); do + for ((i=0; i < NUM_NODES; i++)); do ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy" done wait-cluster-readiness @@ -317,7 +317,7 @@ function ssh-to-node { elif [[ "$node" == "$MASTER_NAME" ]]; then machine="$MASTER_IP" else - for ((i=0; i < NUM_MINIONS; i++)); do + for ((i=0; i < NUM_NODES; i++)); do if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then machine="${NODE_IPS[$i]}" break diff --git a/cluster/mesos/docker/config-default.sh b/cluster/mesos/docker/config-default.sh index 2fee7df3537..45f45f9a548 100755 --- a/cluster/mesos/docker/config-default.sh +++ b/cluster/mesos/docker/config-default.sh @@ -16,10 +16,10 @@ ## Contains configuration values for interacting with the mesos/docker cluster -NUM_MINIONS=${NUM_MINIONS:-2} +NUM_NODES=${NUM_NODES:-2} INSTANCE_PREFIX="${INSTANCE_PREFIX:-kubernetes}" MASTER_NAME="${INSTANCE_PREFIX}-master" -NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) SERVICE_CLUSTER_IP_RANGE=10.10.10.0/24 diff --git a/cluster/mesos/docker/config-test.sh b/cluster/mesos/docker/config-test.sh index 4e6261bb4bd..13ab3994f7b 100644 --- a/cluster/mesos/docker/config-test.sh +++ b/cluster/mesos/docker/config-test.sh @@ -15,8 +15,8 @@ # limitations under the License. ## Contains configuration values for interacting with the docker-compose cluster in test mode -#Set NUM_MINIONS to minimum required for testing. -NUM_MINIONS=2 +#Set NUM_NODES to minimum required for testing. +NUM_NODES=2 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../.. source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/config-default.sh" diff --git a/cluster/mesos/docker/util.sh b/cluster/mesos/docker/util.sh index 20f8f32df44..38d58991235 100644 --- a/cluster/mesos/docker/util.sh +++ b/cluster/mesos/docker/util.sh @@ -283,8 +283,8 @@ function kube-up { echo "Starting ${KUBERNETES_PROVIDER} cluster" 1>&2 cluster::mesos::docker::docker_compose up -d - echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_MINIONS} slaves" - cluster::mesos::docker::docker_compose scale mesosslave=${NUM_MINIONS} + echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_NODES} slaves" + cluster::mesos::docker::docker_compose scale mesosslave=${NUM_NODES} # await-health-check requires GNU timeout # apiserver hostname resolved by docker diff --git a/cluster/options.md b/cluster/options.md index 726941e8a3b..3b2fa150fbb 100644 --- a/cluster/options.md +++ b/cluster/options.md @@ -8,7 +8,7 @@ These options apply across providers. There are additional documents for option This is a work-in-progress; not all options are documented yet! -**NUM_MINIONS** +**NUM_NODES** The number of minion instances to create. Most providers default this to 4. diff --git a/cluster/rackspace/config-default.sh b/cluster/rackspace/config-default.sh index 96652ad059d..e383ebc9726 100755 --- a/cluster/rackspace/config-default.sh +++ b/cluster/rackspace/config-default.sh @@ -16,7 +16,7 @@ # Sane defaults for dev environments. 
The following variables can be easily overriden # by setting each as a ENV variable ahead of time: -# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME +# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_NODES, NOVA_NETWORK and SSH_KEY_NAME # Shared KUBE_IMAGE="${KUBE_IMAGE-f2a71670-ced3-4274-80b6-0efcd0f8f91b}" # CoreOS(Beta) @@ -32,9 +32,9 @@ MASTER_TAG="tags=${INSTANCE_PREFIX}-master" # Node KUBE_NODE_FLAVOR="${KUBE_NODE_FLAVOR-general1-2}" -NUM_MINIONS="${NUM_MINIONS-4}" +NUM_NODES="${NUM_NODES-4}" NODE_TAG="tags=${INSTANCE_PREFIX}-node" -NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_MINIONS}})) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_NODES}})) KUBE_NETWORK="10.240.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET diff --git a/cluster/ubuntu/config-default.sh b/cluster/ubuntu/config-default.sh index 680157b033b..16b6c65bad5 100755 --- a/cluster/ubuntu/config-default.sh +++ b/cluster/ubuntu/config-default.sh @@ -27,7 +27,7 @@ role=${role:-"ai i i"} export roles=($role) # Define minion numbers -export NUM_MINIONS=${NUM_MINIONS:-3} +export NUM_NODES=${NUM_NODES:-3} # define the IP range used for service cluster IPs. # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here. export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # formerly PORTAL_NET diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index 86301b7b4b8..d7acde8f18f 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -17,8 +17,8 @@ ## Contains configuration values for interacting with the Vagrant cluster # Number of minions in the cluster -NUM_MINIONS=${NUM_MINIONS-"1"} -export NUM_MINIONS +NUM_NODES=${NUM_NODES-"1"} +export NUM_NODES # The IP of the master export MASTER_IP=${MASTER_IP-"10.245.1.2"} @@ -37,7 +37,7 @@ MASTER_CONTAINER_NETMASK="255.255.255.0" MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" -for ((i=0; i < NUM_MINIONS; i++)) do +for ((i=0; i < NUM_NODES; i++)) do NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))" NODE_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))" NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" diff --git a/cluster/vagrant/config-test.sh b/cluster/vagrant/config-test.sh index 8998b533905..e9919743ffd 100644 --- a/cluster/vagrant/config-test.sh +++ b/cluster/vagrant/config-test.sh @@ -15,8 +15,8 @@ # limitations under the License. ## Contains configuration values for interacting with the Vagrant cluster in test mode -#Set NUM_MINIONS to minimum required for testing. -NUM_MINIONS=2 +#Set NUM_NODES to minimum required for testing. +NUM_NODES=2 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/vagrant/config-default.sh" diff --git a/cluster/vagrant/pod-ip-test.sh b/cluster/vagrant/pod-ip-test.sh index 23031482576..f397ab8c438 100755 --- a/cluster/vagrant/pod-ip-test.sh +++ b/cluster/vagrant/pod-ip-test.sh @@ -59,7 +59,7 @@ cd "${KUBE_ROOT}" echo All verbose output will be redirected to $logfile, use --logfile option to change. printf "Start the cluster with 2 minions .. 
" -export NUM_MINIONS=2 +export NUM_NODES=2 export KUBERNETES_PROVIDER=vagrant (cluster/kube-up.sh >>"$logfile" 2>&1) || true diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index e3682b219d9..7867d0f1581 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -24,7 +24,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. source "${KUBE_ROOT}/cluster/kube-env.sh" source "${KUBE_ROOT}/cluster/kube-util.sh" -EXPECTED_NUM_NODES="${NUM_MINIONS}" +EXPECTED_NUM_NODES="${NUM_NODES}" if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1)) fi diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index 6ef11a521bf..a1b8ac2107d 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -NUM_MINIONS=4 +NUM_NODES=4 DISK=./kube/kube.vmdk GUEST_ID=debian7_64Guest @@ -26,8 +26,8 @@ MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 MASTER_CPU=1 -NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) +NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24")) NODE_MEMORY_MB=2048 NODE_CPU=1 diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index e2d65854a19..0a7013fa5d2 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -NUM_MINIONS=2 +NUM_NODES=2 DISK=./kube/kube.vmdk GUEST_ID=debian7_64Guest @@ -26,8 +26,8 @@ MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 MASTER_CPU=1 -NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) +NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24")) NODE_MEMORY_MB=1024 NODE_CPU=1 diff --git a/docs/admin/cluster-large.md b/docs/admin/cluster-large.md index 350da95e9e8..ceb51ff8dfe 100644 --- a/docs/admin/cluster-large.md +++ b/docs/admin/cluster-large.md @@ -41,7 +41,7 @@ At v1.0, Kubernetes supports clusters up to 100 nodes with 30 pods per node and A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane). -Normally the number of nodes in a cluster is controlled by the the value `NUM_MINIONS` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)). +Normally the number of nodes in a cluster is controlled by the the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)). Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run in to quota issues and fail to bring the cluster up. 
diff --git a/docs/design/aws_under_the_hood.md b/docs/design/aws_under_the_hood.md index 9fe46d6f288..a55c09e31e5 100644 --- a/docs/design/aws_under_the_hood.md +++ b/docs/design/aws_under_the_hood.md @@ -250,7 +250,7 @@ cross-AZ-clusters are more convenient. * For auto-scaling, on each nodes it creates a launch configuration and group. The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-minion-group. The default name is kubernetes-minion-group. The auto-scaling group has a min and max size - that are both set to NUM_MINIONS. You can change the size of the auto-scaling + that are both set to NUM_NODES. You can change the size of the auto-scaling group to add or remove the total number of nodes from within the AWS API or Console. Each nodes self-configures, meaning that they come up; run Salt with the stored configuration; connect to the master; are assigned an internal CIDR; diff --git a/docs/devel/developer-guides/vagrant.md b/docs/devel/developer-guides/vagrant.md index 291b85bc37c..2d628abb736 100644 --- a/docs/devel/developer-guides/vagrant.md +++ b/docs/devel/developer-guides/vagrant.md @@ -301,7 +301,7 @@ Congratulations! The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`: ```sh -NUM_MINIONS=3 hack/e2e-test.sh +NUM_NODES=3 hack/e2e-test.sh ``` ### Troubleshooting @@ -350,10 +350,10 @@ Are you sure you built a release first? Did you install `net-tools`? For more cl #### I want to change the number of nodes! -You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so: +You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_NODES` to 1 like so: ```sh -export NUM_MINIONS=1 +export NUM_NODES=1 ``` #### I want my VMs to have more memory! diff --git a/docs/devel/kubemark-guide.md b/docs/devel/kubemark-guide.md index 758963dec7d..df0ecb96271 100644 --- a/docs/devel/kubemark-guide.md +++ b/docs/devel/kubemark-guide.md @@ -73,7 +73,7 @@ To start a Kubemark cluster on GCE you need to create an external cluster (it ca `make quick-release`) and run `test/kubemark/start-kubemark.sh` script. This script will create a VM for master components, Pods for HollowNodes and do all the setup necessary to let them talk to each other. It will use the configuration stored in `cluster/kubemark/config-default.sh` - you can tweak it however you want, but note that some features may not be implemented yet, as implementation of Hollow components/mocks will probably be lagging behind ‘real’ one. For performance tests interesting variables are -`NUM_MINIONS` and `MASTER_SIZE`. +`NUM_NODES` and `MASTER_SIZE`.
After start-kubemark script is finished you’ll have a ready Kubemark cluster, a kubeconfig file for talking to the Kubemark cluster is stored in `test/kubemark/kubeconfig.loc`. Currently we're running HollowNode with limit of 0.05 a CPU core and ~60MB or memory, which taking into account default cluster addons and fluentD running on an 'external' diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md index 7c7aab9fb67..3e36cfaa8a8 100644 --- a/docs/getting-started-guides/aws.md +++ b/docs/getting-started-guides/aws.md @@ -83,7 +83,7 @@ You can override the variables defined in [config-default.sh](http://releases.k8 ```bash export KUBE_AWS_ZONE=eu-west-1c -export NUM_MINIONS=2 +export NUM_NODES=2 export NODE_SIZE=m3.medium export AWS_S3_REGION=eu-west-1 export AWS_S3_BUCKET=mycompany-kubernetes-artifacts @@ -91,7 +91,7 @@ export INSTANCE_PREFIX=k8s ... ``` -The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`, in particular for clusters less than 50 nodes it will +The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`; in particular, for clusters of fewer than 50 nodes it will use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`. It will also try to create or reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion". diff --git a/docs/getting-started-guides/libvirt-coreos.md b/docs/getting-started-guides/libvirt-coreos.md index 7d73a560491..2d3ec2ad050 100644 --- a/docs/getting-started-guides/libvirt-coreos.md +++ b/docs/getting-started-guides/libvirt-coreos.md @@ -167,7 +167,7 @@ cluster/kube-up.sh The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine. -The `NUM_MINIONS` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3. +The `NUM_NODES` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3. The `KUBE_PUSH` environment variable may be set to specify which Kubernetes binaries must be deployed on the cluster. Its possible values are: @@ -225,7 +225,7 @@ export KUBERNETES_PROVIDER=libvirt-coreos Bring up a libvirt-CoreOS cluster of 5 nodes ```sh -NUM_MINIONS=5 cluster/kube-up.sh +NUM_NODES=5 cluster/kube-up.sh ``` Destroy the libvirt-CoreOS cluster diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md index 7ad434b4c81..25188d8e347 100644 --- a/docs/getting-started-guides/rackspace.md +++ b/docs/getting-started-guides/rackspace.md @@ -84,7 +84,7 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo - flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network. 2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password). 3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems. -4. We then boot as many nodes as defined via `$NUM_MINIONS`. +4.
We then boot as many nodes as defined via `$NUM_NODES`. ## Some notes diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md index 0b0041714b3..9c5151a1b65 100644 --- a/docs/getting-started-guides/ubuntu.md +++ b/docs/getting-started-guides/ubuntu.md @@ -116,7 +116,7 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223" export role="ai i i" -export NUM_MINIONS=${NUM_MINIONS:-3} +export NUM_NODES=${NUM_NODES:-3} export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 @@ -129,7 +129,7 @@ separated with blank space like ` ` Then the `role` variable defines the role of above machine in the same order, "ai" stands for machine acts as both master and node, "a" stands for master, "i" stands for node. -The `NUM_MINIONS` variable defines the total number of nodes. +The `NUM_NODES` variable defines the total number of nodes. The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service IP range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips. diff --git a/docs/getting-started-guides/vagrant.md b/docs/getting-started-guides/vagrant.md index 5e48b7b1fc4..055070dd1d1 100644 --- a/docs/getting-started-guides/vagrant.md +++ b/docs/getting-started-guides/vagrant.md @@ -389,10 +389,10 @@ Log on to one of the nodes (`vagrant ssh node-1`) and inspect the salt minion lo #### I want to change the number of nodes! -You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so: +You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_NODES` to 1 like so: ```sh -export NUM_MINIONS=1 +export NUM_NODES=1 ``` #### I want my VMs to have more memory! diff --git a/examples/celery-rabbitmq/README.md b/examples/celery-rabbitmq/README.md index d508e460701..5349acea795 100644 --- a/examples/celery-rabbitmq/README.md +++ b/examples/celery-rabbitmq/README.md @@ -57,7 +57,7 @@ At the end of the example, we will have: ## Prerequisites -You should already have turned up a Kubernetes cluster. To get the most of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_MINIONS` environment variable to 2 or more). +You should already have turned up a Kubernetes cluster. To get the most out of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_NODES` environment variable to 2 or more). ## Step 1: Start the RabbitMQ service diff --git a/examples/runtime-constraints/README.md b/examples/runtime-constraints/README.md index 410988e172a..35112f9ac12 100644 --- a/examples/runtime-constraints/README.md +++ b/examples/runtime-constraints/README.md @@ -43,7 +43,7 @@ of compute resources easier to follow by starting with an empty cluster.
``` $ export KUBERNETES_PROVIDER=vagrant -$ export NUM_MINIONS=1 +$ export NUM_NODES=1 $ export KUBE_ENABLE_CLUSTER_MONITORING=none $ export KUBE_ENABLE_CLUSTER_DNS=false $ export KUBE_ENABLE_CLUSTER_UI=false diff --git a/hack/conformance-test.sh b/hack/conformance-test.sh index 60fd725cfb6..ee16a6bb1ae 100755 --- a/hack/conformance-test.sh +++ b/hack/conformance-test.sh @@ -18,7 +18,7 @@ # supports key features for Kubernetes version 1.0. # Instructions: -# - Setup a Kubernetes cluster with $NUM_MINIONS nodes (defined below). +# - Set up a Kubernetes cluster with $NUM_NODES nodes (defined below). # - Provide a Kubeconfig file whose current context is set to the # cluster to be tested, and with suitable auth setting. # - Specify the location of that kubeconfig with, e.g.: @@ -78,10 +78,10 @@ echo "Conformance test checking conformance with Kubernetes version 1.0" # somewhere in the description (i.e. either in the Describe part or the It part). # The list of tagged conformance tests can be retrieved by: # -# NUM_MINIONS=4 KUBERNETES_CONFORMANCE_TEST="y" \ +# NUM_NODES=4 KUBERNETES_CONFORMANCE_TEST="y" \ # hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.dryRun=true declare -x KUBERNETES_CONFORMANCE_TEST="y" -declare -x NUM_MINIONS=4 +declare -x NUM_NODES=4 hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.skip='\[Skipped\]' exit $? diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh index 3cffaab44af..00069250390 100755 --- a/hack/ginkgo-e2e.sh +++ b/hack/ginkgo-e2e.sh @@ -102,7 +102,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}" --cluster-tag="${CLUSTER_ID:-}" \ --repo-root="${KUBE_VERSION_ROOT}" \ --node-instance-group="${NODE_INSTANCE_GROUP:-}" \ - --num-nodes="${NUM_MINIONS:-}" \ + --num-nodes="${NUM_NODES:-}" \ --prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \ ${E2E_CLEAN_START:+"--clean-start=true"} \ ${E2E_MIN_STARTUP_PODS:+"--minStartupPods=${E2E_MIN_STARTUP_PODS}"} \ diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 100258d565a..b2c36bb7d21 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -102,12 +102,12 @@ fi if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then if [[ "${PERFORMANCE:-}" == "true" ]]; then : ${MASTER_SIZE:="m3.xlarge"} - : ${NUM_MINIONS:="100"} + : ${NUM_NODES:="100"} : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"} else : ${MASTER_SIZE:="m3.large"} : ${NODE_SIZE:="m3.large"} - : ${NUM_MINIONS:="3"} + : ${NUM_NODES:="3"} fi fi @@ -354,7 +354,7 @@ case ${JOB_NAME} in : ${PROJECT:="kubernetes-jenkins-pull"} : ${ENABLE_DEPLOYMENTS:=true} # Override GCE defaults - NUM_MINIONS=${NUM_NODES_PARALLEL} + NUM_NODES=${NUM_NODES_PARALLEL} ;; # Runs all non-flaky tests on GCE in parallel. @@ -373,7 +373,7 @@ case ${JOB_NAME} in : ${PROJECT:="kubernetes-jenkins"} : ${ENABLE_DEPLOYMENTS:=true} # Override GCE defaults - NUM_MINIONS=${NUM_NODES_PARALLEL} + NUM_NODES=${NUM_NODES_PARALLEL} ;; # Runs all non-flaky tests on AWS in parallel. @@ -390,7 +390,7 @@ case ${JOB_NAME} in )"} : ${ENABLE_DEPLOYMENTS:=true} # Override AWS defaults. - NUM_MINIONS=${NUM_NODES_PARALLEL} + NUM_NODES=${NUM_NODES_PARALLEL} ;; # Runs the flaky tests on GCE in parallel. @@ -409,7 +409,7 @@ case ${JOB_NAME} in : ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"} : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} # Override GCE defaults. - NUM_MINIONS=${NUM_NODES_PARALLEL} + NUM_NODES=${NUM_NODES_PARALLEL} ;; # Runs only the reboot tests on GCE.
@@ -432,7 +432,7 @@ case ${JOB_NAME} in MASTER_SIZE="n1-standard-4" NODE_SIZE="n1-standard-2" NODE_DISK_SIZE="50GB" - NUM_MINIONS="100" + NUM_NODES="100" # Reduce logs verbosity TEST_CLUSTER_LOG_LEVEL="--v=2" # Increase resync period to simulate production @@ -454,7 +454,7 @@ case ${JOB_NAME} in MASTER_SIZE="n1-standard-4" NODE_SIZE="n1-standard-2" NODE_DISK_SIZE="50GB" - NUM_MINIONS="100" + NUM_NODES="100" # Reduce logs verbosity TEST_CLUSTER_LOG_LEVEL="--v=2" # Increase resync period to simulate production @@ -1195,7 +1195,7 @@ case ${JOB_NAME} in : ${E2E_UP:="true"} : ${E2E_TEST:="false"} : ${E2E_DOWN:="false"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; kubernetes-upgrade-gce-step2-upgrade-master) @@ -1208,7 +1208,7 @@ case ${JOB_NAME} in : ${E2E_TEST:="true"} : ${E2E_DOWN:="false"} : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} : ${KUBE_ENABLE_DEPLOYMENTS:=true} : ${KUBE_ENABLE_DAEMONSETS:=true} ;; @@ -1230,7 +1230,7 @@ case ${JOB_NAME} in ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ )"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; kubernetes-upgrade-gce-step4-upgrade-cluster) @@ -1243,7 +1243,7 @@ case ${JOB_NAME} in : ${E2E_TEST:="true"} : ${E2E_DOWN:="false"} : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} : ${KUBE_ENABLE_DEPLOYMENTS:=true} : ${KUBE_ENABLE_DAEMONSETS:=true} ;; @@ -1263,7 +1263,7 @@ case ${JOB_NAME} in ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ )"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; kubernetes-upgrade-gce-step6-e2e-new) @@ -1282,7 +1282,7 @@ case ${JOB_NAME} in ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \ )"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; # kubernetes-upgrade-gce-1.0-current-release @@ -1305,7 +1305,7 @@ case ${JOB_NAME} in : ${E2E_TEST:="false"} : ${E2E_DOWN:="false"} : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; kubernetes-upgrade-1.0-current-release-gce-step2-upgrade-master) @@ -1321,7 +1321,7 @@ case ${JOB_NAME} in : ${E2E_DOWN:="false"} : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"} : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} : ${KUBE_ENABLE_DEPLOYMENTS:=true} : ${KUBE_ENABLE_DAEMONSETS:=true} ;; @@ -1342,7 +1342,7 @@ case ${JOB_NAME} in ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ )"} : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; kubernetes-upgrade-1.0-current-release-gce-step4-upgrade-cluster) @@ -1358,7 +1358,7 @@ case ${JOB_NAME} in : ${E2E_DOWN:="false"} : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"} : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} : ${KUBE_ENABLE_DEPLOYMENTS:=true} : ${KUBE_ENABLE_DAEMONSETS:=true} ;; @@ -1379,7 +1379,7 @@ case ${JOB_NAME} in ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ )"} : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; kubernetes-upgrade-1.0-current-release-gce-step6-e2e-new) @@ -1400,7 +1400,7 @@ case ${JOB_NAME} in ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \ )"} : 
${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"} - : ${NUM_MINIONS:=5} + : ${NUM_NODES:=5} ;; # Run Kubemark test on a fake 100 node cluster to have a comparison @@ -1415,7 +1415,7 @@ case ${JOB_NAME} in : ${USE_KUBEMARK:="true"} # Override defaults to be indpendent from GCE defaults and set kubemark parameters KUBE_GCE_INSTANCE_PREFIX="kubemark100" - NUM_MINIONS="10" + NUM_NODES="10" MASTER_SIZE="n1-standard-2" NODE_SIZE="n1-standard-1" KUBEMARK_MASTER_SIZE="n1-standard-4" @@ -1433,7 +1433,7 @@ case ${JOB_NAME} in : ${E2E_TEST:="false"} : ${USE_KUBEMARK:="true"} # Override defaults to be indpendent from GCE defaults and set kubemark parameters - NUM_MINIONS="6" + NUM_NODES="6" MASTER_SIZE="n1-standard-4" NODE_SIZE="n1-standard-8" KUBE_GCE_INSTANCE_PREFIX="kubemark500" @@ -1453,10 +1453,10 @@ case ${JOB_NAME} in : ${USE_KUBEMARK:="true"} # Override defaults to be indpendent from GCE defaults and set kubemark parameters # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way. - NUM_MINIONS="11" + NUM_NODES="11" MASTER_SIZE="n1-standard-4" NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core - # so NUM_MINIONS x cores_per_minion should + # so NUM_NODES x cores_per_minion should # be set accordingly. KUBE_GCE_INSTANCE_PREFIX="kubemark1000" E2E_ZONE="asia-east1-a" @@ -1502,7 +1502,7 @@ export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-} export MASTER_SIZE=${MASTER_SIZE:-} export NODE_SIZE=${NODE_SIZE:-} export NODE_DISK_SIZE=${NODE_DISK_SIZE:-} -export NUM_MINIONS=${NUM_MINIONS:-} +export NUM_NODES=${NUM_NODES:-} export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-} export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-} export PROJECT=${PROJECT:-} @@ -1715,15 +1715,15 @@ fi ### Start Kubemark ### if [[ "${USE_KUBEMARK:-}" == "true" ]]; then export RUN_FROM_DISTRO=true - NUM_NODES_BKP=${NUM_MINIONS} + NUM_NODES_BKP=${NUM_NODES} MASTER_SIZE_BKP=${MASTER_SIZE} ./test/kubemark/stop-kubemark.sh - NUM_MINIONS=${KUBEMARK_NUM_NODES:-$NUM_MINIONS} + NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES} MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE} ./test/kubemark/start-kubemark.sh ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false" ./test/kubemark/stop-kubemark.sh - NUM_MINIONS=${NUM_NODES_BKP} + NUM_NODES=${NUM_NODES_BKP} MASTER_SIZE=${MASTER_SIZE_BKP} unset RUN_FROM_DISTRO unset NUM_NODES_BKP diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index ddcbf65f0dd..6db0c5f378a 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -205,7 +205,7 @@ contexts: current-context: kubemark-context EOF -sed "s/##numreplicas##/${NUM_MINIONS:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json +sed "s/##numreplicas##/${NUM_NODES:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json sed -i'' -e "s/##project##/${PROJECT}/g" ${KUBE_ROOT}/test/kubemark/hollow-node.json kubectl create -f ${KUBE_ROOT}/test/kubemark/kubemark-ns.json kubectl create -f ${KUBECONFIG_SECRET} --namespace="kubemark" @@ -215,7 +215,7 @@ rm ${KUBECONFIG_SECRET} echo "Waiting for all HollowNodes to become Running..." echo "This can loop forever if something crashed." 
-until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_MINIONS}" ]]; do +until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_NODES}" ]]; do echo -n . sleep 1 done From a506030e63b2254e9c16ac9a68cabd51abd3bbbf Mon Sep 17 00:00:00 2001 From: Brad Erickson Date: Mon, 23 Nov 2015 19:06:47 -0800 Subject: [PATCH 14/14] Minion->Node rename: PREEMPTIBLE_NODE --- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/gce/util.sh | 2 +- cluster/kubemark/config-default.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 316c177b1dd..0556904b736 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -26,7 +26,7 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true} -PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} +PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 6f3274b534b..e4fe0d5693b 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -27,7 +27,7 @@ NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} KUBE_APISERVER_REQUEST_TIMEOUT=300 -PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} +PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index aed1742c2cb..0705906f621 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -370,7 +370,7 @@ function create-node-template { local attempt=1 local preemptible_minions="" - if [[ "${PREEMPTIBLE_MINION}" == "true" ]]; then + if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then preemptible_minions="--preemptible --maintenance-policy TERMINATE" fi while true; do diff --git a/cluster/kubemark/config-default.sh b/cluster/kubemark/config-default.sh index f97df8dfece..9bc5392ad23 100644 --- a/cluster/kubemark/config-default.sh +++ b/cluster/kubemark/config-default.sh @@ -26,7 +26,7 @@ NUM_NODES=${NUM_NODES:-100} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} -PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} +PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103}