diff --git a/Vagrantfile b/Vagrantfile
index 709799dbe9f..55612548fb4 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -18,11 +18,11 @@ END
 end
 
 # The number of minions to provision
-$num_minion = (ENV['NUM_MINIONS'] || 1).to_i
+$num_minion = (ENV['NUM_NODES'] || 1).to_i
 
 # ip configuration
 $master_ip = ENV['MASTER_IP']
-$minion_ip_base = ENV['MINION_IP_BASE'] || ""
+$minion_ip_base = ENV['NODE_IP_BASE'] || ""
 $minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" }
 
 # Determine the OS platform to use
@@ -105,7 +105,7 @@ end
 # When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
 # This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
 $vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
-$vm_minion_mem = (ENV['KUBERNETES_MINION_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
+$vm_minion_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
 
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   def setvmboxandurl(config, provider)
diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh
index 7013eec1873..95f9eb4f9d4 100644
--- a/cluster/aws/config-default.sh
+++ b/cluster/aws/config-default.sh
@@ -16,26 +16,26 @@
 ZONE=${KUBE_AWS_ZONE:-us-west-2a}
 MASTER_SIZE=${MASTER_SIZE:-}
-MINION_SIZE=${MINION_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-4}
+NODE_SIZE=${NODE_SIZE:-}
+NUM_NODES=${NUM_NODES:-4}
 
 # Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${MINION_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
-    MINION_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
-    MINION_SIZE="t2.small"
+if [[ -z ${NODE_SIZE} ]]; then
+  if (( ${NUM_NODES} < 50 )); then
+    NODE_SIZE="t2.micro"
+  elif (( ${NUM_NODES} < 150 )); then
+    NODE_SIZE="t2.small"
   else
-    MINION_SIZE="t2.medium"
+    NODE_SIZE="t2.medium"
   fi
 fi
 
 # Dynamically set the master size by the number of nodes, these are guesses
 # TODO: gather some data
 if [[ -z ${MASTER_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     MASTER_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     MASTER_SIZE="t2.small"
   else
     MASTER_SIZE="t2.medium"
@@ -56,7 +56,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-kubernetes}"
 CLUSTER_ID=${INSTANCE_PREFIX}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
 IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_MINION="kubernetes-minion"
+IAM_PROFILE_NODE="kubernetes-minion"
 
 LOG="/dev/null"
 
@@ -66,13 +66,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
 MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
 MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
 # The minions root EBS volume size (used to house Docker images)
-MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}"
-MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32}
+NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
+NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}
 
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
-MINION_SCOPES=""
+NODE_TAG="${INSTANCE_PREFIX}-minion"
+NODE_SCOPES=""
 POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
@@ -121,7 +121,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   # TODO: actually configure ASG or similar
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi
 
@@ -130,11 +130,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco
 
 # Optional: Enable/disable public IP assignment for minions.
 # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
-ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
+ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true}
 
 # OS options for minions
 KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}"
-KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
+KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}"
 COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
 CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
 RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"
diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh
index 0a38279de5c..2512c8dbeff 100755
--- a/cluster/aws/config-test.sh
+++ b/cluster/aws/config-test.sh
@@ -17,26 +17,26 @@
 ZONE=${KUBE_AWS_ZONE:-us-west-2a}
 MASTER_SIZE=${MASTER_SIZE:-}
-MINION_SIZE=${MINION_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-2}
+NODE_SIZE=${NODE_SIZE:-}
+NUM_NODES=${NUM_NODES:-2}
 
 # Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${MINION_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
-    MINION_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
-    MINION_SIZE="t2.small"
+if [[ -z ${NODE_SIZE} ]]; then
+  if (( ${NUM_NODES} < 50 )); then
+    NODE_SIZE="t2.micro"
+  elif (( ${NUM_NODES} < 150 )); then
+    NODE_SIZE="t2.small"
   else
-    MINION_SIZE="t2.medium"
+    NODE_SIZE="t2.medium"
   fi
 fi
 
 # Dynamically set the master size by the number of nodes, these are guesses
 # TODO: gather some data
 if [[ -z ${MASTER_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     MASTER_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     MASTER_SIZE="t2.small"
   else
     MASTER_SIZE="t2.medium"
@@ -54,7 +54,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-e2e-test-${USER}}"
 CLUSTER_ID=${INSTANCE_PREFIX}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
 IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_MINION="kubernetes-minion"
+IAM_PROFILE_NODE="kubernetes-minion"
 
 LOG="/dev/null"
 
@@ -64,13 +64,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
 MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
 MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
 # The minions root EBS volume size (used to house Docker images)
-MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}"
-MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32}
+NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
+NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}
 
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
-MINION_SCOPES=""
+NODE_TAG="${INSTANCE_PREFIX}-minion"
+NODE_SCOPES=""
 POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
@@ -117,7 +117,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   # TODO: actually configure ASG or similar
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi
 
@@ -126,11 +126,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco
 
 # Optional: Enable/disable public IP assignment for minions.
 # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
-ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
+ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true}
 
 # OS options for minions
 KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}"
-KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
+KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}"
 COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
 CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
 RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"
diff --git a/cluster/aws/coreos/util.sh b/cluster/aws/coreos/util.sh
index 70763751feb..c58144bb02c 100644
--- a/cluster/aws/coreos/util.sh
+++ b/cluster/aws/coreos/util.sh
@@ -19,11 +19,11 @@ SSH_USER=core
 
 function detect-minion-image (){
-  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
-    KUBE_MINION_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
+  if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
+    KUBE_NODE_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
   fi
-  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
-    echo "unable to determine KUBE_MINION_IMAGE"
+  if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
+    echo "unable to determine KUBE_NODE_IMAGE"
     exit 2
   fi
 }
diff --git a/cluster/aws/options.md b/cluster/aws/options.md
index 9db6e60af01..ee1a77db31d 100644
--- a/cluster/aws/options.md
+++ b/cluster/aws/options.md
@@ -27,7 +27,7 @@ It is not a bad idea to set AWS_S3_BUCKET to something more human friendly.
 AWS_S3_REGION is useful for people that want to control their data location, because of regulatory restrictions for example.
 
-**MASTER_SIZE**, **MINION_SIZE**
+**MASTER_SIZE**, **NODE_SIZE**
 
 The instance type to use for creating the master/minion. Defaults to auto-sizing based on the number of nodes (see below).
 
@@ -35,10 +35,10 @@ For production usage, we recommend bigger instances, for example:
 
 ```
 export MASTER_SIZE=c4.large
-export MINION_SIZE=r3.large
+export NODE_SIZE=r3.large
 ```
 
-If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`.
+If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`.
 In particular for clusters less than 50 nodes it will use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`.
 
@@ -46,7 +46,7 @@ Please note: `kube-up` utilizes ephemeral storage available on instances for doc
 support ephemeral storage and will default to docker storage on the root disk which is usually only 8GB.
 EBS-only instance types include `t2`, `c4`, and `m4`.
 
-**KUBE_ENABLE_MINION_PUBLIC_IP**
+**KUBE_ENABLE_NODE_PUBLIC_IP**
 
 Should a public IP automatically assigned to the minions? "true" or "false"
 Defaults to: "true"
diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh
index 54c81dfc442..e14693019b3 100644
--- a/cluster/aws/templates/create-dynamic-salt-files.sh
+++ b/cluster/aws/templates/create-dynamic-salt-files.sh
@@ -40,7 +40,7 @@ network_provider: '$(echo "$NETWORK_PROVIDER")'
 opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG")'
 opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
 opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
-num_nodes: $(echo "${NUM_MINIONS}")
+num_nodes: $(echo "${NUM_NODES}")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 EOF
 
diff --git a/cluster/aws/trusty/common.sh b/cluster/aws/trusty/common.sh
index e8b827f4f8c..bfc98b1882b 100644
--- a/cluster/aws/trusty/common.sh
+++ b/cluster/aws/trusty/common.sh
@@ -18,9 +18,9 @@
 # A library of common helper functions for Ubuntus & Debians.
 
 function detect-minion-image() {
-  if [[ -z "${KUBE_MINION_IMAGE=-}" ]]; then
+  if [[ -z "${KUBE_NODE_IMAGE=-}" ]]; then
     detect-image
-    KUBE_MINION_IMAGE=$AWS_IMAGE
+    KUBE_NODE_IMAGE=$AWS_IMAGE
   fi
 }
 
diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh
index 0c3f3a3ca85..b3dd3ffa421 100755
--- a/cluster/aws/util.sh
+++ b/cluster/aws/util.sh
@@ -79,14 +79,14 @@ if [[ -n "${KUBE_SUBNET_CIDR:-}" ]]; then
 fi
 
 MASTER_SG_NAME="kubernetes-master-${CLUSTER_ID}"
-MINION_SG_NAME="kubernetes-minion-${CLUSTER_ID}"
+NODE_SG_NAME="kubernetes-minion-${CLUSTER_ID}"
 
 # Be sure to map all the ephemeral drives. We can specify more than we actually have.
 # TODO: Actually mount the correct number (especially if we have more), though this is non-trivial, and
 # only affects the big storage instance types, which aren't a typical use case right now.
 BLOCK_DEVICE_MAPPINGS_BASE="{\"DeviceName\": \"/dev/sdc\",\"VirtualName\":\"ephemeral0\"},{\"DeviceName\": \"/dev/sdd\",\"VirtualName\":\"ephemeral1\"},{\"DeviceName\": \"/dev/sde\",\"VirtualName\":\"ephemeral2\"},{\"DeviceName\": \"/dev/sdf\",\"VirtualName\":\"ephemeral3\"}"
 MASTER_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MASTER_ROOT_DISK_SIZE},\"VolumeType\":\"${MASTER_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
-MINION_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MINION_ROOT_DISK_SIZE},\"VolumeType\":\"${MINION_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
+NODE_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${NODE_ROOT_DISK_SIZE},\"VolumeType\":\"${NODE_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
 
 # TODO (bburns) Parameterize this for multiple cluster per project
@@ -181,38 +181,38 @@ function query-running-minions () {
     Name=vpc-id,Values=${VPC_ID} \
     Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
     Name=tag:aws:autoscaling:groupName,Values=${ASG_NAME} \
-    Name=tag:Role,Values=${MINION_TAG} \
+    Name=tag:Role,Values=${NODE_TAG} \
     --query ${query}
 }
 
 function find-running-minions () {
-  MINION_IDS=()
-  MINION_NAMES=()
+  NODE_IDS=()
+  NODE_NAMES=()
   for id in $(query-running-minions "Reservations[].Instances[].InstanceId"); do
-    MINION_IDS+=("${id}")
+    NODE_IDS+=("${id}")
     # We use the minion ids as the name
-    MINION_NAMES+=("${id}")
+    NODE_NAMES+=("${id}")
   done
 }
 
 function detect-minions () {
   find-running-minions
 
-  # This is inefficient, but we want MINION_NAMES / MINION_IDS to be ordered the same as KUBE_MINION_IP_ADDRESSES
-  KUBE_MINION_IP_ADDRESSES=()
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+  # This is inefficient, but we want NODE_NAMES / NODE_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES
+  KUBE_NODE_IP_ADDRESSES=()
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     local minion_ip
-    if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then
-      minion_ip=$(get_instance_public_ip ${MINION_NAMES[$i]})
+    if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then
+      minion_ip=$(get_instance_public_ip ${NODE_NAMES[$i]})
     else
-      minion_ip=$(get_instance_private_ip ${MINION_NAMES[$i]})
+      minion_ip=$(get_instance_private_ip ${NODE_NAMES[$i]})
     fi
-    echo "Found minion ${i}: ${MINION_NAMES[$i]} @ ${minion_ip}"
-    KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
+    echo "Found minion ${i}: ${NODE_NAMES[$i]} @ ${minion_ip}"
+    KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
   done
 
-  if [[ -z "$KUBE_MINION_IP_ADDRESSES" ]]; then
+  if [[ -z "$KUBE_NODE_IP_ADDRESSES" ]]; then
    echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
    exit 1
  fi
@@ -228,13 +228,13 @@ function detect-security-groups {
       echo "Using master security group: ${MASTER_SG_NAME} ${MASTER_SG_ID}"
     fi
   fi
-  if [[ -z "${MINION_SG_ID-}" ]]; then
-    MINION_SG_ID=$(get_security_group_id "${MINION_SG_NAME}")
-    if [[ -z "${MINION_SG_ID}" ]]; then
+  if [[ -z "${NODE_SG_ID-}" ]]; then
+    NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}")
+    if [[ -z "${NODE_SG_ID}" ]]; then
      echo "Could not detect Kubernetes minion security group. Make sure you've launched a cluster with 'kube-up.sh'"
      exit 1
    else
-      echo "Using minion security group: ${MINION_SG_NAME} ${MINION_SG_ID}"
+      echo "Using minion security group: ${NODE_SG_NAME} ${NODE_SG_ID}"
    fi
  fi
}
@@ -609,9 +609,9 @@ function ensure-iam-profiles {
     echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
     create-iam-profile ${IAM_PROFILE_MASTER}
   }
-  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MINION} || {
-    echo "Creating minion IAM profile: ${IAM_PROFILE_MINION}"
-    create-iam-profile ${IAM_PROFILE_MINION}
+  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_NODE} || {
+    echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
+    create-iam-profile ${IAM_PROFILE_NODE}
   }
 }
 
@@ -768,10 +768,10 @@ function kube-up {
     echo "Creating master security group."
     create-security-group "${MASTER_SG_NAME}" "Kubernetes security group applied to master nodes"
   fi
-  MINION_SG_ID=$(get_security_group_id "${MINION_SG_NAME}")
-  if [[ -z "${MINION_SG_ID}" ]]; then
+  NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}")
+  if [[ -z "${NODE_SG_ID}" ]]; then
     echo "Creating minion security group."
-    create-security-group "${MINION_SG_NAME}" "Kubernetes security group applied to minion nodes"
+    create-security-group "${NODE_SG_NAME}" "Kubernetes security group applied to minion nodes"
   fi
 
   detect-security-groups
@@ -780,17 +780,17 @@
   authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all"
 
   # Minions can talk to minions
-  authorize-security-group-ingress "${MINION_SG_ID}" "--source-group ${MINION_SG_ID} --protocol all"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all"
 
   # Masters and minions can talk to each other
-  authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MINION_SG_ID} --protocol all"
-  authorize-security-group-ingress "${MINION_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all"
+  authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all"
 
   # TODO(justinsb): Would be fairly easy to replace 0.0.0.0/0 in these rules
 
   # SSH is open to the world
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0"
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0"
 
   # HTTPS to the master is allowed (for API access)
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0"
@@ -990,31 +990,31 @@ function start-minions() {
   echo "Creating minion configuration"
   generate-minion-user-data > "${KUBE_TEMP}/minion-user-data"
   local public_ip_option
-  if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then
+  if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then
     public_ip_option="--associate-public-ip-address"
   else
     public_ip_option="--no-associate-public-ip-address"
   fi
   ${AWS_ASG_CMD} create-launch-configuration \
       --launch-configuration-name ${ASG_NAME} \
-      --image-id $KUBE_MINION_IMAGE \
-      --iam-instance-profile ${IAM_PROFILE_MINION} \
-      --instance-type $MINION_SIZE \
+      --image-id $KUBE_NODE_IMAGE \
+      --iam-instance-profile ${IAM_PROFILE_NODE} \
+      --instance-type $NODE_SIZE \
       --key-name ${AWS_SSH_KEY_NAME} \
-      --security-groups ${MINION_SG_ID} \
+      --security-groups ${NODE_SG_ID} \
       ${public_ip_option} \
-      --block-device-mappings "${MINION_BLOCK_DEVICE_MAPPINGS}" \
+      --block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \
       --user-data "file://${KUBE_TEMP}/minion-user-data"
 
   echo "Creating autoscaling group"
   ${AWS_ASG_CMD} create-auto-scaling-group \
       --auto-scaling-group-name ${ASG_NAME} \
      --launch-configuration-name ${ASG_NAME} \
-      --min-size ${NUM_MINIONS} \
-      --max-size ${NUM_MINIONS} \
+      --min-size ${NUM_NODES} \
+      --max-size ${NUM_NODES} \
      --vpc-zone-identifier ${SUBNET_ID} \
      --tags ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Name,Value=${NODE_INSTANCE_PREFIX} \
-            ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${MINION_TAG} \
+            ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${NODE_TAG} \
             ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=KubernetesCluster,Value=${CLUSTER_ID}
 
   # Wait for the minions to be running
@@ -1022,8 +1022,8 @@ function start-minions() {
   attempt=0
   while true; do
     find-running-minions > $LOG
-    if [[ ${#MINION_IDS[@]} == ${NUM_MINIONS} ]]; then
-      echo -e " ${color_green}${#MINION_IDS[@]} minions started; ready${color_norm}"
+    if [[ ${#NODE_IDS[@]} == ${NUM_NODES} ]]; then
+      echo -e " ${color_green}${#NODE_IDS[@]} minions started; ready${color_norm}"
       break
     fi
 
@@ -1037,7 +1037,7 @@ function start-minions() {
       exit 1
     fi
 
-    echo -e " ${color_yellow}${#MINION_IDS[@]} minions started; waiting${color_norm}"
+    echo -e " ${color_yellow}${#NODE_IDS[@]} minions started; waiting${color_norm}"
     attempt=$(($attempt+1))
     sleep 10
   done
@@ -1113,11 +1113,11 @@ function check-cluster() {
   # Basic sanity checking
   # TODO(justinsb): This is really not needed any more
   local rc # Capture return code without exiting because of errexit bash option
-  for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do
+  for (( i=0; i<${#KUBE_NODE_IP_ADDRESSES[@]}; i++)); do
     # Make sure docker is installed and working.
     local attempt=0
     while true; do
-      local minion_ip=${KUBE_MINION_IP_ADDRESSES[$i]}
+      local minion_ip=${KUBE_NODE_IP_ADDRESSES[$i]}
       echo -n "Attempt $(($attempt+1)) to check Docker on node @ ${minion_ip} ..."
       local output=`check-minion ${minion_ip}`
       echo $output
@@ -1330,12 +1330,12 @@ function test-setup {
 
   # Open up port 80 & 8080 so common containers on minions can be reached
   # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0"
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0"
 
   # Open up the NodePort range
   # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0"
 
   echo "test-setup complete"
 }
diff --git a/cluster/centos/config-default.sh b/cluster/centos/config-default.sh
index df04bb4f6bc..116e6f4617c 100755
--- a/cluster/centos/config-default.sh
+++ b/cluster/centos/config-default.sh
@@ -27,8 +27,8 @@ export NODES=${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}
 # Number of nodes in your cluster.
 export NUM_NODES=${NUM_NODES:-2}
 
-# Should be removed when NUM_MINIONS is deprecated in validate-cluster.sh
-export NUM_MINIONS=${NUM_NODES}
+# Should be removed when NUM_NODES is deprecated in validate-cluster.sh
+export NUM_NODES=${NUM_NODES}
 
 # By default, the cluster will use the etcd installed on master.
 export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:4001"}
diff --git a/cluster/centos/master/scripts/apiserver.sh b/cluster/centos/master/scripts/apiserver.sh
index aebe85964c7..abd8cbc860c 100755
--- a/cluster/centos/master/scripts/apiserver.sh
+++ b/cluster/centos/master/scripts/apiserver.sh
@@ -38,7 +38,7 @@ KUBE_API_ADDRESS="--address=${MASTER_ADDRESS}"
 KUBE_API_PORT="--port=8080"
 
 # --kubelet-port=10250: Kubelet port
-MINION_PORT="--kubelet-port=10250"
+NODE_PORT="--kubelet-port=10250"
 
 # --allow-privileged=false: If true, allow privileged containers.
 KUBE_ALLOW_PRIV="--allow-privileged=false"
@@ -75,7 +75,7 @@ KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\
  \${KUBE_ETCD_SERVERS} \\
  \${KUBE_API_ADDRESS} \\
  \${KUBE_API_PORT} \\
- \${MINION_PORT} \\
+ \${NODE_PORT} \\
  \${KUBE_ALLOW_PRIV} \\
  \${KUBE_SERVICE_ADDRESSES} \\
  \${KUBE_ADMISSION_CONTROL} \\
diff --git a/cluster/centos/node/scripts/kubelet.sh b/cluster/centos/node/scripts/kubelet.sh
index e104c434060..41192390eb0 100755
--- a/cluster/centos/node/scripts/kubelet.sh
+++ b/cluster/centos/node/scripts/kubelet.sh
@@ -27,13 +27,13 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
 KUBE_LOG_LEVEL="--v=4"
 
 # --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)
-MINION_ADDRESS="--address=${NODE_ADDRESS}"
+NODE_ADDRESS="--address=${NODE_ADDRESS}"
 
 # --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag.
-MINION_PORT="--port=10250"
+NODE_PORT="--port=10250"
 
 # --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
-MINION_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
+NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
 
 # --api-servers=[]: List of Kubernetes API servers for publishing events,
 # and reading pods and services. (ip:port), comma separated.
@@ -48,9 +48,9 @@ EOF
 KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
  \${KUBE_LOG_LEVEL} \\
- \${MINION_ADDRESS} \\
- \${MINION_PORT} \\
- \${MINION_HOSTNAME} \\
+ \${NODE_ADDRESS} \\
+ \${NODE_PORT} \\
+ \${NODE_HOSTNAME} \\
  \${KUBELET_API_SERVER} \\
  \${KUBE_ALLOW_PRIV} \\
  \${KUBELET_ARGS}"
diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 3fb21a0f062..95d7575446d 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -19,20 +19,20 @@ GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-2}
-MINION_SIZE=${MINION_SIZE:-n1-standard-2}
-NUM_MINIONS=${NUM_MINIONS:-3}
+NODE_SIZE=${NODE_SIZE:-n1-standard-2}
+NUM_NODES=${NUM_NODES:-3}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
-MINION_DISK_TYPE=${MINION_DISK_TYPE:-pd-standard}
-MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
+NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
+NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
-PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false}
+PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 
 OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
 MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103}
 MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
-MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-"${MASTER_IMAGE}"}
-MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
+NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"}
+NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
 CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
 RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
 
@@ -40,10 +40,10 @@ NETWORK=${KUBE_GCE_NETWORK:-default}
 INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
+NODE_TAG="${INSTANCE_PREFIX}-minion"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
-MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
+NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL="${POLL_SLEEP_INTERVAL:-3}"
@@ -101,7 +101,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
   ENABLE_CLUSTER_MONITORING=googleinfluxdb
 fi
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index d0d4153ace3..55909e1c547 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -19,21 +19,21 @@ GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-2}
-MINION_SIZE=${MINION_SIZE:-n1-standard-2}
-NUM_MINIONS=${NUM_MINIONS:-3}
+NODE_SIZE=${NODE_SIZE:-n1-standard-2}
+NUM_NODES=${NUM_NODES:-3}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
-MINION_DISK_TYPE=${MINION_DISK_TYPE:-pd-standard}
-MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
+NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
+NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
 KUBE_APISERVER_REQUEST_TIMEOUT=300
-PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false}
+PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 
 OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
 MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103}
 MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
-MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-"${MASTER_IMAGE}"}
-MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
+NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"}
+NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
 CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
 RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
 
@@ -41,10 +41,10 @@ NETWORK=${KUBE_GCE_NETWORK:-e2e}
 INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
+NODE_TAG="${INSTANCE_PREFIX}-minion"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
-MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
+NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
 RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
 TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
 
@@ -109,7 +109,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi
 
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index fab6f6527bb..c71f6fae881 100755
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -285,7 +285,7 @@ opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
 enable_manifest_url: '$(echo "$ENABLE_MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url: '$(echo "$MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url_header: '$(echo "$MANIFEST_URL_HEADER" | sed -e "s/'/''/g")'
-num_nodes: $(echo "${NUM_MINIONS}")
+num_nodes: $(echo "${NUM_NODES}")
"${NUM_NODES}") e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' EOF diff --git a/cluster/gce/upgrade.sh b/cluster/gce/upgrade.sh index 89ad920d390..fa6839eb71b 100755 --- a/cluster/gce/upgrade.sh +++ b/cluster/gce/upgrade.sh @@ -121,15 +121,15 @@ function prepare-upgrade() { } -# Reads kube-env metadata from first node in MINION_NAMES. +# Reads kube-env metadata from first node in NODE_NAMES. # # Assumed vars: -# MINION_NAMES +# NODE_NAMES # PROJECT # ZONE function get-node-env() { # TODO(zmerlynn): Make this more reliable with retries. - gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${MINION_NAMES[0]} --command \ + gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \ "curl --fail --silent -H 'Metadata-Flavor: Google' \ 'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null } @@ -145,7 +145,7 @@ function get-env-val() { # Assumed vars: # KUBE_VERSION -# MINION_SCOPES +# NODE_SCOPES # NODE_INSTANCE_PREFIX # PROJECT # ZONE @@ -167,7 +167,7 @@ function upgrade-nodes() { # # Assumed vars: # KUBE_VERSION -# MINION_SCOPES +# NODE_SCOPES # NODE_INSTANCE_PREFIX # PROJECT # ZONE @@ -188,8 +188,8 @@ function prepare-node-upgrade() { # TODO(zmerlynn): Refactor setting scope flags. local scope_flags= - if [ -n "${MINION_SCOPES}" ]; then - scope_flags="--scopes ${MINION_SCOPES}" + if [ -n "${NODE_SCOPES}" ]; then + scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 655221c9d16..0705906f621 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -214,13 +214,13 @@ function upload-server-tars() { # Assumed vars: # NODE_INSTANCE_PREFIX # Vars set: -# MINION_NAMES +# NODE_NAMES function detect-minion-names { detect-project - MINION_NAMES=($(gcloud compute instance-groups managed list-instances \ + NODE_NAMES=($(gcloud compute instance-groups managed list-instances \ "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}" \ --format=yaml | grep instance: | cut -d ' ' -f 2)) - echo "MINION_NAMES=${MINION_NAMES[*]}" >&2 + echo "NODE_NAMES=${NODE_NAMES[*]}" >&2 } # Detect the information about the minions @@ -228,24 +228,24 @@ function detect-minion-names { # Assumed vars: # ZONE # Vars set: -# MINION_NAMES -# KUBE_MINION_IP_ADDRESSES (array) +# NODE_NAMES +# KUBE_NODE_IP_ADDRESSES (array) function detect-minions () { detect-project detect-minion-names - KUBE_MINION_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + KUBE_NODE_IP_ADDRESSES=() + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \ - "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \ + "${NODE_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \ --format=text | awk '{ print $2 }') if [[ -z "${minion_ip-}" ]] ; then - echo "Did not find ${MINION_NAMES[$i]}" >&2 + echo "Did not find ${NODE_NAMES[$i]}" >&2 else - echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + echo "Found ${NODE_NAMES[$i]} at ${minion_ip}" + KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") fi done - if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then + if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then echo "Could not detect Kubernetes minion nodes. 
Make sure you've launched a cluster with 'kube-up.sh'" >&2 exit 1 fi @@ -370,19 +370,19 @@ function create-node-template { local attempt=1 local preemptible_minions="" - if [[ "${PREEMPTIBLE_MINION}" == "true" ]]; then + if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then preemptible_minions="--preemptible --maintenance-policy TERMINATE" fi while true; do echo "Attempt ${attempt} to create ${1}" >&2 if ! gcloud compute instance-templates create "$template_name" \ --project "${PROJECT}" \ - --machine-type "${MINION_SIZE}" \ - --boot-disk-type "${MINION_DISK_TYPE}" \ - --boot-disk-size "${MINION_DISK_SIZE}" \ - --image-project="${MINION_IMAGE_PROJECT}" \ - --image "${MINION_IMAGE}" \ - --tags "${MINION_TAG}" \ + --machine-type "${NODE_SIZE}" \ + --boot-disk-type "${NODE_DISK_TYPE}" \ + --boot-disk-size "${NODE_DISK_SIZE}" \ + --image-project="${NODE_IMAGE_PROJECT}" \ + --image "${NODE_IMAGE}" \ + --tags "${NODE_TAG}" \ --network "${NETWORK}" \ ${preemptible_minions} \ $2 \ @@ -649,7 +649,7 @@ function kube-up { create-master-instance "${MASTER_RESERVED_IP}" & # Create a single firewall rule for all minions. - create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" & + create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" & # Report logging choice (if any). if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then @@ -663,8 +663,8 @@ function kube-up { # TODO(zmerlynn): Refactor setting scope flags. local scope_flags= - if [ -n "${MINION_SCOPES}" ]; then - scope_flags="--scopes ${MINION_SCOPES}" + if [ -n "${NODE_SCOPES}" ]; then + scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi @@ -680,7 +680,7 @@ function kube-up { --project "${PROJECT}" \ --zone "${ZONE}" \ --base-instance-name "${NODE_INSTANCE_PREFIX}" \ - --size "${NUM_MINIONS}" \ + --size "${NUM_NODES}" \ --template "$template_name" || true; gcloud compute instance-groups managed wait-until-stable \ "${NODE_INSTANCE_PREFIX}-group" \ @@ -877,11 +877,11 @@ function kube-down { fi # Delete firewall rule for minions. - if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then + if gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-all" &>/dev/null; then gcloud compute firewall-rules delete \ --project "${PROJECT}" \ --quiet \ - "${MINION_TAG}-all" + "${NODE_TAG}-all" fi # Delete routes. @@ -989,7 +989,7 @@ function check-resources { return 1 fi - if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then + if gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-all" &>/dev/null; then KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-all" return 1 fi @@ -1040,8 +1040,8 @@ function prepare-push() { # TODO(zmerlynn): Refactor setting scope flags. 
local scope_flags= - if [ -n "${MINION_SCOPES}" ]; then - scope_flags="--scopes ${MINION_SCOPES}" + if [ -n "${NODE_SCOPES}" ]; then + scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi @@ -1105,8 +1105,8 @@ function kube-push { push-master - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - push-node "${MINION_NAMES[$i]}" & + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + push-node "${NODE_NAMES[$i]}" & done wait-for-jobs @@ -1153,34 +1153,34 @@ function test-setup { local start=`date +%s` gcloud compute firewall-rules create \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG}" \ + --target-tags "${NODE_TAG}" \ --allow tcp:80,tcp:8080 \ --network "${NETWORK}" \ - "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true + "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true # As there is no simple way to wait longer for this operation we need to manually # wait some additional time (20 minutes altogether). - until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] + until gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] do sleep 5 done # Check if the firewall rule exists and fail if it does not. - gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" + gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" # Open up the NodePort range # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default. start=`date +%s` gcloud compute firewall-rules create \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG}" \ + --target-tags "${NODE_TAG}" \ --allow tcp:30000-32767,udp:30000-32767 \ --network "${NETWORK}" \ - "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true + "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true # As there is no simple way to wait longer for this operation we need to manually # wait some additional time (20 minutes altogether). - until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] + until gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ] do sleep 5 done # Check if the firewall rule exists and fail if it does not. - gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" + gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" } # Execute after running tests to perform any required clean-up. 
This is called @@ -1191,11 +1191,11 @@ function test-teardown { gcloud compute firewall-rules delete \ --project "${PROJECT}" \ --quiet \ - "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true + "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" || true gcloud compute firewall-rules delete \ --project "${PROJECT}" \ --quiet \ - "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" || true + "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" || true "${KUBE_ROOT}/cluster/kube-down.sh" } @@ -1333,7 +1333,7 @@ KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-}) ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false}) MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-}) MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-}) -NUM_MINIONS: $(yaml-quote ${NUM_MINIONS}) +NUM_NODES: $(yaml-quote ${NUM_NODES}) EOF if [ -n "${APISERVER_TEST_ARGS:-}" ]; then cat >>$file <&2 @@ -143,9 +143,9 @@ function kube-up() { local create_args=( "--zone=${ZONE}" "--project=${PROJECT}" - "--num-nodes=${NUM_MINIONS}" + "--num-nodes=${NUM_NODES}" "--network=${NETWORK}" - "--scopes=${MINION_SCOPES}" + "--scopes=${NODE_SCOPES}" "--cluster-version=${CLUSTER_API_VERSION}" "--machine-type=${MACHINE_TYPE}" ) @@ -163,7 +163,7 @@ function kube-up() { # GCLOUD # ZONE # Vars set: -# MINION_TAG +# NODE_TAG function test-setup() { echo "... in gke:test-setup()" >&2 # Detect the project into $PROJECT if it isn't set @@ -171,22 +171,22 @@ function test-setup() { detect-minions >&2 # At this point, CLUSTER_NAME should have been used, so its value is final. - MINION_TAG=$($GCLOUD compute instances describe ${MINION_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1) - OLD_MINION_TAG="k8s-${CLUSTER_NAME}-node" + NODE_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1) + OLD_NODE_TAG="k8s-${CLUSTER_NAME}-node" # Open up port 80 & 8080 so common containers on minions can be reached. "${GCLOUD}" compute firewall-rules create \ "${CLUSTER_NAME}-http-alt" \ --allow tcp:80,tcp:8080 \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG},${OLD_MINION_TAG}" \ + --target-tags "${NODE_TAG},${OLD_NODE_TAG}" \ --network="${NETWORK}" "${GCLOUD}" compute firewall-rules create \ "${CLUSTER_NAME}-nodeports" \ --allow tcp:30000-32767,udp:30000-32767 \ --project "${PROJECT}" \ - --target-tags "${MINION_TAG},${OLD_MINION_TAG}" \ + --target-tags "${NODE_TAG},${OLD_NODE_TAG}" \ --network="${NETWORK}" } @@ -209,7 +209,7 @@ function detect-master() { # Assumed vars: # none # Vars set: -# MINION_NAMES +# NODE_NAMES function detect-minions() { echo "... in gke:detect-minions()" >&2 detect-minion-names @@ -220,16 +220,16 @@ function detect-minions() { # Assumed vars: # none # Vars set: -# MINION_NAMES +# NODE_NAMES function detect-minion-names { echo "... 
in gke:detect-minion-names()" >&2 detect-project detect-node-instance-group - MINION_NAMES=($(gcloud compute instance-groups managed list-instances \ + NODE_NAMES=($(gcloud compute instance-groups managed list-instances \ "${NODE_INSTANCE_GROUP}" --zone "${ZONE}" --project "${PROJECT}" \ --format=yaml | grep instance: | cut -d ' ' -f 2)) - echo "MINION_NAMES=${MINION_NAMES[*]}" + echo "NODE_NAMES=${NODE_NAMES[*]}" } # Detect instance group name generated by gke diff --git a/cluster/juju/config-test.sh b/cluster/juju/config-test.sh index 7f7ad16b1f0..7bda9c8cbcc 100644 --- a/cluster/juju/config-test.sh +++ b/cluster/juju/config-test.sh @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -NUM_MINIONS=${NUM_MINIONS:-2} +NUM_NODES=${NUM_NODES:-2} diff --git a/cluster/juju/util.sh b/cluster/juju/util.sh index 2ed2dd70aba..031c3cc0717 100755 --- a/cluster/juju/util.sh +++ b/cluster/juju/util.sh @@ -69,9 +69,9 @@ function detect-minions() { # ] # Strip out the IP addresses - export KUBE_MINION_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}")) - # echo "Kubernetes minions: " ${KUBE_MINION_IP_ADDRESSES[@]} 1>&2 - export NUM_MINIONS=${#KUBE_MINION_IP_ADDRESSES[@]} + export KUBE_NODE_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}")) + # echo "Kubernetes minions: " ${KUBE_NODE_IP_ADDRESSES[@]} 1>&2 + export NUM_NODES=${#KUBE_NODE_IP_ADDRESSES[@]} } function get-password() { diff --git a/cluster/kube-util.sh b/cluster/kube-util.sh index 8bbb3b97ca2..d78622cd255 100644 --- a/cluster/kube-util.sh +++ b/cluster/kube-util.sh @@ -26,12 +26,12 @@ function detect-master { # Get minion names if they are not static. function detect-minion-names { - echo "MINION_NAMES: [${MINION_NAMES[*]}]" 1>&2 + echo "NODE_NAMES: [${NODE_NAMES[*]}]" 1>&2 } -# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[] function detect-minions { - echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2 + echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2 } # Verify prereqs on host machine diff --git a/cluster/kubemark/config-default.sh b/cluster/kubemark/config-default.sh index babcdd6c4c3..9bc5392ad23 100644 --- a/cluster/kubemark/config-default.sh +++ b/cluster/kubemark/config-default.sh @@ -22,11 +22,11 @@ GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} MASTER_SIZE=${MASTER_SIZE:-n1-standard-4} -NUM_MINIONS=${NUM_MINIONS:-100} +NUM_NODES=${NUM_NODES:-100} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} -PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} +PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} diff --git a/cluster/libvirt-coreos/config-default.sh b/cluster/libvirt-coreos/config-default.sh index d03fbe47067..93197d064fb 100644 --- a/cluster/libvirt-coreos/config-default.sh +++ b/cluster/libvirt-coreos/config-default.sh @@ -17,8 +17,8 @@ ## Contains configuration values for interacting with the libvirt CoreOS cluster # Number of minions in the cluster -NUM_MINIONS=${NUM_MINIONS:-3} -export NUM_MINIONS +NUM_NODES=${NUM_NODES:-3} +export NUM_NODES # The IP of the master export MASTER_IP="192.168.10.1" @@ -33,18 +33,18 @@ MASTER_CONTAINER_NETMASK="255.255.255.0" MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" 
 MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
 CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
-if [[ "$NUM_MINIONS" -gt 253 ]]; then
+if [[ "$NUM_NODES" -gt 253 ]]; then
   echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 nodes"
   exit 1
 fi
-for ((i=0; i < NUM_MINIONS; i++)) do
+for ((i=0; i < NUM_NODES; i++)) do
   NODE_IPS[$i]="${NODE_IP_BASE}$((i+2))"
   NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))"
   NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
   NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
   NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
 done
-NODE_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET
+NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET
 
 SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET
diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh
index 4a1c74be1b1..76dea254afb 100644
--- a/cluster/libvirt-coreos/util.sh
+++ b/cluster/libvirt-coreos/util.sh
@@ -167,8 +167,8 @@ function wait-cluster-readiness {
   local timeout=120
   while [[ $timeout -ne 0 ]]; do
     nb_ready_nodes=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true)
-    echo "Nb ready nodes: $nb_ready_nodes / $NUM_MINIONS"
-    if [[ "$nb_ready_nodes" -eq "$NUM_MINIONS" ]]; then
+    echo "Nb ready nodes: $nb_ready_nodes / $NUM_NODES"
+    if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then
       return 0
     fi
 
@@ -191,8 +191,8 @@ function kube-up {
   readonly kubernetes_dir="$POOL_PATH/kubernetes"
 
   local i
-  for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
-    if [[ $i -eq $NUM_MINIONS ]]; then
+  for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
+    if [[ $i -eq $NUM_NODES ]]; then
       etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380"
     else
       etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380"
@@ -201,8 +201,8 @@ function kube-up {
   etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}")
   readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}")
 
-  for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
-    if [[ $i -eq $NUM_MINIONS ]]; then
+  for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
+    if [[ $i -eq $NUM_NODES ]]; then
       type=master
       name=$MASTER_NAME
       public_ip=$MASTER_IP
@@ -262,7 +262,7 @@ function upload-server-tars {
 function kube-push {
   kube-push-internal
   ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler"
-  for ((i=0; i < NUM_MINIONS; i++)); do
+  for ((i=0; i < NUM_NODES; i++)); do
     ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy"
   done
   wait-cluster-readiness
@@ -317,7 +317,7 @@ function ssh-to-node {
   elif [[ "$node" == "$MASTER_NAME" ]]; then
     machine="$MASTER_IP"
   else
-    for ((i=0; i < NUM_MINIONS; i++)); do
+    for ((i=0; i < NUM_NODES; i++)); do
      if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then
        machine="${NODE_IPS[$i]}"
        break
diff --git a/cluster/mesos/docker/config-default.sh b/cluster/mesos/docker/config-default.sh
index ab2482d0917..45f45f9a548 100755
--- a/cluster/mesos/docker/config-default.sh
+++ b/cluster/mesos/docker/config-default.sh
@@ -16,10 +16,10 @@
 
 ## Contains configuration values for interacting with the mesos/docker cluster
 
-NUM_MINIONS=${NUM_MINIONS:-2}
+NUM_NODES=${NUM_NODES:-2}
 INSTANCE_PREFIX="${INSTANCE_PREFIX:-kubernetes}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
-MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
+NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
 
 SERVICE_CLUSTER_IP_RANGE=10.10.10.0/24
diff --git a/cluster/mesos/docker/config-test.sh b/cluster/mesos/docker/config-test.sh
index 4e6261bb4bd..13ab3994f7b 100644
--- a/cluster/mesos/docker/config-test.sh
+++ b/cluster/mesos/docker/config-test.sh
@@ -15,8 +15,8 @@
 # limitations under the License.
 
 ## Contains configuration values for interacting with the docker-compose cluster in test mode
-#Set NUM_MINIONS to minimum required for testing.
-NUM_MINIONS=2
+#Set NUM_NODES to minimum required for testing.
+NUM_NODES=2
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
 source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/config-default.sh"
diff --git a/cluster/mesos/docker/util.sh b/cluster/mesos/docker/util.sh
index 838936f9ec3..38d58991235 100644
--- a/cluster/mesos/docker/util.sh
+++ b/cluster/mesos/docker/util.sh
@@ -204,7 +204,7 @@ function detect-master {
   echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" 1>&2
 }
 
-# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
+# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
 # These Mesos slaves MAY host Kublets,
 # but might not have a Kublet running unless a kubernetes task has been scheduled on them.
 function detect-minions {
@@ -215,9 +215,9 @@ function detect-minions {
   fi
   while read -r docker_id; do
     local minion_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" "${docker_id}")
-    KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
+    KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
   done <<< "$docker_ids"
-  echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2
+  echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2
 }
 
 # Verify prereqs on host machine
@@ -283,8 +283,8 @@ function kube-up {
   echo "Starting ${KUBERNETES_PROVIDER} cluster" 1>&2
   cluster::mesos::docker::docker_compose up -d
 
-  echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_MINIONS} slaves"
-  cluster::mesos::docker::docker_compose scale mesosslave=${NUM_MINIONS}
+  echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_NODES} slaves"
+  cluster::mesos::docker::docker_compose scale mesosslave=${NUM_NODES}
 
   # await-health-check requires GNU timeout
   # apiserver hostname resolved by docker
diff --git a/cluster/options.md b/cluster/options.md
index 726941e8a3b..3b2fa150fbb 100644
--- a/cluster/options.md
+++ b/cluster/options.md
@@ -8,7 +8,7 @@ These options apply across providers. There are additional documents for option
 
 This is a work-in-progress; not all options are documented yet!
 
-**NUM_MINIONS**
+**NUM_NODES**
 
 The number of minion instances to create. Most providers default this to 4.
 
diff --git a/cluster/rackspace/config-default.sh b/cluster/rackspace/config-default.sh
index 96652ad059d..e383ebc9726 100755
--- a/cluster/rackspace/config-default.sh
+++ b/cluster/rackspace/config-default.sh
@@ -16,7 +16,7 @@
 
 # Sane defaults for dev environments. The following variables can be easily overriden
 # by setting each as a ENV variable ahead of time:
-# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME
+# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_NODES, NOVA_NETWORK and SSH_KEY_NAME
 
 # Shared
 KUBE_IMAGE="${KUBE_IMAGE-f2a71670-ced3-4274-80b6-0efcd0f8f91b}" # CoreOS(Beta)
@@ -32,9 +32,9 @@ MASTER_TAG="tags=${INSTANCE_PREFIX}-master"
 
 # Node
 KUBE_NODE_FLAVOR="${KUBE_NODE_FLAVOR-general1-2}"
-NUM_MINIONS="${NUM_MINIONS-4}"
+NUM_NODES="${NUM_NODES-4}"
 NODE_TAG="tags=${INSTANCE_PREFIX}-node"
-NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_MINIONS}}))
+NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_NODES}}))
 KUBE_NETWORK="10.240.0.0/16"
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
diff --git a/cluster/ubuntu/config-default.sh b/cluster/ubuntu/config-default.sh
index 680157b033b..16b6c65bad5 100755
--- a/cluster/ubuntu/config-default.sh
+++ b/cluster/ubuntu/config-default.sh
@@ -27,7 +27,7 @@ role=${role:-"ai i i"}
 export roles=($role)
 
 # Define minion numbers
-export NUM_MINIONS=${NUM_MINIONS:-3}
+export NUM_NODES=${NUM_NODES:-3}
 # define the IP range used for service cluster IPs.
 # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
 export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # formerly PORTAL_NET
diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh
index 2a8609b6a05..d7acde8f18f 100755
--- a/cluster/vagrant/config-default.sh
+++ b/cluster/vagrant/config-default.sh
@@ -17,8 +17,8 @@
 ## Contains configuration values for interacting with the Vagrant cluster
 
 # Number of minions in the cluster
-NUM_MINIONS=${NUM_MINIONS-"1"}
-export NUM_MINIONS
+NUM_NODES=${NUM_NODES-"1"}
+export NUM_NODES
 
 # The IP of the master
 export MASTER_IP=${MASTER_IP-"10.245.1.2"}
@@ -31,19 +31,19 @@ export MASTER_NAME="${INSTANCE_PREFIX}-master"
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
 
 # Map out the IPs, names and container subnets of each minion
-export MINION_IP_BASE=${MINION_IP_BASE-"10.245.1."}
-MINION_CONTAINER_SUBNET_BASE="10.246"
+export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."}
+NODE_CONTAINER_SUBNET_BASE="10.246"
 MASTER_CONTAINER_NETMASK="255.255.255.0"
-MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1"
-MASTER_CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.1/24"
-CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16"
-for ((i=0; i < NUM_MINIONS; i++)) do
-  MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))"
-  MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
-  MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
-  MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1"
-  MINION_CONTAINER_NETMASKS[$i]="255.255.255.0"
-  VAGRANT_MINION_NAMES[$i]="minion-$((i+1))"
+MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
+MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
+CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
+for ((i=0; i < NUM_NODES; i++)) do
+  NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))"
+  NODE_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
+  NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
+  NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
+  NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
+  VAGRANT_NODE_NAMES[$i]="minion-$((i+1))"
 done
 
 SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET
diff --git a/cluster/vagrant/config-test.sh b/cluster/vagrant/config-test.sh
index 8998b533905..e9919743ffd 100644
--- a/cluster/vagrant/config-test.sh
+++ b/cluster/vagrant/config-test.sh
@@ -15,8 +15,8 @@
 # limitations under the License.
 
 ## Contains configuration values for interacting with the Vagrant cluster in test mode
-#Set NUM_MINIONS to minimum required for testing.
-NUM_MINIONS=2
+#Set NUM_NODES to minimum required for testing.
+NUM_NODES=2
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vagrant/config-default.sh"
diff --git a/cluster/vagrant/pod-ip-test.sh b/cluster/vagrant/pod-ip-test.sh
index 23031482576..f397ab8c438 100755
--- a/cluster/vagrant/pod-ip-test.sh
+++ b/cluster/vagrant/pod-ip-test.sh
@@ -59,7 +59,7 @@ cd "${KUBE_ROOT}"
 echo All verbose output will be redirected to $logfile, use --logfile option to change.
 
 printf "Start the cluster with 2 minions .. "
-export NUM_MINIONS=2
+export NUM_NODES=2
 export KUBERNETES_PROVIDER=vagrant
 
 (cluster/kube-up.sh >>"$logfile" 2>&1) || true
diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh
index da94a72cf72..5a16929087b 100755
--- a/cluster/vagrant/provision-master.sh
+++ b/cluster/vagrant/provision-master.sh
@@ -68,9 +68,9 @@ fi
 
 # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
-for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-  minion=${MINION_NAMES[$i]}
-  ip=${MINION_IPS[$i]}
+for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
+  minion=${NODE_NAMES[$i]}
+  ip=${NODE_IPS[$i]}
   if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
     echo "Adding $minion to hosts file"
     echo "$ip $minion" >> /etc/hosts
diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh
index f0cebe264ae..f5d2927c8cc 100755
--- a/cluster/vagrant/provision-minion.sh
+++ b/cluster/vagrant/provision-minion.sh
@@ -70,7 +70,7 @@ EOF
 
 # Set the host name explicitly
 # See: https://github.com/mitchellh/vagrant/issues/2430
-hostnamectl set-hostname ${MINION_NAME}
+hostnamectl set-hostname ${NODE_NAME}
 
 if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=21 ]]; then
   # Workaround to vagrant inability to guess interface naming sequence
@@ -94,12 +94,12 @@ if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
   echo "Adding $MASTER_NAME to hosts file"
   echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
 fi
-echo "$MINION_IP $MINION_NAME" >> /etc/hosts
+echo "$NODE_IP $NODE_NAME" >> /etc/hosts
 
 # Setup hosts file to support ping by hostname to each minion in the cluster
-for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-  minion=${MINION_NAMES[$i]}
-  ip=${MINION_IPS[$i]}
+for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
+  minion=${NODE_NAMES[$i]}
+  ip=${NODE_IPS[$i]}
   if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
     echo "Adding $minion to hosts file"
     echo "$ip $minion" >> /etc/hosts
@@ -145,13 +145,13 @@ cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
   cloud: vagrant
   network_mode: openvswitch
-  node_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
+  node_ip: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
   api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
   networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
   roles:
     - kubernetes-pool
   cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
-  hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
+  hostname_override: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
   docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
 EOF
 
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index b4ddc99fbb8..5f2464c4529 100644
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -25,10 +25,10 @@ function detect-master () {
   echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
 }
 
-# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
+# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
 function detect-minions {
   echo "Minions already detected" 1>&2
-  KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}")
+  KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
 }
 
 # Verify prereqs on host machine Also sets exports USING_KUBE_SCRIPTS=true so
@@ -124,15 +124,15 @@ function create-provision-scripts {
       echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
       echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
       echo "MASTER_IP='${MASTER_IP}'"
-      echo "MINION_NAMES=(${MINION_NAMES[@]})"
-      echo "MINION_IPS=(${MINION_IPS[@]})"
+      echo "NODE_NAMES=(${NODE_NAMES[@]})"
+      echo "NODE_IPS=(${NODE_IPS[@]})"
       echo "NODE_IP='${MASTER_IP}'"
       echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
      echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
      echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
      echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
-      echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'"
-      echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
+      echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
+      echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
       echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
       echo "MASTER_USER='${MASTER_USER}'"
       echo "MASTER_PASSWD='${MASTER_PASSWD}'"
@@ -163,21 +163,21 @@ function create-provision-scripts {
       awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
     ) > "${KUBE_TEMP}/master-start.sh"
 
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     (
       echo "#! /bin/bash"
       echo "MASTER_NAME='${MASTER_NAME}'"
       echo "MASTER_IP='${MASTER_IP}'"
-      echo "MINION_NAMES=(${MINION_NAMES[@]})"
-      echo "MINION_NAME=(${MINION_NAMES[$i]})"
-      echo "MINION_IPS=(${MINION_IPS[@]})"
-      echo "MINION_IP='${MINION_IPS[$i]}'"
-      echo "MINION_ID='$i'"
-      echo "NODE_IP='${MINION_IPS[$i]}'"
+      echo "NODE_NAMES=(${NODE_NAMES[@]})"
+      echo "NODE_NAME=(${NODE_NAMES[$i]})"
+      echo "NODE_IPS=(${NODE_IPS[@]})"
+      echo "NODE_IP='${NODE_IPS[$i]}'"
+      echo "NODE_ID='$i'"
+      echo "NODE_IP='${NODE_IPS[$i]}'"
       echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
-      echo "CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
-      echo "CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
-      echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
+      echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
+      echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
+      echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
       echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
       echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
       echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
@@ -222,9 +222,9 @@ function verify-cluster {
   # verify each minion has all required daemons
   local i
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-    echo "Validating ${VAGRANT_MINION_NAMES[$i]}"
-    local machine=${VAGRANT_MINION_NAMES[$i]}
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
+    echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
+    local machine=${VAGRANT_NODE_NAMES[$i]}
     local -a required_daemon=("salt-minion" "kubelet" "docker")
     local validated="1"
     until [[ "$validated" == "0" ]]; do
@@ -242,13 +242,13 @@ function verify-cluster {
   echo
   echo "Waiting for each minion to be registered with cloud provider"
-  for (( i=0; i<${#MINION_IPS[@]}; i++)); do
-    local machine="${MINION_IPS[$i]}"
+  for (( i=0; i<${#NODE_IPS[@]}; i++)); do
+    local machine="${NODE_IPS[$i]}"
     local count="0"
     until [[ "$count" == "1" ]]; do
       local minions
       minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1)
-      count=$(echo $minions | grep -c "${MINION_IPS[i]}") || {
+      count=$(echo $minions | grep -c "${NODE_IPS[i]}") || {
         printf "."
         sleep 2
         count="0"
@@ -339,7 +339,7 @@ function test-teardown {
 # Find the minion name based on the IP address
 function find-vagrant-name-by-ip {
   local ip="$1"
-  local ip_pattern="${MINION_IP_BASE}(.*)"
+  local ip_pattern="${NODE_IP_BASE}(.*)"
 
   # This is subtle. We map 10.245.2.2 -> minion-1. We do this by matching a
   # regexp and using the capture to construct the name.
diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh
index 9b448c98479..23137a0e60a 100755
--- a/cluster/validate-cluster.sh
+++ b/cluster/validate-cluster.sh
@@ -24,7 +24,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
 source "${KUBE_ROOT}/cluster/kube-env.sh"
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
-EXPECTED_NUM_NODES="${NUM_MINIONS}"
+EXPECTED_NUM_NODES="${NUM_NODES}"
 if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
   EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1))
 fi
diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh
index e039f4b71f8..a1b8ac2107d 100755
--- a/cluster/vsphere/config-default.sh
+++ b/cluster/vsphere/config-default.sh
@@ -14,22 +14,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-NUM_MINIONS=4 +NUM_NODES=4 DISK=./kube/kube.vmdk GUEST_ID=debian7_64Guest INSTANCE_PREFIX=kubernetes MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 MASTER_CPU=1 -MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) -MINION_MEMORY_MB=2048 -MINION_CPU=1 +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) +NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24")) +NODE_MEMORY_MB=2048 +NODE_CPU=1 SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 88740908d0d..0a7013fa5d2 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -14,22 +14,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -NUM_MINIONS=2 +NUM_NODES=2 DISK=./kube/kube.vmdk GUEST_ID=debian7_64Guest INSTANCE_PREFIX="e2e-test-${USER}" MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" +NODE_TAG="${INSTANCE_PREFIX}-minion" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 MASTER_CPU=1 -MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) -MINION_MEMORY_MB=1024 -MINION_CPU=1 +NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) +NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24")) +NODE_MEMORY_MB=1024 +NODE_CPU=1 SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/templates/salt-minion.sh b/cluster/vsphere/templates/salt-minion.sh index 2f7dcafc915..3a02aa2bbfc 100755 --- a/cluster/vsphere/templates/salt-minion.sh +++ b/cluster/vsphere/templates/salt-minion.sh @@ -41,7 +41,7 @@ grains: roles: - kubernetes-pool - kubernetes-pool-vsphere - cbr-cidr: $MINION_IP_RANGE + cbr-cidr: $NODE_IP_RANGE EOF # Install Salt diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index c14452fbfb2..a003f725ba6 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -45,21 +45,21 @@ function detect-master { # Detect the information about the minions # # Assumed vars: -# MINION_NAMES +# NODE_NAMES # Vars set: -# KUBE_MINION_IP_ADDRESS (array) +# KUBE_NODE_IP_ADDRESS (array) function detect-minions { - KUBE_MINION_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]}) + KUBE_NODE_IP_ADDRESSES=() + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + local minion_ip=$(govc vm.ip ${NODE_NAMES[$i]}) if [[ -z "${minion_ip-}" ]] ; then - echo "Did not find ${MINION_NAMES[$i]}" >&2 + echo "Did not find ${NODE_NAMES[$i]}" >&2 else - echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + echo "Found ${NODE_NAMES[$i]} at ${minion_ip}" + KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") fi done - if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then + if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2 exit 1 fi @@ -266,20 +266,20 @@ function kube-up { echo "Starting minion VMs (this can take a minute)..." - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do ( echo "#! 
/bin/bash" - echo "readonly MY_NAME=${MINION_NAMES[$i]}" + echo "readonly MY_NAME=${NODE_NAMES[$i]}" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" echo "KUBE_MASTER=${KUBE_MASTER}" echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" - echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" + echo "NODE_IP_RANGE=${NODE_IP_RANGES[$i]}" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh" ) > "${KUBE_TEMP}/minion-start-${i}.sh" ( - kube-up-vm "${MINION_NAMES[$i]}" -c ${MINION_CPU-1} -m ${MINION_MEMORY_MB-1024} - kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" + kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024} + kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" ) & done @@ -312,10 +312,10 @@ function kube-up { printf " OK\n" local i - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - printf "Waiting for ${MINION_NAMES[$i]} to become available..." + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + printf "Waiting for ${NODE_NAMES[$i]} to become available..." until curl --max-time 5 \ - --fail --output /dev/null --silent "http://${KUBE_MINION_IP_ADDRESSES[$i]}:10250/healthz"; do + --fail --output /dev/null --silent "http://${KUBE_NODE_IP_ADDRESSES[$i]}:10250/healthz"; do printf "." sleep 2 done @@ -347,10 +347,10 @@ function kube-up { # Basic sanity checking local i - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do # Make sure docker is installed - kube-ssh "${KUBE_MINION_IP_ADDRESSES[$i]}" which docker > /dev/null || { - echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 + kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || { + echo "Docker failed to install on ${NODE_NAMES[$i]}. Your cluster is unlikely" >&2 echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 echo "cluster. (sorry!)" >&2 exit 1 @@ -372,8 +372,8 @@ function kube-up { function kube-down { govc vm.destroy ${MASTER_NAME} & - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - govc vm.destroy ${MINION_NAMES[i]} & + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + govc vm.destroy ${NODE_NAMES[i]} & done wait diff --git a/docs/admin/cluster-large.md b/docs/admin/cluster-large.md index 350da95e9e8..ceb51ff8dfe 100644 --- a/docs/admin/cluster-large.md +++ b/docs/admin/cluster-large.md @@ -41,7 +41,7 @@ At v1.0, Kubernetes supports clusters up to 100 nodes with 30 pods per node and A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane). -Normally the number of nodes in a cluster is controlled by the the value `NUM_MINIONS` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)). +Normally the number of nodes in a cluster is controlled by the the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)). Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run in to quota issues and fail to bring the cluster up. 
diff --git a/docs/design/aws_under_the_hood.md b/docs/design/aws_under_the_hood.md
index 9fe46d6f288..a55c09e31e5 100644
--- a/docs/design/aws_under_the_hood.md
+++ b/docs/design/aws_under_the_hood.md
@@ -250,7 +250,7 @@ cross-AZ-clusters are more convenient.
* For auto-scaling, on each nodes it creates a launch configuration and group.
  The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-minion-group. The default
  name is kubernetes-minion-group. The auto-scaling group has a min and max size
-  that are both set to NUM_MINIONS. You can change the size of the auto-scaling
+  that are both set to NUM_NODES. You can change the size of the auto-scaling
  group to add or remove the total number of nodes from within the AWS API or
  Console. Each nodes self-configures, meaning that they come up; run Salt with
  the stored configuration; connect to the master; are assigned an internal CIDR;
diff --git a/docs/design/networking.md b/docs/design/networking.md
index 56009d5b85b..b110ca75ade 100644
--- a/docs/design/networking.md
+++ b/docs/design/networking.md
@@ -132,11 +132,11 @@ differentiate it from `docker0`) is set up outside of Docker proper.
Example of GCE's advanced routing rules:

```sh
-gcloud compute routes add "${MINION_NAMES[$i]}" \
+gcloud compute routes add "${NODE_NAMES[$i]}" \
  --project "${PROJECT}" \
-  --destination-range "${MINION_IP_RANGES[$i]}" \
+  --destination-range "${NODE_IP_RANGES[$i]}" \
  --network "${NETWORK}" \
-  --next-hop-instance "${MINION_NAMES[$i]}" \
+  --next-hop-instance "${NODE_NAMES[$i]}" \
  --next-hop-instance-zone "${ZONE}" &
```
diff --git a/docs/devel/developer-guides/vagrant.md b/docs/devel/developer-guides/vagrant.md
index 61560db7f48..2d628abb736 100644
--- a/docs/devel/developer-guides/vagrant.md
+++ b/docs/devel/developer-guides/vagrant.md
@@ -301,7 +301,7 @@ Congratulations!
The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`:

```sh
-NUM_MINIONS=3 hack/e2e-test.sh
+NUM_NODES=3 hack/e2e-test.sh
```

### Troubleshooting
@@ -350,10 +350,10 @@ Are you sure you built a release first? Did you install `net-tools`? For more cl

#### I want to change the number of nodes!

-You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so:
+You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_NODES` to 1, like so:

```sh
-export NUM_MINIONS=1
+export NUM_NODES=1
```

#### I want my VMs to have more memory!
@@ -369,7 +369,7 @@ If you need more granular control, you can set the amount of memory for the mast

```sh
export KUBERNETES_MASTER_MEMORY=1536
-export KUBERNETES_MINION_MEMORY=2048
+export KUBERNETES_NODE_MEMORY=2048
```

#### I ran vagrant suspend and nothing works!
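Reviewer note: the Vagrant developer guide hunks above rename several knobs at once; they compose as below when re-testing the docs end to end. A sketch only; the memory values are the guide's own examples, not recommendations:

```sh
# Single-node Vagrant cluster using the renamed variables.
export KUBERNETES_PROVIDER=vagrant
export NUM_NODES=1                    # was NUM_MINIONS
export KUBERNETES_MASTER_MEMORY=1536  # MB for the master VM
export KUBERNETES_NODE_MEMORY=2048    # was KUBERNETES_MINION_MEMORY
cluster/kube-up.sh
```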
diff --git a/docs/devel/kubemark-guide.md b/docs/devel/kubemark-guide.md
index 758963dec7d..df0ecb96271 100644
--- a/docs/devel/kubemark-guide.md
+++ b/docs/devel/kubemark-guide.md
@@ -73,7 +73,7 @@ To start a Kubemark cluster on GCE you need to create an external cluster (it ca
`make quick-release`) and run `test/kubemark/start-kubemark.sh` script. This script will create a VM for master components, Pods for HollowNodes and do all the setup necessary to let them talk to each other. It will use the configuration stored in `cluster/kubemark/config-default.sh` - you can tweak it however you want, but note that some features may not be implemented yet, as implementation of Hollow components/mocks will probably be lagging behind ‘real’ one. For performance tests interesting variables are
-`NUM_MINIONS` and `MASTER_SIZE`. After start-kubemark script is finished you’ll have a ready Kubemark cluster, a kubeconfig file for talking to the Kubemark
+`NUM_NODES` and `MASTER_SIZE`. After start-kubemark script is finished you’ll have a ready Kubemark cluster, a kubeconfig file for talking to the Kubemark
cluster is stored in `test/kubemark/kubeconfig.loc`.

Currently we're running HollowNode with limit of 0.05 a CPU core and ~60MB or memory, which taking into account default cluster addons and fluentD running on an 'external'
diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md
index 3c092c1e439..3e36cfaa8a8 100644
--- a/docs/getting-started-guides/aws.md
+++ b/docs/getting-started-guides/aws.md
@@ -83,15 +83,15 @@ You can override the variables defined in [config-default.sh](http://releases.k8

```bash
export KUBE_AWS_ZONE=eu-west-1c
-export NUM_MINIONS=2
-export MINION_SIZE=m3.medium
+export NUM_NODES=2
+export NODE_SIZE=m3.medium
export AWS_S3_REGION=eu-west-1
export AWS_S3_BUCKET=mycompany-kubernetes-artifacts
export INSTANCE_PREFIX=k8s
...
```

-The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`, in particular for clusters less than 50 nodes it will
+The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`, in particular for clusters less than 50 nodes it will
use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`.

It will also try to create or reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion".
diff --git a/docs/getting-started-guides/libvirt-coreos.md b/docs/getting-started-guides/libvirt-coreos.md
index 7d73a560491..2d3ec2ad050 100644
--- a/docs/getting-started-guides/libvirt-coreos.md
+++ b/docs/getting-started-guides/libvirt-coreos.md
@@ -167,7 +167,7 @@ cluster/kube-up.sh

The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.

-The `NUM_MINIONS` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3.
+The `NUM_NODES` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3.

The `KUBE_PUSH` environment variable may be set to specify which Kubernetes binaries must be deployed on the cluster.
Its possible values are:
@@ -225,7 +225,7 @@ export KUBERNETES_PROVIDER=libvirt-coreos

Bring up a libvirt-CoreOS cluster of 5 nodes

```sh
-NUM_MINIONS=5 cluster/kube-up.sh
+NUM_NODES=5 cluster/kube-up.sh
```

Destroy the libvirt-CoreOS cluster
diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md
index 7ad434b4c81..25188d8e347 100644
--- a/docs/getting-started-guides/rackspace.md
+++ b/docs/getting-started-guides/rackspace.md
@@ -84,7 +84,7 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo
- flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network.
2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password).
3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems.
-4. We then boot as many nodes as defined via `$NUM_MINIONS`.
+4. We then boot as many nodes as defined via `$NUM_NODES`.

## Some notes
diff --git a/docs/getting-started-guides/rkt/README.md b/docs/getting-started-guides/rkt/README.md
index cae3b48d6fa..c9c2208bd29 100644
--- a/docs/getting-started-guides/rkt/README.md
+++ b/docs/getting-started-guides/rkt/README.md
@@ -72,8 +72,8 @@ To use rkt as the container runtime for your CoreOS cluster on GCE, you need to

```console
$ export KUBE_OS_DISTRIBUTION=coreos
-$ export KUBE_GCE_MINION_IMAGE=
-$ export KUBE_GCE_MINION_PROJECT=coreos-cloud
+$ export KUBE_GCE_NODE_IMAGE=
+$ export KUBE_GCE_NODE_PROJECT=coreos-cloud
$ export KUBE_CONTAINER_RUNTIME=rkt
```
diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md
index 0b0041714b3..9c5151a1b65 100644
--- a/docs/getting-started-guides/ubuntu.md
+++ b/docs/getting-started-guides/ubuntu.md
@@ -116,7 +116,7 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"

export role="ai i i"

-export NUM_MINIONS=${NUM_MINIONS:-3}
+export NUM_NODES=${NUM_NODES:-3}

export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24
@@ -129,7 +129,7 @@ separated with blank space like ` `
Then the `role` variable defines the role of above machine in the same order, "ai" stands for machine acts as both master and node, "a" stands for master, "i" stands for node.

-The `NUM_MINIONS` variable defines the total number of nodes.
+The `NUM_NODES` variable defines the total number of nodes.

The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service IP range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips.
diff --git a/docs/getting-started-guides/vagrant.md b/docs/getting-started-guides/vagrant.md
index e103369b2c3..055070dd1d1 100644
--- a/docs/getting-started-guides/vagrant.md
+++ b/docs/getting-started-guides/vagrant.md
@@ -389,10 +389,10 @@ Log on to one of the nodes (`vagrant ssh node-1`) and inspect the salt minion lo

#### I want to change the number of nodes!

-You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so:
+You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_NODES` to 1, like so:

```sh
-export NUM_MINIONS=1
+export NUM_NODES=1
```

#### I want my VMs to have more memory!
@@ -408,7 +408,7 @@ If you need more granular control, you can set the amount of memory for the mast

```sh
export KUBERNETES_MASTER_MEMORY=1536
-export KUBERNETES_MINION_MEMORY=2048
+export KUBERNETES_NODE_MEMORY=2048
```

#### I ran vagrant suspend and nothing works!
diff --git a/examples/celery-rabbitmq/README.md b/examples/celery-rabbitmq/README.md
index d508e460701..5349acea795 100644
--- a/examples/celery-rabbitmq/README.md
+++ b/examples/celery-rabbitmq/README.md
@@ -57,7 +57,7 @@ At the end of the example, we will have:

## Prerequisites

-You should already have turned up a Kubernetes cluster. To get the most of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_MINIONS` environment variable to 2 or more).
+You should already have turned up a Kubernetes cluster. To get the most out of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_NODES` environment variable to 2 or more).

## Step 1: Start the RabbitMQ service
diff --git a/examples/runtime-constraints/README.md b/examples/runtime-constraints/README.md
index 410988e172a..35112f9ac12 100644
--- a/examples/runtime-constraints/README.md
+++ b/examples/runtime-constraints/README.md
@@ -43,7 +43,7 @@ of compute resources easier to follow by starting with an empty cluster.

```
$ export KUBERNETES_PROVIDER=vagrant
-$ export NUM_MINIONS=1
+$ export NUM_NODES=1
$ export KUBE_ENABLE_CLUSTER_MONITORING=none
$ export KUBE_ENABLE_CLUSTER_DNS=false
$ export KUBE_ENABLE_CLUSTER_UI=false
diff --git a/hack/conformance-test.sh b/hack/conformance-test.sh
index 60fd725cfb6..ee16a6bb1ae 100755
--- a/hack/conformance-test.sh
+++ b/hack/conformance-test.sh
@@ -18,7 +18,7 @@
# supports key features for Kubernetes version 1.0.
#
# Instructions:
-#  - Setup a Kubernetes cluster with $NUM_MINIONS nodes (defined below).
+#  - Setup a Kubernetes cluster with $NUM_NODES nodes (defined below).
#  - Provide a Kubeconfig file whose current context is set to the
#    cluster to be tested, and with suitable auth setting.
#  - Specify the location of that kubeconfig with, e.g.:
@@ -78,10 +78,10 @@ echo "Conformance test checking conformance with Kubernetes version 1.0"
# somewhere in the description (i.e. either in the Describe part or the It part).
# The list of tagged conformance tests can be retrieved by:
#
-#   NUM_MINIONS=4 KUBERNETES_CONFORMANCE_TEST="y" \
+#   NUM_NODES=4 KUBERNETES_CONFORMANCE_TEST="y" \
#     hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.dryRun=true
declare -x KUBERNETES_CONFORMANCE_TEST="y"
-declare -x NUM_MINIONS=4
+declare -x NUM_NODES=4
hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.skip='\[Skipped\]'
exit $?
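Reviewer note: the conformance script's own comment doubles as a smoke test for this rename; a dry run should list the tagged tests without needing a live cluster. Taken directly from the comment block above:

```sh
# List the tagged conformance tests with the renamed variable (no cluster required).
NUM_NODES=4 KUBERNETES_CONFORMANCE_TEST="y" \
  hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.dryRun=true
```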
diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh
index ead6eac1725..c09b14f7dfe 100755
--- a/hack/ginkgo-e2e.sh
+++ b/hack/ginkgo-e2e.sh
@@ -103,7 +103,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
  --cluster-tag="${CLUSTER_ID:-}" \
  --repo-root="${KUBE_VERSION_ROOT}" \
  --node-instance-group="${NODE_INSTANCE_GROUP:-}" \
-  --num-nodes="${NUM_MINIONS:-}" \
+  --num-nodes="${NUM_NODES:-}" \
  --prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \
  ${E2E_CLEAN_START:+"--clean-start=true"} \
  ${E2E_MIN_STARTUP_PODS:+"--minStartupPods=${E2E_MIN_STARTUP_PODS}"} \
diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh
index 17e644cee37..507a22846e6 100755
--- a/hack/jenkins/e2e.sh
+++ b/hack/jenkins/e2e.sh
@@ -87,7 +87,7 @@ if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
  KUBERNETES_PROVIDER="gce"
  : ${E2E_MIN_STARTUP_PODS:="1"}
  : ${E2E_ZONE:="us-central1-f"}
-  : ${NUM_MINIONS_PARALLEL:="6"}  # Number of nodes required to run all of the tests in parallel
+  : ${NUM_NODES_PARALLEL:="6"}  # Number of nodes required to run all of the tests in parallel

elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then
  KUBERNETES_PROVIDER="gke"
@@ -96,18 +96,18 @@ elif [[ ${JOB_NAME} =~ ^kubernetes-.*-aws ]]; then
  KUBERNETES_PROVIDER="aws"
  : ${E2E_MIN_STARTUP_PODS:="1"}
  : ${E2E_ZONE:="us-east-1a"}
-  : ${NUM_MINIONS_PARALLEL:="6"}  # Number of nodes required to run all of the tests in parallel
+  : ${NUM_NODES_PARALLEL:="6"}  # Number of nodes required to run all of the tests in parallel
fi

if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
  if [[ "${PERFORMANCE:-}" == "true" ]]; then
    : ${MASTER_SIZE:="m3.xlarge"}
-    : ${NUM_MINIONS:="100"}
+    : ${NUM_NODES:="100"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
  else
    : ${MASTER_SIZE:="m3.large"}
-    : ${MINION_SIZE:="m3.large"}
-    : ${NUM_MINIONS:="3"}
+    : ${NODE_SIZE:="m3.large"}
+    : ${NUM_NODES:="3"}
  fi
fi
@@ -360,7 +360,7 @@ case ${JOB_NAME} in
    : ${PROJECT:="kubernetes-jenkins-pull"}
    : ${ENABLE_DEPLOYMENTS:=true}
    # Override GCE defaults
-    NUM_MINIONS=${NUM_MINIONS_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
    ;;

  # Runs all non-flaky tests on GCE in parallel.
@@ -379,7 +379,7 @@ case ${JOB_NAME} in
    : ${PROJECT:="kubernetes-jenkins"}
    : ${ENABLE_DEPLOYMENTS:=true}
    # Override GCE defaults
-    NUM_MINIONS=${NUM_MINIONS_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
    ;;

  # Runs all non-flaky tests on AWS in parallel.
@@ -396,7 +396,7 @@ case ${JOB_NAME} in
    )"}
    : ${ENABLE_DEPLOYMENTS:=true}
    # Override AWS defaults.
-    NUM_MINIONS=${NUM_MINIONS_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
    ;;

  # Runs the flaky tests on GCE in parallel.
@@ -415,7 +415,7 @@ case ${JOB_NAME} in
    : ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"}
    : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
    # Override GCE defaults.
-    NUM_MINIONS=${NUM_MINIONS_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
    ;;

  # Runs only the reboot tests on GCE.
@@ -436,9 +436,9 @@ case ${JOB_NAME} in
    : ${PROJECT:="kubernetes-jenkins"}
    # Override GCE defaults.
    MASTER_SIZE="n1-standard-4"
-    MINION_SIZE="n1-standard-2"
-    MINION_DISK_SIZE="50GB"
-    NUM_MINIONS="100"
+    NODE_SIZE="n1-standard-2"
+    NODE_DISK_SIZE="50GB"
+    NUM_NODES="100"
    # Reduce logs verbosity
    TEST_CLUSTER_LOG_LEVEL="--v=2"
    # Increase resync period to simulate production
@@ -458,9 +458,9 @@ case ${JOB_NAME} in
    # Override GCE defaults.
    E2E_ZONE="us-east1-b"
    MASTER_SIZE="n1-standard-4"
-    MINION_SIZE="n1-standard-2"
-    MINION_DISK_SIZE="50GB"
-    NUM_MINIONS="100"
+    NODE_SIZE="n1-standard-2"
+    NODE_DISK_SIZE="50GB"
+    NUM_NODES="100"
    # Reduce logs verbosity
    TEST_CLUSTER_LOG_LEVEL="--v=2"
    # Increase resync period to simulate production
@@ -559,8 +559,8 @@ case ${JOB_NAME} in
    )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
    : ${PROJECT:="kubekins-e2e-gce-trusty-rls"}
-    : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-    : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+    : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+    : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
    : ${KUBE_OS_DISTRIBUTION:="trusty"}
    : ${ENABLE_CLUSTER_REGISTRY:=false}
    : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -577,8 +577,8 @@ case ${JOB_NAME} in
    )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-trusty-slow"}
    : ${PROJECT:="k8s-e2e-gce-trusty-slow"}
-    : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-    : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+    : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+    : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
    : ${KUBE_OS_DISTRIBUTION:="trusty"}
    : ${ENABLE_CLUSTER_REGISTRY:=false}
    : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -600,8 +600,8 @@ case ${JOB_NAME} in
    )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
    : ${PROJECT:="k8s-e2e-gce-trusty-beta"}
-    : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-    : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+    : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+    : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
    : ${KUBE_OS_DISTRIBUTION:="trusty"}
    : ${ENABLE_CLUSTER_REGISTRY:=false}
    : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -619,8 +619,8 @@ case ${JOB_NAME} in
    )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-trusty-beta-slow"}
    : ${PROJECT:="k8s-e2e-gce-trusty-beta-slow"}
-    : ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-    : ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+    : ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+    : ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
    : ${KUBE_OS_DISTRIBUTION:="trusty"}
    : ${ENABLE_CLUSTER_REGISTRY:=false}
    : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -820,7 +820,7 @@ case ${JOB_NAME} in
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=GCE\sL7\sLoadBalancer\sController|Job|Horizontal\spod\sautoscaling"}
    # At least n1-standard-2 nodes are required for the cluster to
    # have enough cpu/ram to run the Horizontal pod autoscaling tests.
-    MINION_SIZE="n1-standard-2"
+    NODE_SIZE="n1-standard-2"
    ;;

  # Sets up the GKE soak cluster weekly using the latest CI release.
@@ -835,7 +835,7 @@ case ${JOB_NAME} in
    : ${E2E_UP:="true"}
    : ${PROJECT:="kubernetes-jenkins"}
    # Need at least n1-standard-2 nodes to run kubelet_perf tests
-    MINION_SIZE="n1-standard-2"
+    NODE_SIZE="n1-standard-2"
    ;;

  # Runs tests on GKE soak cluster.
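Reviewer note: every `: ${VAR:=default}` line renamed above relies on the colon builtin to apply a default without clobbering a caller-supplied value, which is why the rename is safe for Jenkins jobs that export their own sizes. A standalone sketch of the idiom; the variable name and value here are illustrative:

```sh
#!/usr/bin/env bash
# ':' evaluates its arguments and discards them, so the assignment inside
# ${NUM_NODES:="3"} fires only when NUM_NODES is unset or empty.
: ${NUM_NODES:="3"}
echo "NUM_NODES=${NUM_NODES}"   # prints 3 unless the caller exported a value
```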
@@ -1232,7 +1232,7 @@ case ${JOB_NAME} in
    : ${E2E_UP:="true"}
    : ${E2E_TEST:="false"}
    : ${E2E_DOWN:="false"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  kubernetes-upgrade-gce-step2-upgrade-master)
@@ -1245,7 +1245,7 @@ case ${JOB_NAME} in
    : ${E2E_TEST:="true"}
    : ${E2E_DOWN:="false"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    : ${KUBE_ENABLE_DEPLOYMENTS:=true}
    : ${KUBE_ENABLE_DAEMONSETS:=true}
    ;;
@@ -1267,7 +1267,7 @@ case ${JOB_NAME} in
      ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  kubernetes-upgrade-gce-step4-upgrade-cluster)
@@ -1280,7 +1280,7 @@ case ${JOB_NAME} in
    : ${E2E_TEST:="true"}
    : ${E2E_DOWN:="false"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    : ${KUBE_ENABLE_DEPLOYMENTS:=true}
    : ${KUBE_ENABLE_DAEMONSETS:=true}
    ;;
@@ -1300,7 +1300,7 @@ case ${JOB_NAME} in
      ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  kubernetes-upgrade-gce-step6-e2e-new)
@@ -1319,7 +1319,7 @@ case ${JOB_NAME} in
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
      )"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  # kubernetes-upgrade-gce-1.0-current-release
@@ -1342,7 +1342,7 @@ case ${JOB_NAME} in
    : ${E2E_TEST:="false"}
    : ${E2E_DOWN:="false"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  kubernetes-upgrade-1.0-current-release-gce-step2-upgrade-master)
@@ -1358,7 +1358,7 @@ case ${JOB_NAME} in
    : ${E2E_DOWN:="false"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    : ${KUBE_ENABLE_DEPLOYMENTS:=true}
    : ${KUBE_ENABLE_DAEMONSETS:=true}
    ;;
@@ -1379,7 +1379,7 @@ case ${JOB_NAME} in
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  kubernetes-upgrade-1.0-current-release-gce-step4-upgrade-cluster)
@@ -1395,7 +1395,7 @@ case ${JOB_NAME} in
    : ${E2E_DOWN:="false"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    : ${KUBE_ENABLE_DEPLOYMENTS:=true}
    : ${KUBE_ENABLE_DAEMONSETS:=true}
    ;;
@@ -1416,7 +1416,7 @@ case ${JOB_NAME} in
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  kubernetes-upgrade-1.0-current-release-gce-step6-e2e-new)
@@ -1437,7 +1437,7 @@ case ${JOB_NAME} in
      ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
      )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
    ;;

  # Run Kubemark test on a fake 100 node cluster to have a comparison
@@ -1452,11 +1452,11 @@ case ${JOB_NAME} in
    : ${USE_KUBEMARK:="true"}
    # Override defaults to be indpendent from GCE defaults and set kubemark parameters
    KUBE_GCE_INSTANCE_PREFIX="kubemark100"
-    NUM_MINIONS="10"
+    NUM_NODES="10"
    MASTER_SIZE="n1-standard-2"
-    MINION_SIZE="n1-standard-1"
+    NODE_SIZE="n1-standard-1"
    KUBEMARK_MASTER_SIZE="n1-standard-4"
-    KUBEMARK_NUM_MINIONS="100"
+    KUBEMARK_NUM_NODES="100"
    ;;

  # Run Kubemark test on a fake 500 node cluster to test for regressions on
@@ -1470,13 +1470,13 @@ case ${JOB_NAME} in
    : ${E2E_TEST:="false"}
    : ${USE_KUBEMARK:="true"}
    # Override defaults to be indpendent from GCE defaults and set kubemark parameters
-    NUM_MINIONS="6"
+    NUM_NODES="6"
    MASTER_SIZE="n1-standard-4"
-    MINION_SIZE="n1-standard-8"
+    NODE_SIZE="n1-standard-8"
    KUBE_GCE_INSTANCE_PREFIX="kubemark500"
    E2E_ZONE="asia-east1-a"
    KUBEMARK_MASTER_SIZE="n1-standard-16"
-    KUBEMARK_NUM_MINIONS="500"
+    KUBEMARK_NUM_NODES="500"
    ;;

  # Run big Kubemark test, this currently means a 1000 node cluster and 16 core master
@@ -1490,15 +1490,15 @@ case ${JOB_NAME} in
    : ${USE_KUBEMARK:="true"}
    # Override defaults to be indpendent from GCE defaults and set kubemark parameters
    # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
-    NUM_MINIONS="11"
+    NUM_NODES="11"
    MASTER_SIZE="n1-standard-4"
-    MINION_SIZE="n1-standard-8"  # Note: can fit about 17 hollow nodes per core
-                                 # so NUM_MINIONS x cores_per_minion should
+    NODE_SIZE="n1-standard-8"  # Note: can fit about 17 hollow nodes per core
+                               # so NUM_NODES x cores_per_minion should
                                 # be set accordingly.
    KUBE_GCE_INSTANCE_PREFIX="kubemark1000"
    E2E_ZONE="asia-east1-a"
    KUBEMARK_MASTER_SIZE="n1-standard-16"
-    KUBEMARK_NUM_MINIONS="1000"
+    KUBEMARK_NUM_NODES="1000"
    ;;
esac
@@ -1512,8 +1512,8 @@ export KUBE_GCE_ZONE=${E2E_ZONE}
export KUBE_GCE_NETWORK=${E2E_NETWORK}
export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-}
export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-}
-export KUBE_GCE_MINION_PROJECT=${KUBE_GCE_MINION_PROJECT:-}
-export KUBE_GCE_MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-}
+export KUBE_GCE_NODE_PROJECT=${KUBE_GCE_NODE_PROJECT:-}
+export KUBE_GCE_NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-}
export KUBE_OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-}

# GKE variables
@@ -1523,7 +1523,7 @@ export KUBE_GKE_NETWORK=${E2E_NETWORK}
export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-}
export DOGFOOD_GCLOUD=${DOGFOOD_GCLOUD:-}
export CMD_GROUP=${CMD_GROUP:-}
-export MACHINE_TYPE=${MINION_SIZE:-}  # GKE scripts use MACHINE_TYPE for the node vm size
+export MACHINE_TYPE=${NODE_SIZE:-}  # GKE scripts use MACHINE_TYPE for the node vm size

if [[ ! -z "${GKE_API_ENDPOINT:-}" ]]; then
  export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT}
@@ -1537,9 +1537,9 @@ export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER:
export KUBE_ENABLE_DEPLOYMENTS=${ENABLE_DEPLOYMENTS:-}
export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-}
export MASTER_SIZE=${MASTER_SIZE:-}
-export MINION_SIZE=${MINION_SIZE:-}
-export MINION_DISK_SIZE=${MINION_DISK_SIZE:-}
-export NUM_MINIONS=${NUM_MINIONS:-}
+export NODE_SIZE=${NODE_SIZE:-}
+export NODE_DISK_SIZE=${NODE_DISK_SIZE:-}
+export NUM_NODES=${NUM_NODES:-}
export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-}
export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-}
export PROJECT=${PROJECT:-}
@@ -1752,18 +1752,18 @@ fi

### Start Kubemark ###
if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
  export RUN_FROM_DISTRO=true
-  NUM_MINIONS_BKP=${NUM_MINIONS}
+  NUM_NODES_BKP=${NUM_NODES}
  MASTER_SIZE_BKP=${MASTER_SIZE}
  ./test/kubemark/stop-kubemark.sh
-  NUM_MINIONS=${KUBEMARK_NUM_MINIONS:-$NUM_MINIONS}
+  NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
  MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
  ./test/kubemark/start-kubemark.sh
  ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
  ./test/kubemark/stop-kubemark.sh
-  NUM_MINIONS=${NUM_MINIONS_BKP}
+  NUM_NODES=${NUM_NODES_BKP}
  MASTER_SIZE=${MASTER_SIZE_BKP}
  unset RUN_FROM_DISTRO
-  unset NUM_MINIONS_BKP
+  unset NUM_NODES_BKP
  unset MASTER_SIZE_BKP
fi
diff --git a/hack/kube-dump.sh b/hack/kube-dump.sh
index fd6321db343..76cc9a5bf86 100755
--- a/hack/kube-dump.sh
+++ b/hack/kube-dump.sh
@@ -32,7 +32,7 @@ source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
detect-project &> /dev/null

echo "kube-dump.sh: Getting docker statuses on all nodes..."
-ALL_NODES=(${MINION_NAMES[*]} ${MASTER_NAME})
+ALL_NODES=(${NODE_NAMES[*]} ${MASTER_NAME})
for NODE in ${ALL_NODES[*]}; do
  echo "kube-dump.sh: Node $NODE:"
  ssh-to-node "${NODE}" '
diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh
index ddcbf65f0dd..6db0c5f378a 100755
--- a/test/kubemark/start-kubemark.sh
+++ b/test/kubemark/start-kubemark.sh
@@ -205,7 +205,7 @@ contexts:
current-context: kubemark-context
EOF

-sed "s/##numreplicas##/${NUM_MINIONS:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json
+sed "s/##numreplicas##/${NUM_NODES:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json
sed -i'' -e "s/##project##/${PROJECT}/g" ${KUBE_ROOT}/test/kubemark/hollow-node.json
kubectl create -f ${KUBE_ROOT}/test/kubemark/kubemark-ns.json
kubectl create -f ${KUBECONFIG_SECRET} --namespace="kubemark"
@@ -215,7 +215,7 @@ rm ${KUBECONFIG_SECRET}

echo "Waiting for all HollowNodes to become Running..."
echo "This can loop forever if something crashed."
-until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_MINIONS}" ]]; do
+until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_NODES}" ]]; do
  echo -n .
  sleep 1
done