Merge pull request #17601 from eosrei/shell-var-MINION-to-NODE

Auto commit by PR queue bot
k8s-merge-robot 2015-11-26 05:02:44 -08:00
commit 0c4f302e5e
60 changed files with 403 additions and 403 deletions

Vagrantfile

@@ -18,11 +18,11 @@ END
 end
 # The number of minions to provision
-$num_minion = (ENV['NUM_MINIONS'] || 1).to_i
+$num_minion = (ENV['NUM_NODES'] || 1).to_i
 # ip configuration
 $master_ip = ENV['MASTER_IP']
-$minion_ip_base = ENV['MINION_IP_BASE'] || ""
+$minion_ip_base = ENV['NODE_IP_BASE'] || ""
 $minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" }
 # Determine the OS platform to use
@@ -105,7 +105,7 @@ end
 # When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
 # This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
 $vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
-$vm_minion_mem = (ENV['KUBERNETES_MINION_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
+$vm_minion_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   def setvmboxandurl(config, provider)
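
Since the Vagrantfile reads these values straight from the environment, the rename is user-visible. A minimal sketch of a run with the new names (values here are illustrative, not defaults):

```sh
# Illustrative: two nodes with 2 GB of RAM each, using the renamed variables.
export NUM_NODES=2                   # formerly NUM_MINIONS
export KUBERNETES_NODE_MEMORY=2048   # formerly KUBERNETES_MINION_MEMORY
vagrant up
```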


@@ -16,26 +16,26 @@
 ZONE=${KUBE_AWS_ZONE:-us-west-2a}
 MASTER_SIZE=${MASTER_SIZE:-}
-MINION_SIZE=${MINION_SIZE:-}
+NODE_SIZE=${NODE_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-4}
+NUM_NODES=${NUM_NODES:-4}
 # Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${MINION_SIZE} ]]; then
+if [[ -z ${NODE_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
-    MINION_SIZE="t2.micro"
+    NODE_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
-    MINION_SIZE="t2.small"
+    NODE_SIZE="t2.small"
   else
-    MINION_SIZE="t2.medium"
+    NODE_SIZE="t2.medium"
   fi
 fi
 # Dynamically set the master size by the number of nodes, these are guesses
 # TODO: gather some data
 if [[ -z ${MASTER_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     MASTER_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     MASTER_SIZE="t2.small"
   else
     MASTER_SIZE="t2.medium"
@@ -56,7 +56,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-kubernetes}"
 CLUSTER_ID=${INSTANCE_PREFIX}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
 IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_MINION="kubernetes-minion"
+IAM_PROFILE_NODE="kubernetes-minion"
 LOG="/dev/null"
@@ -66,13 +66,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
 MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
 MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
 # The minions root EBS volume size (used to house Docker images)
-MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}"
+NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
-MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32}
+NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
+NODE_TAG="${INSTANCE_PREFIX}-minion"
-MINION_SCOPES=""
+NODE_SCOPES=""
 POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
@@ -121,7 +121,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   # TODO: actually configure ASG or similar
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi
@@ -130,11 +130,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco
 # Optional: Enable/disable public IP assignment for minions.
 # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
-ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
+ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true}
 # OS options for minions
 KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}"
-KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
+KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}"
 COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
 CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
 RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"
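
The node auto-sizing block above is easier to read extracted into a helper; this sketch reuses the thresholds from the diff (the function name is ours, purely for illustration):

```sh
# Sketch of the sizing rule above; node_size_for is a hypothetical helper.
node_size_for() {
  local num_nodes=$1
  if (( num_nodes < 50 )); then
    echo "t2.micro"
  elif (( num_nodes < 150 )); then
    echo "t2.small"
  else
    echo "t2.medium"
  fi
}
node_size_for 4    # -> t2.micro (matches the NUM_NODES=4 default here)
node_size_for 60   # -> t2.small
```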


@@ -17,26 +17,26 @@
 ZONE=${KUBE_AWS_ZONE:-us-west-2a}
 MASTER_SIZE=${MASTER_SIZE:-}
-MINION_SIZE=${MINION_SIZE:-}
+NODE_SIZE=${NODE_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-2}
+NUM_NODES=${NUM_NODES:-2}
 # Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${MINION_SIZE} ]]; then
+if [[ -z ${NODE_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
-    MINION_SIZE="t2.micro"
+    NODE_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
-    MINION_SIZE="t2.small"
+    NODE_SIZE="t2.small"
   else
-    MINION_SIZE="t2.medium"
+    NODE_SIZE="t2.medium"
   fi
 fi
 # Dynamically set the master size by the number of nodes, these are guesses
 # TODO: gather some data
 if [[ -z ${MASTER_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     MASTER_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     MASTER_SIZE="t2.small"
   else
     MASTER_SIZE="t2.medium"
@@ -54,7 +54,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-e2e-test-${USER}}"
 CLUSTER_ID=${INSTANCE_PREFIX}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
 IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_MINION="kubernetes-minion"
+IAM_PROFILE_NODE="kubernetes-minion"
 LOG="/dev/null"
@@ -64,13 +64,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
 MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
 MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
 # The minions root EBS volume size (used to house Docker images)
-MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}"
+NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
-MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32}
+NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
+NODE_TAG="${INSTANCE_PREFIX}-minion"
-MINION_SCOPES=""
+NODE_SCOPES=""
 POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
@@ -117,7 +117,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   # TODO: actually configure ASG or similar
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi
@@ -126,11 +126,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco
 # Optional: Enable/disable public IP assignment for minions.
 # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
-ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
+ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true}
 # OS options for minions
 KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}"
-KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
+KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}"
 COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
 CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
 RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"


@@ -19,11 +19,11 @@
 SSH_USER=core
 function detect-minion-image (){
-  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
+  if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
-    KUBE_MINION_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
+    KUBE_NODE_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
   fi
-  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
+  if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
-    echo "unable to determine KUBE_MINION_IMAGE"
+    echo "unable to determine KUBE_NODE_IMAGE"
     exit 2
   fi
 }
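
Exercising the renamed function by hand looks like this (a sketch; it assumes the file above is sourced and that both environment variables are set):

```sh
# Assumes detect-minion-image from above is in scope.
export COREOS_CHANNEL=alpha
export AWS_REGION=us-west-2
detect-minion-image
echo "CoreOS AMI for ${AWS_REGION}: ${KUBE_NODE_IMAGE}"
```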


@@ -27,7 +27,7 @@ It is not a bad idea to set AWS_S3_BUCKET to something more human friendly.
 AWS_S3_REGION is useful for people that want to control their data location, because of regulatory restrictions for example.
-**MASTER_SIZE**, **MINION_SIZE**
+**MASTER_SIZE**, **NODE_SIZE**
 The instance type to use for creating the master/minion. Defaults to auto-sizing based on the number of nodes (see below).
@@ -35,10 +35,10 @@ For production usage, we recommend bigger instances, for example:
 ```
 export MASTER_SIZE=c4.large
-export MINION_SIZE=r3.large
+export NODE_SIZE=r3.large
 ```
-If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`.
+If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`.
 In particular for clusters less than 50 nodes it will
 use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`.
@@ -46,7 +46,7 @@ Please note: `kube-up` utilizes ephemeral storage available on instances for doc
 support ephemeral storage and will default to docker storage on the root disk which is usually only 8GB.
 EBS-only instance types include `t2`, `c4`, and `m4`.
-**KUBE_ENABLE_MINION_PUBLIC_IP**
+**KUBE_ENABLE_NODE_PUBLIC_IP**
 Should a public IP automatically assigned to the minions? "true" or "false"
 Defaults to: "true"
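
Putting the renamed options together, a cluster with explicitly sized, private-IP-only nodes might be launched like this (illustrative values; the NAT caveat from the config comments applies):

```sh
export MASTER_SIZE=c4.large
export NODE_SIZE=r3.large
export KUBE_ENABLE_NODE_PUBLIC_IP=false   # requires a NAT instance and routes
cluster/kube-up.sh
```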


@@ -40,7 +40,7 @@ network_provider: '$(echo "$NETWORK_PROVIDER")'
 opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG")'
 opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
 opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
-num_nodes: $(echo "${NUM_MINIONS}")
+num_nodes: $(echo "${NUM_NODES}")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 EOF


@@ -18,9 +18,9 @@
 # A library of common helper functions for Ubuntus & Debians.
 function detect-minion-image() {
-  if [[ -z "${KUBE_MINION_IMAGE=-}" ]]; then
+  if [[ -z "${KUBE_NODE_IMAGE=-}" ]]; then
     detect-image
-    KUBE_MINION_IMAGE=$AWS_IMAGE
+    KUBE_NODE_IMAGE=$AWS_IMAGE
   fi
 }


@@ -79,14 +79,14 @@ if [[ -n "${KUBE_SUBNET_CIDR:-}" ]]; then
 fi
 MASTER_SG_NAME="kubernetes-master-${CLUSTER_ID}"
-MINION_SG_NAME="kubernetes-minion-${CLUSTER_ID}"
+NODE_SG_NAME="kubernetes-minion-${CLUSTER_ID}"
 # Be sure to map all the ephemeral drives. We can specify more than we actually have.
 # TODO: Actually mount the correct number (especially if we have more), though this is non-trivial, and
 # only affects the big storage instance types, which aren't a typical use case right now.
 BLOCK_DEVICE_MAPPINGS_BASE="{\"DeviceName\": \"/dev/sdc\",\"VirtualName\":\"ephemeral0\"},{\"DeviceName\": \"/dev/sdd\",\"VirtualName\":\"ephemeral1\"},{\"DeviceName\": \"/dev/sde\",\"VirtualName\":\"ephemeral2\"},{\"DeviceName\": \"/dev/sdf\",\"VirtualName\":\"ephemeral3\"}"
 MASTER_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MASTER_ROOT_DISK_SIZE},\"VolumeType\":\"${MASTER_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
-MINION_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MINION_ROOT_DISK_SIZE},\"VolumeType\":\"${MINION_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
+NODE_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${NODE_ROOT_DISK_SIZE},\"VolumeType\":\"${NODE_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
 # TODO (bburns) Parameterize this for multiple cluster per project
@@ -181,38 +181,38 @@ function query-running-minions () {
     Name=vpc-id,Values=${VPC_ID} \
     Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
     Name=tag:aws:autoscaling:groupName,Values=${ASG_NAME} \
-    Name=tag:Role,Values=${MINION_TAG} \
+    Name=tag:Role,Values=${NODE_TAG} \
     --query ${query}
 }
 function find-running-minions () {
-  MINION_IDS=()
+  NODE_IDS=()
-  MINION_NAMES=()
+  NODE_NAMES=()
   for id in $(query-running-minions "Reservations[].Instances[].InstanceId"); do
-    MINION_IDS+=("${id}")
+    NODE_IDS+=("${id}")
     # We use the minion ids as the name
-    MINION_NAMES+=("${id}")
+    NODE_NAMES+=("${id}")
   done
 }
 function detect-minions () {
   find-running-minions
-  # This is inefficient, but we want MINION_NAMES / MINION_IDS to be ordered the same as KUBE_MINION_IP_ADDRESSES
+  # This is inefficient, but we want NODE_NAMES / NODE_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES
-  KUBE_MINION_IP_ADDRESSES=()
+  KUBE_NODE_IP_ADDRESSES=()
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     local minion_ip
-    if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then
+    if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then
-      minion_ip=$(get_instance_public_ip ${MINION_NAMES[$i]})
+      minion_ip=$(get_instance_public_ip ${NODE_NAMES[$i]})
     else
-      minion_ip=$(get_instance_private_ip ${MINION_NAMES[$i]})
+      minion_ip=$(get_instance_private_ip ${NODE_NAMES[$i]})
     fi
-    echo "Found minion ${i}: ${MINION_NAMES[$i]} @ ${minion_ip}"
+    echo "Found minion ${i}: ${NODE_NAMES[$i]} @ ${minion_ip}"
-    KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
+    KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
   done
-  if [[ -z "$KUBE_MINION_IP_ADDRESSES" ]]; then
+  if [[ -z "$KUBE_NODE_IP_ADDRESSES" ]]; then
     echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
     exit 1
   fi
@@ -228,13 +228,13 @@ function detect-security-groups {
       echo "Using master security group: ${MASTER_SG_NAME} ${MASTER_SG_ID}"
     fi
   fi
-  if [[ -z "${MINION_SG_ID-}" ]]; then
+  if [[ -z "${NODE_SG_ID-}" ]]; then
-    MINION_SG_ID=$(get_security_group_id "${MINION_SG_NAME}")
+    NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}")
-    if [[ -z "${MINION_SG_ID}" ]]; then
+    if [[ -z "${NODE_SG_ID}" ]]; then
      echo "Could not detect Kubernetes minion security group. Make sure you've launched a cluster with 'kube-up.sh'"
      exit 1
     else
-      echo "Using minion security group: ${MINION_SG_NAME} ${MINION_SG_ID}"
+      echo "Using minion security group: ${NODE_SG_NAME} ${NODE_SG_ID}"
     fi
   fi
 }
@@ -609,9 +609,9 @@ function ensure-iam-profiles {
     echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
     create-iam-profile ${IAM_PROFILE_MASTER}
   }
-  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MINION} || {
+  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_NODE} || {
-    echo "Creating minion IAM profile: ${IAM_PROFILE_MINION}"
+    echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
-    create-iam-profile ${IAM_PROFILE_MINION}
+    create-iam-profile ${IAM_PROFILE_NODE}
   }
 }
@@ -768,10 +768,10 @@ function kube-up {
     echo "Creating master security group."
     create-security-group "${MASTER_SG_NAME}" "Kubernetes security group applied to master nodes"
   fi
-  MINION_SG_ID=$(get_security_group_id "${MINION_SG_NAME}")
+  NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}")
-  if [[ -z "${MINION_SG_ID}" ]]; then
+  if [[ -z "${NODE_SG_ID}" ]]; then
     echo "Creating minion security group."
-    create-security-group "${MINION_SG_NAME}" "Kubernetes security group applied to minion nodes"
+    create-security-group "${NODE_SG_NAME}" "Kubernetes security group applied to minion nodes"
   fi
   detect-security-groups
@@ -780,17 +780,17 @@ function kube-up {
   authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all"
   # Minions can talk to minions
-  authorize-security-group-ingress "${MINION_SG_ID}" "--source-group ${MINION_SG_ID} --protocol all"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all"
   # Masters and minions can talk to each other
-  authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MINION_SG_ID} --protocol all"
+  authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all"
-  authorize-security-group-ingress "${MINION_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all"
   # TODO(justinsb): Would be fairly easy to replace 0.0.0.0/0 in these rules
   # SSH is open to the world
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0"
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 22 --cidr 0.0.0.0/0"
   # HTTPS to the master is allowed (for API access)
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0"
@@ -990,31 +990,31 @@ function start-minions() {
   echo "Creating minion configuration"
   generate-minion-user-data > "${KUBE_TEMP}/minion-user-data"
   local public_ip_option
-  if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then
+  if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then
     public_ip_option="--associate-public-ip-address"
   else
     public_ip_option="--no-associate-public-ip-address"
   fi
   ${AWS_ASG_CMD} create-launch-configuration \
       --launch-configuration-name ${ASG_NAME} \
-      --image-id $KUBE_MINION_IMAGE \
+      --image-id $KUBE_NODE_IMAGE \
-      --iam-instance-profile ${IAM_PROFILE_MINION} \
+      --iam-instance-profile ${IAM_PROFILE_NODE} \
-      --instance-type $MINION_SIZE \
+      --instance-type $NODE_SIZE \
       --key-name ${AWS_SSH_KEY_NAME} \
-      --security-groups ${MINION_SG_ID} \
+      --security-groups ${NODE_SG_ID} \
       ${public_ip_option} \
-      --block-device-mappings "${MINION_BLOCK_DEVICE_MAPPINGS}" \
+      --block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \
       --user-data "file://${KUBE_TEMP}/minion-user-data"
   echo "Creating autoscaling group"
   ${AWS_ASG_CMD} create-auto-scaling-group \
       --auto-scaling-group-name ${ASG_NAME} \
       --launch-configuration-name ${ASG_NAME} \
-      --min-size ${NUM_MINIONS} \
+      --min-size ${NUM_NODES} \
-      --max-size ${NUM_MINIONS} \
+      --max-size ${NUM_NODES} \
       --vpc-zone-identifier ${SUBNET_ID} \
      --tags ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Name,Value=${NODE_INSTANCE_PREFIX} \
-            ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${MINION_TAG} \
+            ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${NODE_TAG} \
            ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=KubernetesCluster,Value=${CLUSTER_ID}
   # Wait for the minions to be running
@@ -1022,8 +1022,8 @@ function start-minions() {
   attempt=0
   while true; do
     find-running-minions > $LOG
-    if [[ ${#MINION_IDS[@]} == ${NUM_MINIONS} ]]; then
+    if [[ ${#NODE_IDS[@]} == ${NUM_NODES} ]]; then
-      echo -e " ${color_green}${#MINION_IDS[@]} minions started; ready${color_norm}"
+      echo -e " ${color_green}${#NODE_IDS[@]} minions started; ready${color_norm}"
       break
     fi
@@ -1037,7 +1037,7 @@ function start-minions() {
       exit 1
     fi
-    echo -e " ${color_yellow}${#MINION_IDS[@]} minions started; waiting${color_norm}"
+    echo -e " ${color_yellow}${#NODE_IDS[@]} minions started; waiting${color_norm}"
     attempt=$(($attempt+1))
     sleep 10
   done
@@ -1113,11 +1113,11 @@ function check-cluster() {
   # Basic sanity checking
   # TODO(justinsb): This is really not needed any more
   local rc # Capture return code without exiting because of errexit bash option
-  for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do
+  for (( i=0; i<${#KUBE_NODE_IP_ADDRESSES[@]}; i++)); do
     # Make sure docker is installed and working.
     local attempt=0
     while true; do
-      local minion_ip=${KUBE_MINION_IP_ADDRESSES[$i]}
+      local minion_ip=${KUBE_NODE_IP_ADDRESSES[$i]}
       echo -n "Attempt $(($attempt+1)) to check Docker on node @ ${minion_ip} ..."
       local output=`check-minion ${minion_ip}`
       echo $output
@@ -1330,12 +1330,12 @@ function test-setup {
   # Open up port 80 & 8080 so common containers on minions can be reached
   # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0"
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0"
   # Open up the NodePort range
   # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
-  authorize-security-group-ingress "${MINION_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0"
+  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0"
   echo "test-setup complete"
 }
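
The renamed arrays compose just as before; a sketch of consuming them interactively (assumes the AWS utility functions above are sourced and the cluster config is loaded):

```sh
# Sketch: enumerate detected nodes via the renamed variables.
detect-minions   # fills NODE_NAMES and KUBE_NODE_IP_ADDRESSES
for (( i=0; i<${#NODE_NAMES[@]}; i++ )); do
  echo "node ${NODE_NAMES[$i]} @ ${KUBE_NODE_IP_ADDRESSES[$i]}"
done
```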


@@ -27,8 +27,8 @@ export NODES=${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}
 # Number of nodes in your cluster.
 export NUM_NODES=${NUM_NODES:-2}
-# Should be removed when NUM_MINIONS is deprecated in validate-cluster.sh
+# Should be removed when NUM_NODES is deprecated in validate-cluster.sh
-export NUM_MINIONS=${NUM_NODES}
+export NUM_NODES=${NUM_NODES}
 # By default, the cluster will use the etcd installed on master.
 export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:4001"}


@@ -38,7 +38,7 @@ KUBE_API_ADDRESS="--address=${MASTER_ADDRESS}"
 KUBE_API_PORT="--port=8080"
 # --kubelet-port=10250: Kubelet port
-MINION_PORT="--kubelet-port=10250"
+NODE_PORT="--kubelet-port=10250"
 # --allow-privileged=false: If true, allow privileged containers.
 KUBE_ALLOW_PRIV="--allow-privileged=false"
@@ -75,7 +75,7 @@ KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\
 \${KUBE_ETCD_SERVERS} \\
 \${KUBE_API_ADDRESS} \\
 \${KUBE_API_PORT} \\
-\${MINION_PORT} \\
+\${NODE_PORT} \\
 \${KUBE_ALLOW_PRIV} \\
 \${KUBE_SERVICE_ADDRESSES} \\
 \${KUBE_ADMISSION_CONTROL} \\


@@ -27,13 +27,13 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
 KUBE_LOG_LEVEL="--v=4"
 # --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)
-MINION_ADDRESS="--address=${NODE_ADDRESS}"
+NODE_ADDRESS="--address=${NODE_ADDRESS}"
 # --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag.
-MINION_PORT="--port=10250"
+NODE_PORT="--port=10250"
 # --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
-MINION_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
+NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
 # --api-servers=[]: List of Kubernetes API servers for publishing events,
 # and reading pods and services. (ip:port), comma separated.
@@ -48,9 +48,9 @@ EOF
 KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
 \${KUBE_LOG_LEVEL} \\
-\${MINION_ADDRESS} \\
+\${NODE_ADDRESS} \\
-\${MINION_PORT} \\
+\${NODE_PORT} \\
-\${MINION_HOSTNAME} \\
+\${NODE_HOSTNAME} \\
 \${KUBELET_API_SERVER} \\
 \${KUBE_ALLOW_PRIV} \\
 \${KUBELET_ARGS}"


@@ -19,20 +19,20 @@
 GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-2}
-MINION_SIZE=${MINION_SIZE:-n1-standard-2}
+NODE_SIZE=${NODE_SIZE:-n1-standard-2}
-NUM_MINIONS=${NUM_MINIONS:-3}
+NUM_NODES=${NUM_NODES:-3}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
-MINION_DISK_TYPE=${MINION_DISK_TYPE:-pd-standard}
+NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
-MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
+NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
-PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false}
+PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
 MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103}
 MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
-MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-"${MASTER_IMAGE}"}
+NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"}
-MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
+NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
 CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
 RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
@@ -40,10 +40,10 @@ NETWORK=${KUBE_GCE_NETWORK:-default}
 INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
+NODE_TAG="${INSTANCE_PREFIX}-minion"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
-MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
+NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL="${POLL_SLEEP_INTERVAL:-3}"
@@ -101,7 +101,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
   ENABLE_CLUSTER_MONITORING=googleinfluxdb
 fi
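
Only the NUM_MINIONS fallback changes here; the KUBE_-prefixed autoscaler knobs keep their names. An illustrative override set:

```sh
export KUBE_ENABLE_NODE_AUTOSCALER=true
export KUBE_AUTOSCALER_MIN_NODES=1
export KUBE_AUTOSCALER_MAX_NODES=10       # falls back to NUM_NODES when unset
export KUBE_TARGET_NODE_UTILIZATION=0.7
```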


@@ -19,21 +19,21 @@
 GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-2}
-MINION_SIZE=${MINION_SIZE:-n1-standard-2}
+NODE_SIZE=${NODE_SIZE:-n1-standard-2}
-NUM_MINIONS=${NUM_MINIONS:-3}
+NUM_NODES=${NUM_NODES:-3}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
-MINION_DISK_TYPE=${MINION_DISK_TYPE:-pd-standard}
+NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
-MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
+NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
 KUBE_APISERVER_REQUEST_TIMEOUT=300
-PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false}
+PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
 MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103}
 MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
-MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-"${MASTER_IMAGE}"}
+NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-"${MASTER_IMAGE}"}
-MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
+NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-"${MASTER_IMAGE_PROJECT}"}
 CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
 RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
@@ -41,10 +41,10 @@ NETWORK=${KUBE_GCE_NETWORK:-e2e}
 INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
+NODE_TAG="${INSTANCE_PREFIX}-minion"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
-MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
+NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
 RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
 TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
@@ -109,7 +109,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi


@@ -285,7 +285,7 @@ opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
 enable_manifest_url: '$(echo "$ENABLE_MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url: '$(echo "$MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url_header: '$(echo "$MANIFEST_URL_HEADER" | sed -e "s/'/''/g")'
-num_nodes: $(echo "${NUM_MINIONS}")
+num_nodes: $(echo "${NUM_NODES}")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 EOF


@@ -121,15 +121,15 @@ function prepare-upgrade() {
 }
-# Reads kube-env metadata from first node in MINION_NAMES.
+# Reads kube-env metadata from first node in NODE_NAMES.
 #
 # Assumed vars:
-#   MINION_NAMES
+#   NODE_NAMES
 #   PROJECT
 #   ZONE
 function get-node-env() {
   # TODO(zmerlynn): Make this more reliable with retries.
-  gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${MINION_NAMES[0]} --command \
+  gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \
     "curl --fail --silent -H 'Metadata-Flavor: Google' \
       'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
 }
@@ -145,7 +145,7 @@ function get-env-val() {
 # Assumed vars:
 #   KUBE_VERSION
-#   MINION_SCOPES
+#   NODE_SCOPES
 #   NODE_INSTANCE_PREFIX
 #   PROJECT
 #   ZONE
@@ -167,7 +167,7 @@ function upgrade-nodes() {
 #
 # Assumed vars:
 #   KUBE_VERSION
-#   MINION_SCOPES
+#   NODE_SCOPES
 #   NODE_INSTANCE_PREFIX
 #   PROJECT
 #   ZONE
@@ -188,8 +188,8 @@ function prepare-node-upgrade() {
   # TODO(zmerlynn): Refactor setting scope flags.
   local scope_flags=
-  if [ -n "${MINION_SCOPES}" ]; then
+  if [ -n "${NODE_SCOPES}" ]; then
-    scope_flags="--scopes ${MINION_SCOPES}"
+    scope_flags="--scopes ${NODE_SCOPES}"
   else
     scope_flags="--no-scopes"
   fi


@@ -214,13 +214,13 @@ function upload-server-tars() {
 # Assumed vars:
 #   NODE_INSTANCE_PREFIX
 # Vars set:
-#   MINION_NAMES
+#   NODE_NAMES
 function detect-minion-names {
   detect-project
-  MINION_NAMES=($(gcloud compute instance-groups managed list-instances \
+  NODE_NAMES=($(gcloud compute instance-groups managed list-instances \
     "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}" \
     --format=yaml | grep instance: | cut -d ' ' -f 2))
-  echo "MINION_NAMES=${MINION_NAMES[*]}" >&2
+  echo "NODE_NAMES=${NODE_NAMES[*]}" >&2
 }
 # Detect the information about the minions
@@ -228,24 +228,24 @@ function detect-minion-names {
 # Assumed vars:
 #   ZONE
 # Vars set:
-#   MINION_NAMES
+#   NODE_NAMES
-#   KUBE_MINION_IP_ADDRESSES (array)
+#   KUBE_NODE_IP_ADDRESSES (array)
 function detect-minions () {
   detect-project
   detect-minion-names
-  KUBE_MINION_IP_ADDRESSES=()
+  KUBE_NODE_IP_ADDRESSES=()
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
-      "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
+      "${NODE_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
       --format=text | awk '{ print $2 }')
     if [[ -z "${minion_ip-}" ]] ; then
-      echo "Did not find ${MINION_NAMES[$i]}" >&2
+      echo "Did not find ${NODE_NAMES[$i]}" >&2
     else
-      echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
+      echo "Found ${NODE_NAMES[$i]} at ${minion_ip}"
-      KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
+      KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
     fi
   done
-  if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
+  if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
     echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
     exit 1
   fi
@@ -370,19 +370,19 @@ function create-node-template {
   local attempt=1
   local preemptible_minions=""
-  if [[ "${PREEMPTIBLE_MINION}" == "true" ]]; then
+  if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
     preemptible_minions="--preemptible --maintenance-policy TERMINATE"
   fi
   while true; do
     echo "Attempt ${attempt} to create ${1}" >&2
     if ! gcloud compute instance-templates create "$template_name" \
       --project "${PROJECT}" \
-      --machine-type "${MINION_SIZE}" \
+      --machine-type "${NODE_SIZE}" \
-      --boot-disk-type "${MINION_DISK_TYPE}" \
+      --boot-disk-type "${NODE_DISK_TYPE}" \
-      --boot-disk-size "${MINION_DISK_SIZE}" \
+      --boot-disk-size "${NODE_DISK_SIZE}" \
-      --image-project="${MINION_IMAGE_PROJECT}" \
+      --image-project="${NODE_IMAGE_PROJECT}" \
-      --image "${MINION_IMAGE}" \
+      --image "${NODE_IMAGE}" \
-      --tags "${MINION_TAG}" \
+      --tags "${NODE_TAG}" \
       --network "${NETWORK}" \
       ${preemptible_minions} \
       $2 \
@@ -649,7 +649,7 @@ function kube-up {
   create-master-instance "${MASTER_RESERVED_IP}" &
   # Create a single firewall rule for all minions.
-  create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" &
+  create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" &
   # Report logging choice (if any).
   if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
@@ -663,8 +663,8 @@ function kube-up {
   # TODO(zmerlynn): Refactor setting scope flags.
   local scope_flags=
-  if [ -n "${MINION_SCOPES}" ]; then
+  if [ -n "${NODE_SCOPES}" ]; then
-    scope_flags="--scopes ${MINION_SCOPES}"
+    scope_flags="--scopes ${NODE_SCOPES}"
   else
     scope_flags="--no-scopes"
   fi
@@ -680,7 +680,7 @@ function kube-up {
     --project "${PROJECT}" \
     --zone "${ZONE}" \
     --base-instance-name "${NODE_INSTANCE_PREFIX}" \
-    --size "${NUM_MINIONS}" \
+    --size "${NUM_NODES}" \
     --template "$template_name" || true;
   gcloud compute instance-groups managed wait-until-stable \
     "${NODE_INSTANCE_PREFIX}-group" \
@@ -877,11 +877,11 @@ function kube-down {
   fi
   # Delete firewall rule for minions.
-  if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then
+  if gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-all" &>/dev/null; then
     gcloud compute firewall-rules delete \
       --project "${PROJECT}" \
       --quiet \
-      "${MINION_TAG}-all"
+      "${NODE_TAG}-all"
   fi
   # Delete routes.
@@ -989,7 +989,7 @@ function check-resources {
     return 1
   fi
-  if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then
+  if gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-all" &>/dev/null; then
     KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-all"
     return 1
   fi
@@ -1040,8 +1040,8 @@ function prepare-push() {
   # TODO(zmerlynn): Refactor setting scope flags.
   local scope_flags=
-  if [ -n "${MINION_SCOPES}" ]; then
+  if [ -n "${NODE_SCOPES}" ]; then
-    scope_flags="--scopes ${MINION_SCOPES}"
+    scope_flags="--scopes ${NODE_SCOPES}"
   else
     scope_flags="--no-scopes"
   fi
@@ -1105,8 +1105,8 @@ function kube-push {
   push-master
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
-    push-node "${MINION_NAMES[$i]}" &
+    push-node "${NODE_NAMES[$i]}" &
   done
   wait-for-jobs
@@ -1153,34 +1153,34 @@ function test-setup {
   local start=`date +%s`
   gcloud compute firewall-rules create \
     --project "${PROJECT}" \
-    --target-tags "${MINION_TAG}" \
+    --target-tags "${NODE_TAG}" \
     --allow tcp:80,tcp:8080 \
     --network "${NETWORK}" \
-    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true
+    "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true
   # As there is no simple way to wait longer for this operation we need to manually
   # wait some additional time (20 minutes altogether).
-  until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ]
+  until gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ]
   do sleep 5
   done
   # Check if the firewall rule exists and fail if it does not.
-  gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
+  gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt"
   # Open up the NodePort range
   # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
   start=`date +%s`
   gcloud compute firewall-rules create \
     --project "${PROJECT}" \
-    --target-tags "${MINION_TAG}" \
+    --target-tags "${NODE_TAG}" \
     --allow tcp:30000-32767,udp:30000-32767 \
     --network "${NETWORK}" \
-    "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true
+    "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true
   # As there is no simple way to wait longer for this operation we need to manually
   # wait some additional time (20 minutes altogether).
-  until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ]
+  until gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ]
   do sleep 5
   done
   # Check if the firewall rule exists and fail if it does not.
-  gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports"
+  gcloud compute firewall-rules describe --project "${PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports"
 }
 # Execute after running tests to perform any required clean-up. This is called
@@ -1191,11 +1191,11 @@ function test-teardown {
   gcloud compute firewall-rules delete \
     --project "${PROJECT}" \
     --quiet \
-    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true
+    "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" || true
   gcloud compute firewall-rules delete \
     --project "${PROJECT}" \
     --quiet \
-    "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" || true
+    "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" || true
   "${KUBE_ROOT}/cluster/kube-down.sh"
 }
@@ -1333,7 +1333,7 @@ KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
 ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
 MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
 MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
-NUM_MINIONS: $(yaml-quote ${NUM_MINIONS})
+NUM_NODES: $(yaml-quote ${NUM_NODES})
 EOF
 if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
   cat >>$file <<EOF


@@ -19,7 +19,7 @@
 # - CLUSTER_NAME (the name of the cluster)
 ZONE="${ZONE:-us-central1-f}"
-NUM_MINIONS="${NUM_MINIONS:-3}"
+NUM_NODES="${NUM_NODES:-3}"
 CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"
 NETWORK="${NETWORK:-default}"
 NETWORK_RANGE="${NETWORK_RANGE:-10.240.0.0/16}"
@@ -27,7 +27,7 @@ FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}"
 GCLOUD="${GCLOUD:-gcloud}"
 CMD_GROUP="${CMD_GROUP:-}"
 GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
-MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}"
+NODE_SCOPES="${NODE_SCOPES:-"compute-rw,storage-ro"}"
 MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-2}"
 # WARNING: any new vars added here must correspond to options that can be


@@ -17,7 +17,7 @@
 # The following are test-specific settings.
 CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}"
 NETWORK=${KUBE_GKE_NETWORK:-e2e}
-MINION_TAG="k8s-${CLUSTER_NAME}-node"
+NODE_TAG="k8s-${CLUSTER_NAME}-node"
 # For ease of maintenance, extract any pieces that do not vary between default


@@ -112,8 +112,8 @@ function verify-prereqs() {
 #   CLUSTER_NAME
 #   ZONE
 #   CLUSTER_API_VERSION (optional)
-#   NUM_MINIONS
+#   NUM_NODES
-#   MINION_SCOPES
+#   NODE_SCOPES
 #   MACHINE_TYPE
 function kube-up() {
   echo "... in gke:kube-up()" >&2
@@ -143,9 +143,9 @@ function kube-up() {
   local create_args=(
     "--zone=${ZONE}"
     "--project=${PROJECT}"
-    "--num-nodes=${NUM_MINIONS}"
+    "--num-nodes=${NUM_NODES}"
     "--network=${NETWORK}"
-    "--scopes=${MINION_SCOPES}"
+    "--scopes=${NODE_SCOPES}"
     "--cluster-version=${CLUSTER_API_VERSION}"
     "--machine-type=${MACHINE_TYPE}"
   )
@@ -163,7 +163,7 @@ function kube-up() {
 #   GCLOUD
 #   ZONE
 # Vars set:
-#   MINION_TAG
+#   NODE_TAG
 function test-setup() {
   echo "... in gke:test-setup()" >&2
   # Detect the project into $PROJECT if it isn't set
@@ -171,22 +171,22 @@ function test-setup() {
   detect-minions >&2
   # At this point, CLUSTER_NAME should have been used, so its value is final.
-  MINION_TAG=$($GCLOUD compute instances describe ${MINION_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1)
+  NODE_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node" | head -1)
-  OLD_MINION_TAG="k8s-${CLUSTER_NAME}-node"
+  OLD_NODE_TAG="k8s-${CLUSTER_NAME}-node"
   # Open up port 80 & 8080 so common containers on minions can be reached.
   "${GCLOUD}" compute firewall-rules create \
     "${CLUSTER_NAME}-http-alt" \
     --allow tcp:80,tcp:8080 \
     --project "${PROJECT}" \
-    --target-tags "${MINION_TAG},${OLD_MINION_TAG}" \
+    --target-tags "${NODE_TAG},${OLD_NODE_TAG}" \
     --network="${NETWORK}"
   "${GCLOUD}" compute firewall-rules create \
     "${CLUSTER_NAME}-nodeports" \
     --allow tcp:30000-32767,udp:30000-32767 \
     --project "${PROJECT}" \
-    --target-tags "${MINION_TAG},${OLD_MINION_TAG}" \
+    --target-tags "${NODE_TAG},${OLD_NODE_TAG}" \
     --network="${NETWORK}"
 }
@@ -209,7 +209,7 @@ function detect-master() {
 # Assumed vars:
 #   none
 # Vars set:
-#   MINION_NAMES
+#   NODE_NAMES
 function detect-minions() {
   echo "... in gke:detect-minions()" >&2
   detect-minion-names
@@ -220,16 +220,16 @@ function detect-minions() {
 # Assumed vars:
 #   none
 # Vars set:
-#   MINION_NAMES
+#   NODE_NAMES
 function detect-minion-names {
   echo "... in gke:detect-minion-names()" >&2
   detect-project
   detect-node-instance-group
-  MINION_NAMES=($(gcloud compute instance-groups managed list-instances \
+  NODE_NAMES=($(gcloud compute instance-groups managed list-instances \
     "${NODE_INSTANCE_GROUP}" --zone "${ZONE}" --project "${PROJECT}" \
     --format=yaml | grep instance: | cut -d ' ' -f 2))
-  echo "MINION_NAMES=${MINION_NAMES[*]}"
+  echo "NODE_NAMES=${NODE_NAMES[*]}"
 }
 # Detect instance group name generated by gke

View File

@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
NUM_MINIONS=${NUM_MINIONS:-2} NUM_NODES=${NUM_NODES:-2}

View File

@ -69,9 +69,9 @@ function detect-minions() {
# ] # ]
# Strip out the IP addresses # Strip out the IP addresses
export KUBE_MINION_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}")) export KUBE_NODE_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}"))
# echo "Kubernetes minions: " ${KUBE_MINION_IP_ADDRESSES[@]} 1>&2 # echo "Kubernetes minions: " ${KUBE_NODE_IP_ADDRESSES[@]} 1>&2
export NUM_MINIONS=${#KUBE_MINION_IP_ADDRESSES[@]} export NUM_NODES=${#KUBE_NODE_IP_ADDRESSES[@]}
} }
function get-password() { function get-password() {

View File

@ -26,12 +26,12 @@ function detect-master {
# Get minion names if they are not static. # Get minion names if they are not static.
function detect-minion-names { function detect-minion-names {
echo "MINION_NAMES: [${MINION_NAMES[*]}]" 1>&2 echo "NODE_NAMES: [${NODE_NAMES[*]}]" 1>&2
} }
# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] # Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-minions { function detect-minions {
echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2 echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2
} }
# Verify prereqs on host machine # Verify prereqs on host machine

View File

@ -22,11 +22,11 @@
GCLOUD=gcloud GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b} ZONE=${KUBE_GCE_ZONE:-us-central1-b}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-4} MASTER_SIZE=${MASTER_SIZE:-n1-standard-4}
NUM_MINIONS=${NUM_MINIONS:-100} NUM_NODES=${NUM_NODES:-100}
MASTER_DISK_TYPE=pd-ssd MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
PREEMPTIBLE_MINION=${PREEMPTIBLE_MINION:-false} PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian} OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20151103}

View File

@ -17,8 +17,8 @@
## Contains configuration values for interacting with the libvirt CoreOS cluster ## Contains configuration values for interacting with the libvirt CoreOS cluster
# Number of minions in the cluster # Number of minions in the cluster
NUM_MINIONS=${NUM_MINIONS:-3} NUM_NODES=${NUM_NODES:-3}
export NUM_MINIONS export NUM_NODES
# The IP of the master # The IP of the master
export MASTER_IP="192.168.10.1" export MASTER_IP="192.168.10.1"
@ -33,18 +33,18 @@ MASTER_CONTAINER_NETMASK="255.255.255.0"
MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
if [[ "$NUM_MINIONS" -gt 253 ]]; then if [[ "$NUM_NODES" -gt 253 ]]; then
echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 nodes" echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 nodes"
exit 1 exit 1
fi fi
for ((i=0; i < NUM_MINIONS; i++)) do for ((i=0; i < NUM_NODES; i++)) do
NODE_IPS[$i]="${NODE_IP_BASE}$((i+2))" NODE_IPS[$i]="${NODE_IP_BASE}$((i+2))"
NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))" NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))"
NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
done done
NODE_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET
SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET

View File

@ -167,8 +167,8 @@ function wait-cluster-readiness {
local timeout=120 local timeout=120
while [[ $timeout -ne 0 ]]; do while [[ $timeout -ne 0 ]]; do
nb_ready_nodes=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true) nb_ready_nodes=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true)
echo "Nb ready nodes: $nb_ready_nodes / $NUM_MINIONS" echo "Nb ready nodes: $nb_ready_nodes / $NUM_NODES"
if [[ "$nb_ready_nodes" -eq "$NUM_MINIONS" ]]; then if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then
return 0 return 0
fi fi
@ -191,8 +191,8 @@ function kube-up {
readonly kubernetes_dir="$POOL_PATH/kubernetes" readonly kubernetes_dir="$POOL_PATH/kubernetes"
local i local i
for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
if [[ $i -eq $NUM_MINIONS ]]; then if [[ $i -eq $NUM_NODES ]]; then
etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380" etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380"
else else
etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380" etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380"
@ -201,8 +201,8 @@ function kube-up {
etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}") etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}")
readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}") readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}")
for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
if [[ $i -eq $NUM_MINIONS ]]; then if [[ $i -eq $NUM_NODES ]]; then
type=master type=master
name=$MASTER_NAME name=$MASTER_NAME
public_ip=$MASTER_IP public_ip=$MASTER_IP
@ -262,7 +262,7 @@ function upload-server-tars {
function kube-push { function kube-push {
kube-push-internal kube-push-internal
ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler" ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler"
for ((i=0; i < NUM_MINIONS; i++)); do for ((i=0; i < NUM_NODES; i++)); do
ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy" ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy"
done done
wait-cluster-readiness wait-cluster-readiness
@ -317,7 +317,7 @@ function ssh-to-node {
elif [[ "$node" == "$MASTER_NAME" ]]; then elif [[ "$node" == "$MASTER_NAME" ]]; then
machine="$MASTER_IP" machine="$MASTER_IP"
else else
for ((i=0; i < NUM_MINIONS; i++)); do for ((i=0; i < NUM_NODES; i++)); do
if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then
machine="${NODE_IPS[$i]}" machine="${NODE_IPS[$i]}"
break break

View File

@ -16,10 +16,10 @@
## Contains configuration values for interacting with the mesos/docker cluster ## Contains configuration values for interacting with the mesos/docker cluster
NUM_MINIONS=${NUM_MINIONS:-2} NUM_NODES=${NUM_NODES:-2}
INSTANCE_PREFIX="${INSTANCE_PREFIX:-kubernetes}" INSTANCE_PREFIX="${INSTANCE_PREFIX:-kubernetes}"
MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_NAME="${INSTANCE_PREFIX}-master"
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
SERVICE_CLUSTER_IP_RANGE=10.10.10.0/24 SERVICE_CLUSTER_IP_RANGE=10.10.10.0/24

View File

@ -15,8 +15,8 @@
# limitations under the License. # limitations under the License.
## Contains configuration values for interacting with the docker-compose cluster in test mode ## Contains configuration values for interacting with the docker-compose cluster in test mode
# Set NUM_MINIONS to the minimum required for testing. # Set NUM_NODES to the minimum required for testing.
NUM_MINIONS=2 NUM_NODES=2
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../.. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/config-default.sh" source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/config-default.sh"

View File

@ -204,7 +204,7 @@ function detect-master {
echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" 1>&2 echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" 1>&2
} }
# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] # Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
# These Mesos slaves MAY host Kubelets, # These Mesos slaves MAY host Kubelets,
# but might not have a Kubelet running unless a kubernetes task has been scheduled on them. # but might not have a Kubelet running unless a kubernetes task has been scheduled on them.
function detect-minions { function detect-minions {
@ -215,9 +215,9 @@ function detect-minions {
fi fi
while read -r docker_id; do while read -r docker_id; do
local minion_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" "${docker_id}") local minion_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" "${docker_id}")
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
done <<< "$docker_ids" done <<< "$docker_ids"
echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2 echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2
} }
# Verify prereqs on host machine # Verify prereqs on host machine
@ -283,8 +283,8 @@ function kube-up {
echo "Starting ${KUBERNETES_PROVIDER} cluster" 1>&2 echo "Starting ${KUBERNETES_PROVIDER} cluster" 1>&2
cluster::mesos::docker::docker_compose up -d cluster::mesos::docker::docker_compose up -d
echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_MINIONS} slaves" echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_NODES} slaves"
cluster::mesos::docker::docker_compose scale mesosslave=${NUM_MINIONS} cluster::mesos::docker::docker_compose scale mesosslave=${NUM_NODES}
# await-health-check requires GNU timeout # await-health-check requires GNU timeout
# apiserver hostname resolved by docker # apiserver hostname resolved by docker

View File

@ -8,7 +8,7 @@ These options apply across providers. There are additional documents for option
This is a work-in-progress; not all options are documented yet! This is a work-in-progress; not all options are documented yet!
**NUM_MINIONS** **NUM_NODES**
The number of minion instances to create. Most providers default this to 4. The number of minion instances to create. Most providers default this to 4.
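A hedged example of overriding that default before invoking the cluster scripts (the value 8 is hypothetical):

```sh
# Override the provider default node count for this shell session.
export NUM_NODES=8
cluster/kube-up.sh
```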

View File

@ -16,7 +16,7 @@
# Sane defaults for dev environments. The following variables can be easily overridden # Sane defaults for dev environments. The following variables can be easily overridden
# by setting each as an ENV variable ahead of time: # by setting each as an ENV variable ahead of time:
# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME # KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_NODES, NOVA_NETWORK and SSH_KEY_NAME
# Shared # Shared
KUBE_IMAGE="${KUBE_IMAGE-f2a71670-ced3-4274-80b6-0efcd0f8f91b}" # CoreOS(Beta) KUBE_IMAGE="${KUBE_IMAGE-f2a71670-ced3-4274-80b6-0efcd0f8f91b}" # CoreOS(Beta)
@ -32,9 +32,9 @@ MASTER_TAG="tags=${INSTANCE_PREFIX}-master"
# Node # Node
KUBE_NODE_FLAVOR="${KUBE_NODE_FLAVOR-general1-2}" KUBE_NODE_FLAVOR="${KUBE_NODE_FLAVOR-general1-2}"
NUM_MINIONS="${NUM_MINIONS-4}" NUM_NODES="${NUM_NODES-4}"
NODE_TAG="tags=${INSTANCE_PREFIX}-node" NODE_TAG="tags=${INSTANCE_PREFIX}-node"
NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_MINIONS}})) NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_NODES}}))
KUBE_NETWORK="10.240.0.0/16" KUBE_NETWORK="10.240.0.0/16"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET

View File

@ -27,7 +27,7 @@ role=${role:-"ai i i"}
export roles=($role) export roles=($role)
# Define minion numbers # Define minion numbers
export NUM_MINIONS=${NUM_MINIONS:-3} export NUM_NODES=${NUM_NODES:-3}
# define the IP range used for service cluster IPs. # define the IP range used for service cluster IPs.
# according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here. # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # formerly PORTAL_NET export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # formerly PORTAL_NET

View File

@ -17,8 +17,8 @@
## Contains configuration values for interacting with the Vagrant cluster ## Contains configuration values for interacting with the Vagrant cluster
# Number of minions in the cluster # Number of minions in the cluster
NUM_MINIONS=${NUM_MINIONS-"1"} NUM_NODES=${NUM_NODES-"1"}
export NUM_MINIONS export NUM_NODES
# The IP of the master # The IP of the master
export MASTER_IP=${MASTER_IP-"10.245.1.2"} export MASTER_IP=${MASTER_IP-"10.245.1.2"}
@ -31,19 +31,19 @@ export MASTER_NAME="${INSTANCE_PREFIX}-master"
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
# Map out the IPs, names and container subnets of each minion # Map out the IPs, names and container subnets of each minion
export MINION_IP_BASE=${MINION_IP_BASE-"10.245.1."} export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."}
MINION_CONTAINER_SUBNET_BASE="10.246" NODE_CONTAINER_SUBNET_BASE="10.246"
MASTER_CONTAINER_NETMASK="255.255.255.0" MASTER_CONTAINER_NETMASK="255.255.255.0"
MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1" MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
MASTER_CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.1/24" MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16" CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
for ((i=0; i < NUM_MINIONS; i++)) do for ((i=0; i < NUM_NODES; i++)) do
MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))" NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))"
MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))" NODE_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1" NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
MINION_CONTAINER_NETMASKS[$i]="255.255.255.0" NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
VAGRANT_MINION_NAMES[$i]="minion-$((i+1))" VAGRANT_NODE_NAMES[$i]="minion-$((i+1))"
done done
SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET

View File

@ -15,8 +15,8 @@
# limitations under the License. # limitations under the License.
## Contains configuration values for interacting with the Vagrant cluster in test mode ## Contains configuration values for interacting with the Vagrant cluster in test mode
# Set NUM_MINIONS to the minimum required for testing. # Set NUM_NODES to the minimum required for testing.
NUM_MINIONS=2 NUM_NODES=2
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/config-default.sh" source "${KUBE_ROOT}/cluster/vagrant/config-default.sh"

View File

@ -59,7 +59,7 @@ cd "${KUBE_ROOT}"
echo All verbose output will be redirected to $logfile, use --logfile option to change. echo All verbose output will be redirected to $logfile, use --logfile option to change.
printf "Start the cluster with 2 minions .. " printf "Start the cluster with 2 minions .. "
export NUM_MINIONS=2 export NUM_NODES=2
export KUBERNETES_PROVIDER=vagrant export KUBERNETES_PROVIDER=vagrant
(cluster/kube-up.sh >>"$logfile" 2>&1) || true (cluster/kube-up.sh >>"$logfile" 2>&1) || true

View File

@ -68,9 +68,9 @@ fi
# Setup hosts file to support ping by hostname to each minion in the cluster from apiserver # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
minion=${MINION_NAMES[$i]} minion=${NODE_NAMES[$i]}
ip=${MINION_IPS[$i]} ip=${NODE_IPS[$i]}
if [ ! "$(cat /etc/hosts | grep $minion)" ]; then if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
echo "Adding $minion to hosts file" echo "Adding $minion to hosts file"
echo "$ip $minion" >> /etc/hosts echo "$ip $minion" >> /etc/hosts

View File

@ -70,7 +70,7 @@ EOF
# Set the host name explicitly # Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430 # See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MINION_NAME} hostnamectl set-hostname ${NODE_NAME}
if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=21 ]]; then if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=21 ]]; then
# Workaround for vagrant's inability to guess the interface naming sequence # Workaround for vagrant's inability to guess the interface naming sequence
@ -94,12 +94,12 @@ if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
echo "Adding $MASTER_NAME to hosts file" echo "Adding $MASTER_NAME to hosts file"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi fi
echo "$MINION_IP $MINION_NAME" >> /etc/hosts echo "$NODE_IP $NODE_NAME" >> /etc/hosts
# Setup hosts file to support ping by hostname to each minion in the cluster # Setup hosts file to support ping by hostname to each minion in the cluster
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
minion=${MINION_NAMES[$i]} minion=${NODE_NAMES[$i]}
ip=${MINION_IPS[$i]} ip=${NODE_IPS[$i]}
if [ ! "$(cat /etc/hosts | grep $minion)" ]; then if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
echo "Adding $minion to hosts file" echo "Adding $minion to hosts file"
echo "$ip $minion" >> /etc/hosts echo "$ip $minion" >> /etc/hosts
@ -145,13 +145,13 @@ cat <<EOF >/etc/salt/minion.d/grains.conf
grains: grains:
cloud: vagrant cloud: vagrant
network_mode: openvswitch network_mode: openvswitch
node_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' node_ip: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")' networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
roles: roles:
- kubernetes-pool - kubernetes-pool
cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")' cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' hostname_override: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")' docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF EOF

View File

@ -25,10 +25,10 @@ function detect-master () {
echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2 echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
} }
# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] # Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-minions { function detect-minions {
echo "Minions already detected" 1>&2 echo "Minions already detected" 1>&2
KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}") KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
} }
# Verify prereqs on host machine. Also sets and exports USING_KUBE_SCRIPTS=true so # Verify prereqs on host machine. Also sets and exports USING_KUBE_SCRIPTS=true so
@ -124,15 +124,15 @@ function create-provision-scripts {
echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'" echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "MASTER_NAME='${INSTANCE_PREFIX}-master'" echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
echo "MASTER_IP='${MASTER_IP}'" echo "MASTER_IP='${MASTER_IP}'"
echo "MINION_NAMES=(${MINION_NAMES[@]})" echo "NODE_NAMES=(${NODE_NAMES[@]})"
echo "MINION_IPS=(${MINION_IPS[@]})" echo "NODE_IPS=(${NODE_IPS[@]})"
echo "NODE_IP='${MASTER_IP}'" echo "NODE_IP='${MASTER_IP}'"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'" echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'" echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "MASTER_USER='${MASTER_USER}'" echo "MASTER_USER='${MASTER_USER}'"
echo "MASTER_PASSWD='${MASTER_PASSWD}'" echo "MASTER_PASSWD='${MASTER_PASSWD}'"
@ -163,21 +163,21 @@ function create-provision-scripts {
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh" awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
) > "${KUBE_TEMP}/master-start.sh" ) > "${KUBE_TEMP}/master-start.sh"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
( (
echo "#! /bin/bash" echo "#! /bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'" echo "MASTER_NAME='${MASTER_NAME}'"
echo "MASTER_IP='${MASTER_IP}'" echo "MASTER_IP='${MASTER_IP}'"
echo "MINION_NAMES=(${MINION_NAMES[@]})" echo "NODE_NAMES=(${NODE_NAMES[@]})"
echo "MINION_NAME=(${MINION_NAMES[$i]})" echo "NODE_NAME=(${NODE_NAMES[$i]})"
echo "MINION_IPS=(${MINION_IPS[@]})" echo "NODE_IPS=(${NODE_IPS[@]})"
echo "MINION_IP='${MINION_IPS[$i]}'" echo "NODE_IP='${NODE_IPS[$i]}'"
echo "MINION_ID='$i'" echo "NODE_ID='$i'"
echo "NODE_IP='${MINION_IPS[$i]}'" echo "NODE_IP='${NODE_IPS[$i]}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'" echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
echo "CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'" echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'" echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'" echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
@ -222,9 +222,9 @@ function verify-cluster {
# verify each minion has all required daemons # verify each minion has all required daemons
local i local i
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
echo "Validating ${VAGRANT_MINION_NAMES[$i]}" echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
local machine=${VAGRANT_MINION_NAMES[$i]} local machine=${VAGRANT_NODE_NAMES[$i]}
local -a required_daemon=("salt-minion" "kubelet" "docker") local -a required_daemon=("salt-minion" "kubelet" "docker")
local validated="1" local validated="1"
until [[ "$validated" == "0" ]]; do until [[ "$validated" == "0" ]]; do
@ -242,13 +242,13 @@ function verify-cluster {
echo echo
echo "Waiting for each minion to be registered with cloud provider" echo "Waiting for each minion to be registered with cloud provider"
for (( i=0; i<${#MINION_IPS[@]}; i++)); do for (( i=0; i<${#NODE_IPS[@]}; i++)); do
local machine="${MINION_IPS[$i]}" local machine="${NODE_IPS[$i]}"
local count="0" local count="0"
until [[ "$count" == "1" ]]; do until [[ "$count" == "1" ]]; do
local minions local minions
minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1) minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1)
count=$(echo $minions | grep -c "${MINION_IPS[i]}") || { count=$(echo $minions | grep -c "${NODE_IPS[i]}") || {
printf "." printf "."
sleep 2 sleep 2
count="0" count="0"
@ -339,7 +339,7 @@ function test-teardown {
# Find the minion name based on the IP address # Find the minion name based on the IP address
function find-vagrant-name-by-ip { function find-vagrant-name-by-ip {
local ip="$1" local ip="$1"
local ip_pattern="${MINION_IP_BASE}(.*)" local ip_pattern="${NODE_IP_BASE}(.*)"
# This is subtle. We map 10.245.2.2 -> minion-1. We do this by matching a # This is subtle. We map 10.245.2.2 -> minion-1. We do this by matching a
# regexp and using the capture to construct the name. # regexp and using the capture to construct the name.

View File

@ -24,7 +24,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh" source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh" source "${KUBE_ROOT}/cluster/kube-util.sh"
EXPECTED_NUM_NODES="${NUM_MINIONS}" EXPECTED_NUM_NODES="${NUM_NODES}"
if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1)) EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1))
fi fi

View File

@ -14,22 +14,22 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
NUM_MINIONS=4 NUM_NODES=4
DISK=./kube/kube.vmdk DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest GUEST_ID=debian7_64Guest
INSTANCE_PREFIX=kubernetes INSTANCE_PREFIX=kubernetes
MASTER_TAG="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion" NODE_TAG="${INSTANCE_PREFIX}-minion"
MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_MEMORY_MB=1024 MASTER_MEMORY_MB=1024
MASTER_CPU=1 MASTER_CPU=1
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
MINION_MEMORY_MB=2048 NODE_MEMORY_MB=2048
MINION_CPU=1 NODE_CPU=1
SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET

View File

@ -14,22 +14,22 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
NUM_MINIONS=2 NUM_NODES=2
DISK=./kube/kube.vmdk DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest GUEST_ID=debian7_64Guest
INSTANCE_PREFIX="e2e-test-${USER}" INSTANCE_PREFIX="e2e-test-${USER}"
MASTER_TAG="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion" NODE_TAG="${INSTANCE_PREFIX}-minion"
MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_MEMORY_MB=1024 MASTER_MEMORY_MB=1024
MASTER_CPU=1 MASTER_CPU=1
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
MINION_MEMORY_MB=1024 NODE_MEMORY_MB=1024
MINION_CPU=1 NODE_CPU=1
SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET

View File

@ -41,7 +41,7 @@ grains:
roles: roles:
- kubernetes-pool - kubernetes-pool
- kubernetes-pool-vsphere - kubernetes-pool-vsphere
cbr-cidr: $MINION_IP_RANGE cbr-cidr: $NODE_IP_RANGE
EOF EOF
# Install Salt # Install Salt

View File

@ -45,21 +45,21 @@ function detect-master {
# Detect the information about the minions # Detect the information about the minions
# #
# Assumed vars: # Assumed vars:
# MINION_NAMES # NODE_NAMES
# Vars set: # Vars set:
# KUBE_MINION_IP_ADDRESS (array) # KUBE_NODE_IP_ADDRESS (array)
function detect-minions { function detect-minions {
KUBE_MINION_IP_ADDRESSES=() KUBE_NODE_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]}) local minion_ip=$(govc vm.ip ${NODE_NAMES[$i]})
if [[ -z "${minion_ip-}" ]] ; then if [[ -z "${minion_ip-}" ]] ; then
echo "Did not find ${MINION_NAMES[$i]}" >&2 echo "Did not find ${NODE_NAMES[$i]}" >&2
else else
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" echo "Found ${NODE_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
fi fi
done done
if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2 echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1 exit 1
fi fi
@ -266,20 +266,20 @@ function kube-up {
echo "Starting minion VMs (this can take a minute)..." echo "Starting minion VMs (this can take a minute)..."
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
( (
echo "#! /bin/bash" echo "#! /bin/bash"
echo "readonly MY_NAME=${MINION_NAMES[$i]}" echo "readonly MY_NAME=${NODE_NAMES[$i]}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "KUBE_MASTER=${KUBE_MASTER}" echo "KUBE_MASTER=${KUBE_MASTER}"
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" echo "NODE_IP_RANGE=${NODE_IP_RANGES[$i]}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh" ) > "${KUBE_TEMP}/minion-start-${i}.sh"
( (
kube-up-vm "${MINION_NAMES[$i]}" -c ${MINION_CPU-1} -m ${MINION_MEMORY_MB-1024} kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024}
kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh"
) & ) &
done done
@ -312,10 +312,10 @@ function kube-up {
printf " OK\n" printf " OK\n"
local i local i
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "Waiting for ${MINION_NAMES[$i]} to become available..." printf "Waiting for ${NODE_NAMES[$i]} to become available..."
until curl --max-time 5 \ until curl --max-time 5 \
--fail --output /dev/null --silent "http://${KUBE_MINION_IP_ADDRESSES[$i]}:10250/healthz"; do --fail --output /dev/null --silent "http://${KUBE_NODE_IP_ADDRESSES[$i]}:10250/healthz"; do
printf "." printf "."
sleep 2 sleep 2
done done
@ -347,10 +347,10 @@ function kube-up {
# Basic sanity checking # Basic sanity checking
local i local i
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
# Make sure docker is installed # Make sure docker is installed
kube-ssh "${KUBE_MINION_IP_ADDRESSES[$i]}" which docker > /dev/null || { kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || {
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 echo "Docker failed to install on ${NODE_NAMES[$i]}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo "cluster. (sorry!)" >&2 echo "cluster. (sorry!)" >&2
exit 1 exit 1
@ -372,8 +372,8 @@ function kube-up {
function kube-down { function kube-down {
govc vm.destroy ${MASTER_NAME} & govc vm.destroy ${MASTER_NAME} &
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
govc vm.destroy ${MINION_NAMES[i]} & govc vm.destroy ${NODE_NAMES[i]} &
done done
wait wait

View File

@ -41,7 +41,7 @@ At v1.0, Kubernetes supports clusters up to 100 nodes with 30 pods per node and
A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane). A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane).
Normally the number of nodes in a cluster is controlled by the value `NUM_MINIONS` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)). Normally the number of nodes in a cluster is controlled by the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)).
Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run into quota issues and fail to bring the cluster up. Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run into quota issues and fail to bring the cluster up.
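A minimal sketch of the override this paragraph describes (the count is hypothetical; stay within your provider quota):

```sh
# One-off override without editing the platform's config-default.sh.
NUM_NODES=10 cluster/kube-up.sh
```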

View File

@ -250,7 +250,7 @@ cross-AZ-clusters are more convenient.
* For auto-scaling, it creates a launch configuration and group for the nodes. * For auto-scaling, it creates a launch configuration and group for the nodes.
The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-minion-group. The default The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-minion-group. The default
name is kubernetes-minion-group. The auto-scaling group has a min and max size name is kubernetes-minion-group. The auto-scaling group has a min and max size
that are both set to NUM_MINIONS. You can change the size of the auto-scaling that are both set to NUM_NODES. You can change the size of the auto-scaling
group to add or remove the total number of nodes from within the AWS API or group to add or remove the total number of nodes from within the AWS API or
Console. Each node self-configures, meaning that it comes up; runs Salt with Console. Each node self-configures, meaning that it comes up; runs Salt with
the stored configuration; connects to the master; is assigned an internal CIDR; the stored configuration; connects to the master; is assigned an internal CIDR;
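The resize mentioned above can also be scripted; a hedged sketch using the AWS CLI, assuming the default kubernetes-minion-group name:

```sh
# Grow (or shrink) the cluster to 5 nodes by resizing the auto-scaling group.
aws autoscaling update-auto-scaling-group \
  --auto-scaling-group-name kubernetes-minion-group \
  --min-size 5 --max-size 5 --desired-capacity 5
```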

View File

@ -132,11 +132,11 @@ differentiate it from `docker0`) is set up outside of Docker proper.
Example of GCE's advanced routing rules: Example of GCE's advanced routing rules:
```sh ```sh
gcloud compute routes add "${MINION_NAMES[$i]}" \ gcloud compute routes add "${NODE_NAMES[$i]}" \
--project "${PROJECT}" \ --project "${PROJECT}" \
--destination-range "${MINION_IP_RANGES[$i]}" \ --destination-range "${NODE_IP_RANGES[$i]}" \
--network "${NETWORK}" \ --network "${NETWORK}" \
--next-hop-instance "${MINION_NAMES[$i]}" \ --next-hop-instance "${NODE_NAMES[$i]}" \
--next-hop-instance-zone "${ZONE}" & --next-hop-instance-zone "${ZONE}" &
``` ```

View File

@ -301,7 +301,7 @@ Congratulations!
The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`: The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`:
```sh ```sh
NUM_MINIONS=3 hack/e2e-test.sh NUM_NODES=3 hack/e2e-test.sh
``` ```
### Troubleshooting ### Troubleshooting
@ -350,10 +350,10 @@ Are you sure you built a release first? Did you install `net-tools`? For more cl
#### I want to change the number of nodes! #### I want to change the number of nodes!
You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_MINIONS` to 1 like so: You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_NODES` to 1 like so:
```sh ```sh
export NUM_MINIONS=1 export NUM_NODES=1
``` ```
#### I want my VMs to have more memory! #### I want my VMs to have more memory!
@ -369,7 +369,7 @@ If you need more granular control, you can set the amount of memory for the mast
```sh ```sh
export KUBERNETES_MASTER_MEMORY=1536 export KUBERNETES_MASTER_MEMORY=1536
export KUBERNETES_MINION_MEMORY=2048 export KUBERNETES_NODE_MEMORY=2048
``` ```
#### I ran vagrant suspend and nothing works! #### I ran vagrant suspend and nothing works!

View File

@ -73,7 +73,7 @@ To start a Kubemark cluster on GCE you need to create an external cluster (it ca
`make quick-release`) and run the `test/kubemark/start-kubemark.sh` script. This script will create a VM for master components, Pods for HollowNodes, and do all the setup necessary `make quick-release`) and run the `test/kubemark/start-kubemark.sh` script. This script will create a VM for master components, Pods for HollowNodes, and do all the setup necessary
to let them talk to each other. It will use the configuration stored in `cluster/kubemark/config-default.sh` - you can tweak it however you want, but note that some features to let them talk to each other. It will use the configuration stored in `cluster/kubemark/config-default.sh` - you can tweak it however you want, but note that some features
may not be implemented yet, as the implementation of Hollow components/mocks will probably lag behind the real ones. For performance tests the interesting variables are may not be implemented yet, as the implementation of Hollow components/mocks will probably lag behind the real ones. For performance tests the interesting variables are
`NUM_MINIONS` and `MASTER_SIZE`. After the start-kubemark script finishes you'll have a ready Kubemark cluster; a kubeconfig file for talking to the Kubemark `NUM_NODES` and `MASTER_SIZE`. After the start-kubemark script finishes you'll have a ready Kubemark cluster; a kubeconfig file for talking to the Kubemark
cluster is stored in `test/kubemark/kubeconfig.loc`. cluster is stored in `test/kubemark/kubeconfig.loc`.
Currently we're running HollowNode with a limit of 0.05 of a CPU core and ~60MB of memory, which, taking into account default cluster addons and fluentD running on an 'external' Currently we're running HollowNode with a limit of 0.05 of a CPU core and ~60MB of memory, which, taking into account default cluster addons and fluentD running on an 'external'
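A minimal sketch of the tweak-then-start flow described above (both values are hypothetical):

```sh
# Override the variables called out above, then start the Kubemark cluster.
export NUM_NODES=100             # number of HollowNodes to simulate
export MASTER_SIZE=n1-standard-4 # machine type for the master VM
test/kubemark/start-kubemark.sh
```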

View File

@ -83,15 +83,15 @@ You can override the variables defined in [config-default.sh](http://releases.k8
```bash ```bash
export KUBE_AWS_ZONE=eu-west-1c export KUBE_AWS_ZONE=eu-west-1c
export NUM_MINIONS=2 export NUM_NODES=2
export MINION_SIZE=m3.medium export NODE_SIZE=m3.medium
export AWS_S3_REGION=eu-west-1 export AWS_S3_REGION=eu-west-1
export AWS_S3_BUCKET=mycompany-kubernetes-artifacts export AWS_S3_BUCKET=mycompany-kubernetes-artifacts
export INSTANCE_PREFIX=k8s export INSTANCE_PREFIX=k8s
... ...
``` ```
The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`; in particular, for clusters of fewer than 50 nodes it will The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`; in particular, for clusters of fewer than 50 nodes it will
use a `t2.micro`; for clusters of between 50 and 150 nodes it will use a `t2.small`; and for clusters of more than 150 nodes it will use a `t2.medium`. use a `t2.micro`; for clusters of between 50 and 150 nodes it will use a `t2.small`; and for clusters of more than 150 nodes it will use a `t2.medium`.
It will also try to create or reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion". It will also try to create or reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion".
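A worked instance of that sizing rule (hedged; an explicit NODE_SIZE always wins over the guess):

```sh
# 60 nodes falls in the 50-149 band, so the scripts would pick t2.small.
export NUM_NODES=60
unset NODE_SIZE   # leave unset to let the sizing heuristic decide
cluster/kube-up.sh
```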

View File

@ -167,7 +167,7 @@ cluster/kube-up.sh
The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine. The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.
The `NUM_MINIONS` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3. The `NUM_NODES` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3.
The `KUBE_PUSH` environment variable may be set to specify which Kubernetes binaries must be deployed on the cluster. Its possible values are: The `KUBE_PUSH` environment variable may be set to specify which Kubernetes binaries must be deployed on the cluster. Its possible values are:
@ -225,7 +225,7 @@ export KUBERNETES_PROVIDER=libvirt-coreos
Bring up a libvirt-CoreOS cluster of 5 nodes Bring up a libvirt-CoreOS cluster of 5 nodes
```sh ```sh
NUM_MINIONS=5 cluster/kube-up.sh NUM_NODES=5 cluster/kube-up.sh
``` ```
Destroy the libvirt-CoreOS cluster Destroy the libvirt-CoreOS cluster

View File

@ -84,7 +84,7 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo
- flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network. - flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network.
2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password). 2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password).
3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems. 3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems.
4. We then boot as many nodes as defined via `$NUM_MINIONS`. 4. We then boot as many nodes as defined via `$NUM_NODES`.
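A minimal sketch of driving these steps end to end (the provider name and node count are assumptions):

```sh
# Boot a Rackspace cluster; step 4 above boots $NUM_NODES nodes.
export KUBERNETES_PROVIDER=rackspace
export NUM_NODES=4
cluster/kube-up.sh
```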
## Some notes ## Some notes

View File

@ -72,8 +72,8 @@ To use rkt as the container runtime for your CoreOS cluster on GCE, you need to
```console ```console
$ export KUBE_OS_DISTRIBUTION=coreos $ export KUBE_OS_DISTRIBUTION=coreos
$ export KUBE_GCE_MINION_IMAGE=<image_id> $ export KUBE_GCE_NODE_IMAGE=<image_id>
$ export KUBE_GCE_MINION_PROJECT=coreos-cloud $ export KUBE_GCE_NODE_PROJECT=coreos-cloud
$ export KUBE_CONTAINER_RUNTIME=rkt $ export KUBE_CONTAINER_RUNTIME=rkt
``` ```

View File

@ -116,7 +116,7 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
export role="ai i i" export role="ai i i"
export NUM_MINIONS=${NUM_MINIONS:-3} export NUM_NODES=${NUM_NODES:-3}
export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24
@ -129,7 +129,7 @@ separated with blank space like `<user_1@ip_1> <user_2@ip_2> <user_3@ip_3> `
Then the `role` variable defines the role of each machine above, in the same order: "ai" stands for a machine that Then the `role` variable defines the role of each machine above, in the same order: "ai" stands for a machine that
acts as both master and node, "a" stands for master, and "i" stands for node. acts as both master and node, "a" stands for master, and "i" stands for node.
The `NUM_MINIONS` variable defines the total number of nodes. The `NUM_NODES` variable defines the total number of nodes.
The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service IP range. Please make sure The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service IP range. Please make sure
that you do have a valid private IP range defined here, because some IaaS providers may reserve private IPs. that you do have a valid private IP range defined here, because some IaaS providers may reserve private IPs.
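Any RFC 1918 block your IaaS does not already reserve will do; a hedged example:

```sh
# A hypothetical RFC 1918 choice for the service cluster IP range.
export SERVICE_CLUSTER_IP_RANGE=172.16.3.0/24
```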

View File

@ -389,10 +389,10 @@ Log on to one of the nodes (`vagrant ssh node-1`) and inspect the salt minion lo
#### I want to change the number of nodes! #### I want to change the number of nodes!
You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_MINIONS` to 1 like so: You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this by setting `NUM_NODES` to 1 like so:
```sh ```sh
export NUM_MINIONS=1 export NUM_NODES=1
``` ```
#### I want my VMs to have more memory! #### I want my VMs to have more memory!
@ -408,7 +408,7 @@ If you need more granular control, you can set the amount of memory for the mast
```sh ```sh
export KUBERNETES_MASTER_MEMORY=1536 export KUBERNETES_MASTER_MEMORY=1536
export KUBERNETES_MINION_MEMORY=2048 export KUBERNETES_NODE_MEMORY=2048
``` ```
#### I ran vagrant suspend and nothing works! #### I ran vagrant suspend and nothing works!

View File

@ -57,7 +57,7 @@ At the end of the example, we will have:
## Prerequisites ## Prerequisites
You should already have turned up a Kubernetes cluster. To get the most out of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_MINIONS` environment variable to 2 or more). You should already have turned up a Kubernetes cluster. To get the most out of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_NODES` environment variable to 2 or more).
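A minimal sketch of that prerequisite (2 is simply the smallest count that exercises multiple nodes):

```sh
# Request at least two nodes before bringing the cluster up.
export NUM_NODES=2
cluster/kube-up.sh
```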
## Step 1: Start the RabbitMQ service ## Step 1: Start the RabbitMQ service

View File

@ -43,7 +43,7 @@ of compute resources easier to follow by starting with an empty cluster.
``` ```
$ export KUBERNETES_PROVIDER=vagrant $ export KUBERNETES_PROVIDER=vagrant
$ export NUM_MINIONS=1 $ export NUM_NODES=1
$ export KUBE_ENABLE_CLUSTER_MONITORING=none $ export KUBE_ENABLE_CLUSTER_MONITORING=none
$ export KUBE_ENABLE_CLUSTER_DNS=false $ export KUBE_ENABLE_CLUSTER_DNS=false
$ export KUBE_ENABLE_CLUSTER_UI=false $ export KUBE_ENABLE_CLUSTER_UI=false

View File

@ -18,7 +18,7 @@
# supports key features for Kubernetes version 1.0. # supports key features for Kubernetes version 1.0.
# Instructions: # Instructions:
# - Set up a Kubernetes cluster with $NUM_MINIONS nodes (defined below). # - Set up a Kubernetes cluster with $NUM_NODES nodes (defined below).
# - Provide a Kubeconfig file whose current context is set to the # - Provide a Kubeconfig file whose current context is set to the
# cluster to be tested, and with suitable auth setting. # cluster to be tested, and with suitable auth setting.
# - Specify the location of that kubeconfig with, e.g.: # - Specify the location of that kubeconfig with, e.g.:
@ -78,10 +78,10 @@ echo "Conformance test checking conformance with Kubernetes version 1.0"
# somewhere in the description (i.e. either in the Describe part or the It part). # somewhere in the description (i.e. either in the Describe part or the It part).
# The list of tagged conformance tests can be retrieved by: # The list of tagged conformance tests can be retrieved by:
# #
# NUM_MINIONS=4 KUBERNETES_CONFORMANCE_TEST="y" \ # NUM_NODES=4 KUBERNETES_CONFORMANCE_TEST="y" \
# hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.dryRun=true # hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.dryRun=true
declare -x KUBERNETES_CONFORMANCE_TEST="y" declare -x KUBERNETES_CONFORMANCE_TEST="y"
declare -x NUM_MINIONS=4 declare -x NUM_NODES=4
hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.skip='\[Skipped\]' hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.skip='\[Skipped\]'
exit $? exit $?

View File

@ -103,7 +103,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
--cluster-tag="${CLUSTER_ID:-}" \ --cluster-tag="${CLUSTER_ID:-}" \
--repo-root="${KUBE_VERSION_ROOT}" \ --repo-root="${KUBE_VERSION_ROOT}" \
--node-instance-group="${NODE_INSTANCE_GROUP:-}" \ --node-instance-group="${NODE_INSTANCE_GROUP:-}" \
--num-nodes="${NUM_MINIONS:-}" \ --num-nodes="${NUM_NODES:-}" \
--prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \ --prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \
${E2E_CLEAN_START:+"--clean-start=true"} \ ${E2E_CLEAN_START:+"--clean-start=true"} \
${E2E_MIN_STARTUP_PODS:+"--minStartupPods=${E2E_MIN_STARTUP_PODS}"} \ ${E2E_MIN_STARTUP_PODS:+"--minStartupPods=${E2E_MIN_STARTUP_PODS}"} \

View File

@ -87,7 +87,7 @@ if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
KUBERNETES_PROVIDER="gce" KUBERNETES_PROVIDER="gce"
: ${E2E_MIN_STARTUP_PODS:="1"} : ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-central1-f"} : ${E2E_ZONE:="us-central1-f"}
: ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel : ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel
elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then
KUBERNETES_PROVIDER="gke" KUBERNETES_PROVIDER="gke"
@ -96,18 +96,18 @@ elif [[ ${JOB_NAME} =~ ^kubernetes-.*-aws ]]; then
KUBERNETES_PROVIDER="aws" KUBERNETES_PROVIDER="aws"
: ${E2E_MIN_STARTUP_PODS:="1"} : ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-east-1a"} : ${E2E_ZONE:="us-east-1a"}
: ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel : ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel
fi fi
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
if [[ "${PERFORMANCE:-}" == "true" ]]; then if [[ "${PERFORMANCE:-}" == "true" ]]; then
: ${MASTER_SIZE:="m3.xlarge"} : ${MASTER_SIZE:="m3.xlarge"}
: ${NUM_MINIONS:="100"} : ${NUM_NODES:="100"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"} : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
else else
: ${MASTER_SIZE:="m3.large"} : ${MASTER_SIZE:="m3.large"}
: ${MINION_SIZE:="m3.large"} : ${NODE_SIZE:="m3.large"}
: ${NUM_MINIONS:="3"} : ${NUM_NODES:="3"}
fi fi
fi fi
@ -360,7 +360,7 @@ case ${JOB_NAME} in
: ${PROJECT:="kubernetes-jenkins-pull"} : ${PROJECT:="kubernetes-jenkins-pull"}
: ${ENABLE_DEPLOYMENTS:=true} : ${ENABLE_DEPLOYMENTS:=true}
# Override GCE defaults # Override GCE defaults
NUM_MINIONS=${NUM_MINIONS_PARALLEL} NUM_NODES=${NUM_NODES_PARALLEL}
;; ;;
# Runs all non-flaky tests on GCE in parallel. # Runs all non-flaky tests on GCE in parallel.
@ -379,7 +379,7 @@ case ${JOB_NAME} in
: ${PROJECT:="kubernetes-jenkins"} : ${PROJECT:="kubernetes-jenkins"}
: ${ENABLE_DEPLOYMENTS:=true} : ${ENABLE_DEPLOYMENTS:=true}
# Override GCE defaults # Override GCE defaults
NUM_MINIONS=${NUM_MINIONS_PARALLEL} NUM_NODES=${NUM_NODES_PARALLEL}
;; ;;
# Runs all non-flaky tests on AWS in parallel. # Runs all non-flaky tests on AWS in parallel.
@ -396,7 +396,7 @@ case ${JOB_NAME} in
)"} )"}
: ${ENABLE_DEPLOYMENTS:=true} : ${ENABLE_DEPLOYMENTS:=true}
# Override AWS defaults. # Override AWS defaults.
NUM_MINIONS=${NUM_MINIONS_PARALLEL} NUM_NODES=${NUM_NODES_PARALLEL}
;; ;;
# Runs the flaky tests on GCE in parallel. # Runs the flaky tests on GCE in parallel.
@ -415,7 +415,7 @@ case ${JOB_NAME} in
: ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"} : ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
# Override GCE defaults. # Override GCE defaults.
NUM_MINIONS=${NUM_MINIONS_PARALLEL} NUM_NODES=${NUM_NODES_PARALLEL}
;; ;;
# Runs only the reboot tests on GCE. # Runs only the reboot tests on GCE.
@ -436,9 +436,9 @@ case ${JOB_NAME} in
: ${PROJECT:="kubernetes-jenkins"} : ${PROJECT:="kubernetes-jenkins"}
# Override GCE defaults. # Override GCE defaults.
MASTER_SIZE="n1-standard-4" MASTER_SIZE="n1-standard-4"
MINION_SIZE="n1-standard-2" NODE_SIZE="n1-standard-2"
MINION_DISK_SIZE="50GB" NODE_DISK_SIZE="50GB"
NUM_MINIONS="100" NUM_NODES="100"
# Reduce logs verbosity # Reduce logs verbosity
TEST_CLUSTER_LOG_LEVEL="--v=2" TEST_CLUSTER_LOG_LEVEL="--v=2"
# Increase resync period to simulate production # Increase resync period to simulate production
@ -458,9 +458,9 @@ case ${JOB_NAME} in
# Override GCE defaults. # Override GCE defaults.
E2E_ZONE="us-east1-b" E2E_ZONE="us-east1-b"
MASTER_SIZE="n1-standard-4" MASTER_SIZE="n1-standard-4"
MINION_SIZE="n1-standard-2" NODE_SIZE="n1-standard-2"
MINION_DISK_SIZE="50GB" NODE_DISK_SIZE="50GB"
NUM_MINIONS="100" NUM_NODES="100"
# Reduce logs verbosity # Reduce logs verbosity
TEST_CLUSTER_LOG_LEVEL="--v=2" TEST_CLUSTER_LOG_LEVEL="--v=2"
# Increase resync period to simulate production # Increase resync period to simulate production
@@ -559,8 +559,8 @@ case ${JOB_NAME} in
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
 : ${PROJECT:="kubekins-e2e-gce-trusty-rls"}
-: ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-: ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+: ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+: ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
 : ${KUBE_OS_DISTRIBUTION:="trusty"}
 : ${ENABLE_CLUSTER_REGISTRY:=false}
 : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -577,8 +577,8 @@ case ${JOB_NAME} in
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX="e2e-trusty-slow"}
 : ${PROJECT:="k8s-e2e-gce-trusty-slow"}
-: ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-: ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+: ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+: ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
 : ${KUBE_OS_DISTRIBUTION:="trusty"}
 : ${ENABLE_CLUSTER_REGISTRY:=false}
 : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -600,8 +600,8 @@ case ${JOB_NAME} in
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
 : ${PROJECT:="k8s-e2e-gce-trusty-beta"}
-: ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-: ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+: ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+: ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
 : ${KUBE_OS_DISTRIBUTION:="trusty"}
 : ${ENABLE_CLUSTER_REGISTRY:=false}
 : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -619,8 +619,8 @@ case ${JOB_NAME} in
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX="e2e-trusty-beta-slow"}
 : ${PROJECT:="k8s-e2e-gce-trusty-beta-slow"}
-: ${KUBE_GCE_MINION_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
-: ${KUBE_GCE_MINION_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
+: ${KUBE_GCE_NODE_PROJECT:="${TRUSTY_IMAGE_PROJECT}"}
+: ${KUBE_GCE_NODE_IMAGE:="$(get_latest_trusty_image ${JOB_NAME})"}
 : ${KUBE_OS_DISTRIBUTION:="trusty"}
 : ${ENABLE_CLUSTER_REGISTRY:=false}
 : ${JENKINS_EXPLICIT_VERSION:="release/v1.1.1"}
@@ -820,7 +820,7 @@ case ${JOB_NAME} in
 : ${GINKGO_TEST_ARGS:="--ginkgo.focus=GCE\sL7\sLoadBalancer\sController|Job|Horizontal\spod\sautoscaling"}
 # At least n1-standard-2 nodes are required for the cluster to
 # have enough cpu/ram to run the Horizontal pod autoscaling tests.
-MINION_SIZE="n1-standard-2"
+NODE_SIZE="n1-standard-2"
 ;;
 # Sets up the GKE soak cluster weekly using the latest CI release.
@@ -835,7 +835,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="true"}
 : ${PROJECT:="kubernetes-jenkins"}
 # Need at least n1-standard-2 nodes to run kubelet_perf tests
-MINION_SIZE="n1-standard-2"
+NODE_SIZE="n1-standard-2"
 ;;
 # Runs tests on GKE soak cluster.
@@ -1232,7 +1232,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="true"}
 : ${E2E_TEST:="false"}
 : ${E2E_DOWN:="false"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 kubernetes-upgrade-gce-step2-upgrade-master)
@@ -1245,7 +1245,7 @@ case ${JOB_NAME} in
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
 : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
 : ${KUBE_ENABLE_DAEMONSETS:=true}
 ;;
@@ -1267,7 +1267,7 @@ case ${JOB_NAME} in
 ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
 ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
 )"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 kubernetes-upgrade-gce-step4-upgrade-cluster)
@@ -1280,7 +1280,7 @@ case ${JOB_NAME} in
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
 : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
 : ${KUBE_ENABLE_DAEMONSETS:=true}
 ;;
@@ -1300,7 +1300,7 @@ case ${JOB_NAME} in
 ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
 ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
 )"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 kubernetes-upgrade-gce-step6-e2e-new)
@@ -1319,7 +1319,7 @@ case ${JOB_NAME} in
 ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
 ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
 )"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 # kubernetes-upgrade-gce-1.0-current-release
@@ -1342,7 +1342,7 @@ case ${JOB_NAME} in
 : ${E2E_TEST:="false"}
 : ${E2E_DOWN:="false"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 kubernetes-upgrade-1.0-current-release-gce-step2-upgrade-master)
@@ -1358,7 +1358,7 @@ case ${JOB_NAME} in
 : ${E2E_DOWN:="false"}
 : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
 : ${KUBE_ENABLE_DAEMONSETS:=true}
 ;;
@@ -1379,7 +1379,7 @@ case ${JOB_NAME} in
 ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 kubernetes-upgrade-1.0-current-release-gce-step4-upgrade-cluster)
@@ -1395,7 +1395,7 @@ case ${JOB_NAME} in
 : ${E2E_DOWN:="false"}
 : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
 : ${KUBE_ENABLE_DAEMONSETS:=true}
 ;;
@@ -1416,7 +1416,7 @@ case ${JOB_NAME} in
 ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
 kubernetes-upgrade-1.0-current-release-gce-step6-e2e-new)
@@ -1437,7 +1437,7 @@ case ${JOB_NAME} in
 ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
 )"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-: ${NUM_MINIONS:=5}
+: ${NUM_NODES:=5}
 ;;
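
The upgrade jobs above form a multi-step pipeline over a single long-lived cluster: E2E_UP, E2E_TEST, and E2E_DOWN select which phase each Jenkins step performs, while the shared KUBE_GCE_INSTANCE_PREFIX keeps every step pointed at the same instances. A condensed, illustrative view of that gating (not the literal job definitions; the final step's teardown flag in particular is assumed):

# Assumed condensed view of the step gating used by the upgrade jobs;
# JOB_STEP is a hypothetical stand-in for the JOB_NAME suffix.
case ${JOB_STEP} in
  step1-deploy)         E2E_UP=true  E2E_TEST=false E2E_DOWN=false ;;  # bring the cluster up, leave it running
  step2-upgrade-master) E2E_UP=false E2E_TEST=true  E2E_DOWN=false ;;  # upgrade/test in place
  step6-e2e-new)        E2E_UP=false E2E_TEST=true  E2E_DOWN=true  ;;  # final run, then teardown (assumed)
esac
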
 # Run Kubemark test on a fake 100 node cluster to have a comparison
@@ -1452,11 +1452,11 @@ case ${JOB_NAME} in
 : ${USE_KUBEMARK:="true"}
 # Override defaults to be independent from GCE defaults and set kubemark parameters
 KUBE_GCE_INSTANCE_PREFIX="kubemark100"
-NUM_MINIONS="10"
+NUM_NODES="10"
 MASTER_SIZE="n1-standard-2"
-MINION_SIZE="n1-standard-1"
+NODE_SIZE="n1-standard-1"
 KUBEMARK_MASTER_SIZE="n1-standard-4"
-KUBEMARK_NUM_MINIONS="100"
+KUBEMARK_NUM_NODES="100"
 ;;
 # Run Kubemark test on a fake 500 node cluster to test for regressions on
@@ -1470,13 +1470,13 @@ case ${JOB_NAME} in
 : ${E2E_TEST:="false"}
 : ${USE_KUBEMARK:="true"}
 # Override defaults to be independent from GCE defaults and set kubemark parameters
-NUM_MINIONS="6"
+NUM_NODES="6"
 MASTER_SIZE="n1-standard-4"
-MINION_SIZE="n1-standard-8"
+NODE_SIZE="n1-standard-8"
 KUBE_GCE_INSTANCE_PREFIX="kubemark500"
 E2E_ZONE="asia-east1-a"
 KUBEMARK_MASTER_SIZE="n1-standard-16"
-KUBEMARK_NUM_MINIONS="500"
+KUBEMARK_NUM_NODES="500"
 ;;
 # Run big Kubemark test; this currently means a 1000 node cluster and 16 core master
@@ -1490,15 +1490,15 @@ case ${JOB_NAME} in
 : ${USE_KUBEMARK:="true"}
 # Override defaults to be independent from GCE defaults and set kubemark parameters
 # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
-NUM_MINIONS="11"
+NUM_NODES="11"
 MASTER_SIZE="n1-standard-4"
-MINION_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core
-# so NUM_MINIONS x cores_per_minion should
+NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core
+# so NUM_NODES x cores_per_minion should
 # be set accordingly.
 KUBE_GCE_INSTANCE_PREFIX="kubemark1000"
 E2E_ZONE="asia-east1-a"
 KUBEMARK_MASTER_SIZE="n1-standard-16"
-KUBEMARK_NUM_MINIONS="1000"
+KUBEMARK_NUM_NODES="1000"
 ;;
 esac
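
The kubemark1000 sizing above checks out arithmetically: 11 nodes x 8 cores (n1-standard-8) x about 17 hollow nodes per core gives roughly 1496 hollow-node slots, comfortably above 1000, and 1000 hollow nodes spread across 11 registered nodes is about 91 pods per node, just under the max-pods limit of 100. The same check as a shell sketch (the helper name is made up; the constants come from the case arm above):

# Illustrative capacity check for the kubemark1000 job.
check_kubemark_sizing() {
  local nodes=11 cores_per_node=8 hollow_per_core=17
  local hollow_nodes=1000 max_pods=100
  local capacity=$(( nodes * cores_per_node * hollow_per_core ))  # 1496
  local per_node=$(( (hollow_nodes + nodes - 1) / nodes ))        # ceil -> 91
  (( capacity >= hollow_nodes )) || { echo "too few cores for hollow nodes" >&2; return 1; }
  (( per_node < max_pods ))      || { echo "would exceed max-pods" >&2; return 1; }
}
check_kubemark_sizing && echo "kubemark1000 sizing is consistent"
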
@@ -1512,8 +1512,8 @@ export KUBE_GCE_ZONE=${E2E_ZONE}
 export KUBE_GCE_NETWORK=${E2E_NETWORK}
 export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-}
 export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-}
-export KUBE_GCE_MINION_PROJECT=${KUBE_GCE_MINION_PROJECT:-}
-export KUBE_GCE_MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-}
+export KUBE_GCE_NODE_PROJECT=${KUBE_GCE_NODE_PROJECT:-}
+export KUBE_GCE_NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-}
 export KUBE_OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-}
 # GKE variables
@@ -1523,7 +1523,7 @@ export KUBE_GKE_NETWORK=${E2E_NETWORK}
 export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-}
 export DOGFOOD_GCLOUD=${DOGFOOD_GCLOUD:-}
 export CMD_GROUP=${CMD_GROUP:-}
-export MACHINE_TYPE=${MINION_SIZE:-} # GKE scripts use MACHINE_TYPE for the node vm size
+export MACHINE_TYPE=${NODE_SIZE:-} # GKE scripts use MACHINE_TYPE for the node vm size
 if [[ ! -z "${GKE_API_ENDPOINT:-}" ]]; then
 export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT}
@@ -1537,9 +1537,9 @@ export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER:
 export KUBE_ENABLE_DEPLOYMENTS=${ENABLE_DEPLOYMENTS:-}
 export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-}
 export MASTER_SIZE=${MASTER_SIZE:-}
-export MINION_SIZE=${MINION_SIZE:-}
-export MINION_DISK_SIZE=${MINION_DISK_SIZE:-}
-export NUM_MINIONS=${NUM_MINIONS:-}
+export NODE_SIZE=${NODE_SIZE:-}
+export NODE_DISK_SIZE=${NODE_DISK_SIZE:-}
+export NUM_NODES=${NUM_NODES:-}
 export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-}
 export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-}
 export PROJECT=${PROJECT:-}
@@ -1752,18 +1752,18 @@ fi
 ### Start Kubemark ###
 if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
 export RUN_FROM_DISTRO=true
-NUM_MINIONS_BKP=${NUM_MINIONS}
+NUM_NODES_BKP=${NUM_NODES}
 MASTER_SIZE_BKP=${MASTER_SIZE}
 ./test/kubemark/stop-kubemark.sh
-NUM_MINIONS=${KUBEMARK_NUM_MINIONS:-$NUM_MINIONS}
+NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
 MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
 ./test/kubemark/start-kubemark.sh
 ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
 ./test/kubemark/stop-kubemark.sh
-NUM_MINIONS=${NUM_MINIONS_BKP}
+NUM_NODES=${NUM_NODES_BKP}
 MASTER_SIZE=${MASTER_SIZE_BKP}
 unset RUN_FROM_DISTRO
-unset NUM_MINIONS_BKP
+unset NUM_NODES_BKP
 unset MASTER_SIZE_BKP
 fi
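
The kubemark block above uses a save/override/restore dance so that the kubemark-specific sizes do not leak into later steps that also read NUM_NODES and MASTER_SIZE. The general shape of the idiom, as a minimal sketch (VAR and OVERRIDE are placeholders, not variables from this script):

VAR_BKP=${VAR}                  # remember the caller's value
VAR=${OVERRIDE:-$VAR}           # substitute the override only when one is set
echo "working with VAR=${VAR}"  # the kubemark scripts run at this point
VAR=${VAR_BKP}                  # restore so later steps see the original
unset VAR_BKP
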
@@ -32,7 +32,7 @@ source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
 detect-project &> /dev/null
 echo "kube-dump.sh: Getting docker statuses on all nodes..."
-ALL_NODES=(${MINION_NAMES[*]} ${MASTER_NAME})
+ALL_NODES=(${NODE_NAMES[*]} ${MASTER_NAME})
 for NODE in ${ALL_NODES[*]}; do
 echo "kube-dump.sh: Node $NODE:"
 ssh-to-node "${NODE}" '
@@ -205,7 +205,7 @@ contexts:
 current-context: kubemark-context
 EOF
-sed "s/##numreplicas##/${NUM_MINIONS:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json
+sed "s/##numreplicas##/${NUM_NODES:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json
 sed -i'' -e "s/##project##/${PROJECT}/g" ${KUBE_ROOT}/test/kubemark/hollow-node.json
 kubectl create -f ${KUBE_ROOT}/test/kubemark/kubemark-ns.json
 kubectl create -f ${KUBECONFIG_SECRET} --namespace="kubemark"
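
The two sed calls above render the hollow-node manifest from a template by rewriting ##...## placeholder tokens. A minimal standalone sketch of the same technique (the file and token names are made up for illustration): chaining -e expressions into a single invocation also sidesteps the in-place -i flag, whose syntax differs between GNU and BSD sed.

# Render a manifest from a placeholder template (illustrative names).
sed -e "s/##replicas##/${NUM_NODES:-10}/g" \
    -e "s/##project##/${PROJECT}/g" \
    template.json > rendered.json
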
@@ -215,7 +215,7 @@ rm ${KUBECONFIG_SECRET}
 echo "Waiting for all HollowNodes to become Running..."
 echo "This can loop forever if something crashed."
-until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_MINIONS}" ]]; do
+until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_NODES}" ]]; do
 echo -n .
 sleep 1
 done
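
As the echo above admits, this poll never gives up. A bounded variant of the same loop, assuming a hypothetical KUBEMARK_WAIT_TIMEOUT in seconds (not a knob the script defines), is one way to fail fast instead:

# Bounded version of the readiness poll; KUBEMARK_WAIT_TIMEOUT is assumed.
deadline=$(( $(date +%s) + ${KUBEMARK_WAIT_TIMEOUT:-600} ))
until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_NODES}" ]]; do
  if (( $(date +%s) >= deadline )); then
    echo "Timed out waiting for hollow nodes to become Ready" >&2
    exit 1
  fi
  echo -n .
  sleep 1
done
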