Bring up a 4000-node kubemark cluster in Jenkins.

This is a bit of a hack of the existing scripts, but it is the quickest way to get this cluster up.
I will restructure e2e.sh to do this in a saner way in a separate PR.
This commit is contained in:
Quinton Hoole 2015-10-07 16:09:12 -07:00
parent aa307da594
commit 1b7f8f540e
2 changed files with 16 additions and 1 deletions

View File

@ -22,7 +22,7 @@
# gcloud CLI binary used for all GCE operations.
GCLOUD=gcloud
# GCE zone for the cluster; overridable via KUBE_GCE_ZONE.
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
# Master VM machine type; overridable from the environment.
MASTER_SIZE=${MASTER_SIZE:-n1-standard-4}
# NOTE(review): the next two lines are the removed (-) and added (+) sides of
# the diff hunk above — the default node count is raised from 100 to 4000.
NUM_MINIONS=${NUM_MINIONS:-100}
NUM_MINIONS=${NUM_MINIONS:-4000}
# Use SSD for the master's persistent disk (faster etcd at this scale).
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
# NOTE(review): the default is read from REGISTER_MASTER but the variable
# being set is REGISTER_MASTER_KUBELET — presumably an intentional
# env-var-to-config mapping; confirm against the consuming script.
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}

View File

@ -645,6 +645,21 @@ case ${JOB_NAME} in
: ${USE_KUBEMARK:="true"}
: ${NUM_MINIONS:="10"}
;;
kubernetes-kubemark-gce-scale)
  # Large-scale kubemark job: brings the cluster up and down without running
  # the e2e tests, simulating 4000 hollow nodes on 40 real 16-core minions.
  : ${E2E_CLUSTER_NAME:="kubernetes-kubemark-scale"}
  : ${E2E_NETWORK:="kubernetes-kubemark-scale"}
  : ${PROJECT:="kubernetes-scale"}
  : ${E2E_UP:="true"}
  : ${E2E_DOWN:="true"}
  : ${E2E_TEST:="false"}
  : ${USE_KUBEMARK:="true"}
  : ${NUM_MINIONS:="40"}
  : ${MASTER_SIZE:="n1-standard-32"}  # Note: not available in all zones
  # Fix: use ':=' (assign if unset OR empty) instead of '=' (assign only if
  # unset) so an empty-but-exported MINION_SIZE still gets the default,
  # matching every other default in this case arm.
  : ${MINION_SIZE:="n1-standard-16"}  # Note: can fit about 17 hollow nodes per core
                                      # so NUM_MINIONS x cores_per_minion should
                                      # be set accordingly.
  ;;
esac
# AWS variables