Merge pull request #20891 from wojtek-t/fix_load_test

Fix load test & run it on 100- and 500-node Kubemarks
Wojciech Tyczynski 2016-02-09 15:33:12 +01:00
commit a6a0392b03
3 changed files with 18 additions and 9 deletions

File 1 of 3

@@ -185,7 +185,7 @@ if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
  NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
  MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
  ./test/kubemark/start-kubemark.sh
- ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
+ ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="${KUBEMARK_TESTS}" --delete-namespace="false" --gather-resource-usage="false"
  ./test/kubemark/stop-kubemark.sh
  NUM_NODES=${NUM_NODES_BKP}
  MASTER_SIZE=${MASTER_SIZE_BKP}
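
Note: the string passed to --ginkgo.focus is a regular expression matched
against a test's full description, which is why the old hard-coded filter
spells its spaces as \s. A minimal sketch of that matching with Go's
standard regexp package (the description string below is illustrative,
not the exact ginkgo spec name):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // The old hard-coded focus value; \s stands in for spaces because
        // the string passes through several layers of shell quoting.
        focus := `should\sallow\sstarting\s30\spods\sper\snode`

        // A test description such a focus is meant to select.
        desc := "Load capacity should allow starting 30 pods per node"

        matched, err := regexp.MatchString(focus, desc)
        if err != nil {
            panic(err)
        }
        fmt.Println(matched) // true
    }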

File 2 of 3

@@ -542,6 +542,7 @@ case ${JOB_NAME} in
  : ${E2E_DOWN:="true"}
  : ${E2E_TEST:="false"}
  : ${USE_KUBEMARK:="true"}
+ : ${KUBEMARK_TESTS:="\[Feature:Performance\]"}
  # Override defaults to be independent from GCE defaults and set kubemark parameters
  KUBE_GCE_INSTANCE_PREFIX="kubemark100"
  NUM_NODES="10"
@@ -562,6 +563,7 @@ case ${JOB_NAME} in
  : ${E2E_DOWN:="true"}
  : ${E2E_TEST:="false"}
  : ${USE_KUBEMARK:="true"}
+ : ${KUBEMARK_TESTS:="\[Feature:Performance\]"}
  # Override defaults to be independent from GCE defaults and set kubemark parameters
  NUM_NODES="6"
  MASTER_SIZE="n1-standard-4"
@@ -581,6 +583,7 @@ case ${JOB_NAME} in
  : ${E2E_DOWN:="true"}
  : ${E2E_TEST:="false"}
  : ${USE_KUBEMARK:="true"}
+ : ${KUBEMARK_TESTS:="should\sallow\sstarting\s30\spods\sper\snode"}
  # Override defaults to be independent from GCE defaults and set kubemark parameters
  # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
  NUM_NODES="11"
@@ -895,6 +898,7 @@ export KUBE_SKIP_CONFIRMATIONS=y
  # Kubemark
  export USE_KUBEMARK="${USE_KUBEMARK:-false}"
+ export KUBEMARK_TESTS="${KUBEMARK_TESTS:-}"
  export KUBEMARK_MASTER_SIZE="${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}"
  export KUBEMARK_NUM_NODES="${KUBEMARK_NUM_NODES:-$NUM_NODES}"
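
Note: the per-job : ${KUBEMARK_TESTS:="..."} lines assign a default only
when the variable is unset, and the final export guarantees the variable
exists (possibly empty) for test/kubemark/run-e2e-tests.sh. A rough Go
analogue of that defaulting idiom, using a hypothetical envOrDefault
helper (not part of the e2e scripts):

    package main

    import (
        "fmt"
        "os"
    )

    // envOrDefault loosely mirrors the shell idiom `: ${VAR:="default"}`:
    // keep an existing non-empty value, otherwise fall back to the default.
    func envOrDefault(key, fallback string) string {
        if v := os.Getenv(key); v != "" {
            return v
        }
        return fallback
    }

    func main() {
        // Per-job default, as in the kubemark-100 job above.
        focus := envOrDefault("KUBEMARK_TESTS", `\[Feature:Performance\]`)

        // The final export KUBEMARK_TESTS="${KUBEMARK_TESTS:-}" merely
        // ensures the variable is set (possibly to ""), so later scripts
        // can reference it safely, e.g. under `set -u`.
        fmt.Println(focus)
    }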

File 3 of 3

@@ -132,11 +132,17 @@ var _ = Describe("Load capacity", func() {
 	// We may want to revisit it in the future.
 	creatingTime := time.Duration(totalPods/5) * time.Second
 	createAllRC(configs, creatingTime)
 	By("============================================================================")
-	scaleAllRC(configs)
+	// We would like to spread scaling replication controllers over time
+	// to make it possible to create/schedule & delete them in the meantime.
+	// Currently we assume 5 pods/second average throughput.
+	// The expected number of created/deleted pods is less than totalPods/3.
+	scalingTime := time.Duration(totalPods/15) * time.Second
+	scaleAllRC(configs, scalingTime)
 	By("============================================================================")
-	scaleAllRC(configs)
+	scaleAllRC(configs, scalingTime)
 	By("============================================================================")
 	// Cleanup all created replication controllers.
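
Note: the new comment's arithmetic is self-consistent. Resizing an RC to a
uniform random size in [0.5*size, 1.5*size] changes it by size/4 in
expectation, so across all RCs roughly totalPods/4 (< totalPods/3) pods are
created or deleted, while totalPods/15 seconds at the assumed 5 pods/second
gives a budget of exactly totalPods/3 operations. A back-of-the-envelope
check (the totalPods value is illustrative):

    package main

    import "fmt"

    func main() {
        // Illustrative: 30 pods per node on a 100-node kubemark.
        totalPods := 3000

        // Budget: scaling is spread over totalPods/15 seconds, and the
        // test assumes about 5 pods/second of average throughput.
        scalingTimeSec := totalPods / 15
        budget := scalingTimeSec * 5 // == totalPods/3

        // Expected work: a uniform resize within [0.5*size, 1.5*size]
        // moves each RC by size/4 on average, i.e. ~totalPods/4 overall.
        expectedWork := totalPods / 4

        fmt.Printf("budget=%d expected=%d headroom=%v\n",
            budget, expectedWork, expectedWork < budget)
    }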
@@ -211,23 +217,22 @@ func createRC(wg *sync.WaitGroup, config *RCConfig, creatingTime time.Duration)
 	expectNoError(RunRC(*config), fmt.Sprintf("creating rc %s", config.Name))
 }

-func scaleAllRC(configs []*RCConfig) {
+func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) {
 	var wg sync.WaitGroup
 	wg.Add(len(configs))
 	for _, config := range configs {
-		go scaleRC(&wg, config)
+		go scaleRC(&wg, config, scalingTime)
 	}
 	wg.Wait()
 }

 // Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
 // Scaling always happens based on the original size, not the current size.
-func scaleRC(wg *sync.WaitGroup, config *RCConfig) {
+func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) {
 	defer GinkgoRecover()
 	defer wg.Done()
-	resizingTime := 3 * time.Minute
-	sleepUpTo(resizingTime)
+	sleepUpTo(scalingTime)
 	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
 	expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
 		fmt.Sprintf("scaling rc %s for the first time", config.Name))