From 9fce56c165512141d57a8b6f609028731076a8bd Mon Sep 17 00:00:00 2001
From: Wojciech Tyczynski
Date: Tue, 9 Feb 2016 08:38:12 +0100
Subject: [PATCH 1/2] Fix timeouts in load test

---
 test/e2e/load.go | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/test/e2e/load.go b/test/e2e/load.go
index 6875cef033f..b1dcb28758b 100644
--- a/test/e2e/load.go
+++ b/test/e2e/load.go
@@ -132,11 +132,17 @@ var _ = Describe("Load capacity", func() {
 		// We may want to revisit it in the future.
 		creatingTime := time.Duration(totalPods/5) * time.Second
 		createAllRC(configs, creatingTime)
+		By("============================================================================")
+		// We would like to spread scaling replication controllers over time
+		// to make it possible to create/schedule & delete them in the meantime.
+		// Currently we assume 5 pods/second average throughput.
+		// The expected number of created/deleted pods is less than totalPods/3.
+		scalingTime := time.Duration(totalPods/15) * time.Second
+		scaleAllRC(configs, scalingTime)
 		By("============================================================================")
 
-		scaleAllRC(configs)
-		By("============================================================================")
-		scaleAllRC(configs)
+
+		scaleAllRC(configs, scalingTime)
 		By("============================================================================")
 
 		// Cleanup all created replication controllers.
@@ -211,23 +217,22 @@ func createRC(wg *sync.WaitGroup, config *RCConfig, creatingTime time.Duration)
 	expectNoError(RunRC(*config), fmt.Sprintf("creating rc %s", config.Name))
 }
 
-func scaleAllRC(configs []*RCConfig) {
+func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) {
 	var wg sync.WaitGroup
 	wg.Add(len(configs))
 	for _, config := range configs {
-		go scaleRC(&wg, config)
+		go scaleRC(&wg, config, scalingTime)
 	}
 	wg.Wait()
 }
 
 // Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
 // Scaling happens always based on original size, not the current size.
-func scaleRC(wg *sync.WaitGroup, config *RCConfig) {
+func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) {
 	defer GinkgoRecover()
 	defer wg.Done()
 
-	resizingTime := 3 * time.Minute
-	sleepUpTo(resizingTime)
+	sleepUpTo(scalingTime)
 	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
 	expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
 		fmt.Sprintf("scaling rc %s for the first time", config.Name))

From 470a9ad3c529054c6aa8ca3ca333834f2f30bffa Mon Sep 17 00:00:00 2001
From: Wojciech Tyczynski
Date: Tue, 9 Feb 2016 08:48:48 +0100
Subject: [PATCH 2/2] Run load tests in 100-node and 500-node Kubemarks

---
 hack/jenkins/e2e-runner.sh | 2 +-
 hack/jenkins/e2e.sh        | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/hack/jenkins/e2e-runner.sh b/hack/jenkins/e2e-runner.sh
index d676f54a925..18143c30ef2 100755
--- a/hack/jenkins/e2e-runner.sh
+++ b/hack/jenkins/e2e-runner.sh
@@ -185,7 +185,7 @@ if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
   NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
   MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
   ./test/kubemark/start-kubemark.sh
-  ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
+  ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="${KUBEMARK_TESTS}" --delete-namespace="false" --gather-resource-usage="false"
   ./test/kubemark/stop-kubemark.sh
   NUM_NODES=${NUM_NODES_BKP}
   MASTER_SIZE=${MASTER_SIZE_BKP}
diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh
index 728c9caba06..33d87ea097b 100755
--- a/hack/jenkins/e2e.sh
+++ b/hack/jenkins/e2e.sh
@@ -659,6 +659,7 @@ case ${JOB_NAME} in
     : ${E2E_DOWN:="true"}
     : ${E2E_TEST:="false"}
     : ${USE_KUBEMARK:="true"}
+    : ${KUBEMARK_TESTS:="\[Feature:Performance\]"}
     # Override defaults to be indpendent from GCE defaults and set kubemark parameters
     KUBE_GCE_INSTANCE_PREFIX="kubemark100"
     NUM_NODES="10"
@@ -679,6 +680,7 @@ case ${JOB_NAME} in
     : ${E2E_DOWN:="true"}
     : ${E2E_TEST:="false"}
     : ${USE_KUBEMARK:="true"}
+    : ${KUBEMARK_TESTS:="\[Feature:Performance\]"}
     # Override defaults to be indpendent from GCE defaults and set kubemark parameters
     NUM_NODES="6"
     MASTER_SIZE="n1-standard-4"
@@ -698,6 +700,7 @@ case ${JOB_NAME} in
     : ${E2E_DOWN:="true"}
     : ${E2E_TEST:="false"}
     : ${USE_KUBEMARK:="true"}
+    : ${KUBEMARK_TESTS:="should\sallow\sstarting\s30\spods\sper\snode"}
     # Override defaults to be indpendent from GCE defaults and set kubemark parameters
     # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
     NUM_NODES="11"
@@ -1029,6 +1032,7 @@ export KUBE_SKIP_CONFIRMATIONS=y
 
 # Kubemark
 export USE_KUBEMARK="${USE_KUBEMARK:-false}"
+export KUBEMARK_TESTS="${KUBEMARK_TESTS:-}"
 export KUBEMARK_MASTER_SIZE="${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}"
 export KUBEMARK_NUM_NODES="${KUBEMARK_NUM_NODES:-$NUM_NODES}"
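
A note on the first patch: the old fixed resizingTime of 3 minutes did not grow with cluster size, which is what caused timeouts on large clusters. The new scalingTime is derived from the expected work instead: each scaling pass is expected to touch fewer than totalPods/3 pods, so spreading it over totalPods/15 seconds keeps the average scaling throughput near 5 pods/second, the same budget already used for creation (creatingTime is totalPods/5 seconds). The sketch below is illustrative only, not the test code; it assumes sleepUpTo sleeps for a uniformly random duration up to its argument, which is how scaleRC uses it, and every other name and value in it is made up for the example.

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// sleepUpTo sleeps for a uniformly random duration in [0, d).
// (Assumed behaviour of the helper the load test calls.)
func sleepUpTo(d time.Duration) {
	time.Sleep(time.Duration(rand.Int63n(int64(d))))
}

func main() {
	// Worked example for a 100-node Kubemark running the load test
	// (30 pods per node, as in the old focus string):
	totalPods := 100 * 30
	creatingTime := time.Duration(totalPods/5) * time.Second  // 10m0s: ~5 pods/s while creating
	scalingTime := time.Duration(totalPods/15) * time.Second  // 3m20s: ~5 pods/s for <totalPods/3 scaled pods
	fmt.Println("creatingTime:", creatingTime, "scalingTime:", scalingTime)

	// Spread 30 fake "scale" calls uniformly over a (shortened) window,
	// the same way scaleAllRC spreads scaleRC calls over scalingTime.
	window := 3 * time.Second
	start := time.Now()
	var wg sync.WaitGroup
	for rc := 0; rc < 30; rc++ {
		wg.Add(1)
		go func(rc int) {
			defer wg.Done()
			sleepUpTo(window) // a random start in [0, window) bounds the average rate
			fmt.Printf("scaling rc-%d at +%v\n", rc, time.Since(start).Round(time.Millisecond))
		}(rc)
	}
	wg.Wait()
}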
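
The totalPods/3 estimate in the new comment also lines up with how scaleRC picks its target: newSize is rand.Intn(config.Replicas) + config.Replicas/2, a uniform draw from roughly [0.5*size, 1.5*size), and the expected gap between two independent uniform draws from an interval of width size is size/3 (the first pass, which starts from the midpoint, expects an even smaller change of about size/4). The snippet below only demonstrates the bounds of that draw; the replica count of 30 and the printing are illustrative, not taken from the test.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	replicas := 30 // original RC size; illustrative value
	// As in scaleRC: rand.Intn(replicas) is uniform on [0, replicas-1],
	// so newSize is uniform on [replicas/2, replicas/2+replicas-1].
	lo, hi := replicas/2, replicas/2+replicas-1
	fmt.Printf("newSize is uniform on [%d, %d] for replicas=%d\n", lo, hi, replicas) // [15, 44]
	for i := 0; i < 5; i++ {
		newSize := uint(rand.Intn(replicas) + replicas/2)
		fmt.Println("sample newSize:", newSize)
	}
}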