diff --git a/cluster/kubemark/config-default.sh b/cluster/kubemark/config-default.sh index 8000d39c821..b5209dd8103 100644 --- a/cluster/kubemark/config-default.sh +++ b/cluster/kubemark/config-default.sh @@ -55,6 +55,8 @@ KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=16}" TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:-}" +KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS="${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}" + # ContentType used by all components to communicate with apiserver. TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}" # ContentType used to store objects in underlying database. @@ -62,8 +64,8 @@ TEST_CLUSTER_STORAGE_CONTENT_TYPE="${TEST_CLUSTER_STORAGE_CONTENT_TYPE:-}" KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL ${TEST_CLUSTER_API_CONTENT_TYPE}" APISERVER_TEST_ARGS="--runtime-config=extensions/v1beta1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_STORAGE_CONTENT_TYPE} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS}" -CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}" -SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE}" +CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}" +SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}" KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE}" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET diff --git a/test/e2e/load.go b/test/e2e/load.go index f8d23497c04..2755b014b8d 100644 --- a/test/e2e/load.go +++ b/test/e2e/load.go @@ -142,6 +142,16 @@ var _ = framework.KubeDescribe("Load capacity", func() { 
framework.Logf("Skipping service creation") } + // We assume a default throughput of 10 pods/second. + // We may want to revisit it in the future. + // However, this can be overridden by LOAD_TEST_THROUGHPUT env var. + throughput := 10 + if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" { + if newThroughput, err := strconv.Atoi(throughputEnv); err == nil { + throughput = newThroughput + } + } + // Simulate lifetime of RC: // * create with initial size // * scale RC to a random size and list all pods @@ -155,17 +165,17 @@ var _ = framework.KubeDescribe("Load capacity", func() { // We would like to spread creating replication controllers over time // to make it possible to create/schedule them in the meantime. - // Currently we assume 10 pods/second average throughput. + // Currently we assume `throughput` pods/second average throughput. // We may want to revisit it in the future. - creatingTime := time.Duration(totalPods/10) * time.Second + creatingTime := time.Duration(totalPods/throughput) * time.Second createAllRC(configs, creatingTime) By("============================================================================") // We would like to spread scaling replication controllers over time // to make it possible to create/schedule & delete them in the meantime. - // Currently we assume that 10 pods/second average throughput. + // Currently we assume that `throughput` pods/second average throughput. // The expected number of created/deleted pods is less than totalPods/3. - scalingTime := time.Duration(totalPods/30) * time.Second + scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second scaleAllRC(configs, scalingTime) By("============================================================================") @@ -173,9 +183,9 @@ var _ = framework.KubeDescribe("Load capacity", func() { By("============================================================================") // Cleanup all created replication controllers. 
- // Currently we assume 10 pods/second average deletion throughput. + // Currently we assume `throughput` pods/second average deletion throughput. // We may want to revisit it in the future. - deletingTime := time.Duration(totalPods/10) * time.Second + deletingTime := time.Duration(totalPods/throughput) * time.Second deleteAllRC(configs, deletingTime) if createServices == "true" { for _, service := range services {