diff --git a/cluster/kubemark/config-default.sh b/cluster/kubemark/config-default.sh index 313bc34b269..fa92b50f391 100644 --- a/cluster/kubemark/config-default.sh +++ b/cluster/kubemark/config-default.sh @@ -54,6 +54,7 @@ SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=16}" +TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}" TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:-}" KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS="${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}" @@ -66,7 +67,7 @@ TEST_CLUSTER_STORAGE_CONTENT_TYPE="${TEST_CLUSTER_STORAGE_CONTENT_TYPE:-}" ENABLE_GARBAGE_COLLECTOR=${ENABLE_GARBAGE_COLLECTOR:-true} KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL ${TEST_CLUSTER_API_CONTENT_TYPE}" -APISERVER_TEST_ARGS="--runtime-config=extensions/v1beta1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_STORAGE_CONTENT_TYPE} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" +APISERVER_TEST_ARGS="--runtime-config=extensions/v1beta1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_STORAGE_CONTENT_TYPE} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS} --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}" KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE}" diff --git a/test/e2e/load.go b/test/e2e/load.go index 712ba020d3b..22b1ac3595d 100644 --- a/test/e2e/load.go +++ 
b/test/e2e/load.go @@ -18,6 +18,7 @@ package e2e import ( "fmt" + "math" "math/rand" "os" "strconv" @@ -70,11 +71,21 @@ var _ = framework.KubeDescribe("Load capacity", func() { Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) }) + // We assume a default throughput of 10 pods/second. + // We may want to revisit it in the future. + // However, this can be overridden by LOAD_TEST_THROUGHPUT env var. + throughput := 10 + if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" { + if newThroughput, err := strconv.Atoi(throughputEnv); err == nil { + throughput = newThroughput + } + } + // Explicitly put here, to delete namespace at the end of the test // (after measuring latency metrics, etc.). options := framework.FrameworkOptions{ - ClientQPS: 50, - ClientBurst: 100, + ClientQPS: float32(math.Max(50.0, float64(2*throughput))), + ClientBurst: int(math.Max(100.0, float64(4*throughput))), } f := framework.NewFramework("load", options, nil) f.NamespaceDeletionTimeout = time.Hour @@ -142,16 +153,6 @@ var _ = framework.KubeDescribe("Load capacity", func() { framework.Logf("Skipping service creation") } - // We assume a default throughput of 10 pods/second throughput. - // We may want to revisit it in the future. - // However, this can be overriden by LOAD_TEST_THROUGHPUT env var. - throughput := 10 - if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" { - if newThroughput, err := strconv.Atoi(throughputEnv); err == nil { - throughput = newThroughput - } - } - // Simulate lifetime of RC: // * create with initial size // * scale RC to a random size and list all pods