Merge pull request #29288 from wojtek-t/ability_for_faster_load_test
Automatic merge from submit-queue

Allow for overriding throughput in load test

We already seem to support higher throughput than the default. I'm going to increase the throughput in our tests to:
- speed up scalability tests
- ensure that what I'm seeing locally is really a repeatable case

This PR is a short preparation for those experiments. [Ideally, I would like kubemark-500 to finish within 30 minutes, and I think this should be doable pretty soon.]

@gmarek
This commit is contained in:
commit 7e0a6e497a
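The change is in two parts: the test-cluster config script gains a KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS knob that is threaded into the controller-manager and scheduler test args, and the e2e load test learns a LOAD_TEST_THROUGHPUT environment variable that parameterizes pacing that previously hard-coded 10 pods/second.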
@@ -55,6 +55,8 @@ KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
 TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=16}"
 TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:-}"
 
+KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS="${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}"
+
 # ContentType used by all components to communicate with apiserver.
 TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
 # ContentType used to store objects in underlying database.
@@ -62,8 +64,8 @@ TEST_CLUSTER_STORAGE_CONTENT_TYPE="${TEST_CLUSTER_STORAGE_CONTENT_TYPE:-}"
 
 KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL ${TEST_CLUSTER_API_CONTENT_TYPE}"
 APISERVER_TEST_ARGS="--runtime-config=extensions/v1beta1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_STORAGE_CONTENT_TYPE} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS}"
-CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
-SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE}"
+CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}"
+SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}"
 KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE}"
 
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET
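KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS defaults to empty, so existing jobs see identical component flags; when set, its contents are appended verbatim to the controller-manager and scheduler arguments. A plausible value (an assumption on my part, not shown in this diff) would be client-side rate-limit flags such as --kube-api-qps / --kube-api-burst, which both components accept.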
@@ -142,6 +142,16 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 			framework.Logf("Skipping service creation")
 		}
 
+		// We assume a default throughput of 10 pods/second.
+		// We may want to revisit it in the future.
+		// However, this can be overridden by the LOAD_TEST_THROUGHPUT env var.
+		throughput := 10
+		if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" {
+			if newThroughput, err := strconv.Atoi(throughputEnv); err == nil {
+				throughput = newThroughput
+			}
+		}
+
 		// Simulate lifetime of RC:
 		// * create with initial size
 		// * scale RC to a random size and list all pods
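As a standalone sketch of the pattern the test now uses (assumptions: the main wrapper, the totalPods value, and the printed output are mine for illustration; the env-var parsing and the duration arithmetic mirror the diff):

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// throughputFromEnv mirrors the test's logic above: default to 10 pods/second,
// but let LOAD_TEST_THROUGHPUT override it. A non-numeric value is silently
// ignored (the err == nil check), so the default stays in place.
func throughputFromEnv() int {
	throughput := 10
	if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" {
		if newThroughput, err := strconv.Atoi(throughputEnv); err == nil {
			throughput = newThroughput
		}
	}
	return throughput
}

func main() {
	// totalPods is a hypothetical figure for illustration; in the test it is
	// derived from the cluster size. Values are assumed positive: as in the
	// diff, a zero throughput would panic on division.
	totalPods := 3000
	throughput := throughputFromEnv()

	// The three pacing windows from the diff: creation and deletion spread
	// totalPods over <throughput> pods/second; scaling is expected to touch
	// fewer than totalPods/3 pods, hence the 3*throughput divisor.
	creatingTime := time.Duration(totalPods/throughput) * time.Second
	scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
	deletingTime := time.Duration(totalPods/throughput) * time.Second

	fmt.Println("creating:", creatingTime, "scaling:", scalingTime, "deleting:", deletingTime)
}

Note that a non-numeric LOAD_TEST_THROUGHPUT leaves the default in place rather than failing the test.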
@@ -155,17 +165,17 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 
 		// We would like to spread creating replication controllers over time
 		// to make it possible to create/schedule them in the meantime.
-		// Currently we assume 10 pods/second average throughput.
+		// Currently we assume <throughput> pods/second average throughput.
 		// We may want to revisit it in the future.
-		creatingTime := time.Duration(totalPods/10) * time.Second
+		creatingTime := time.Duration(totalPods/throughput) * time.Second
 		createAllRC(configs, creatingTime)
 		By("============================================================================")
 
 		// We would like to spread scaling replication controllers over time
 		// to make it possible to create/schedule & delete them in the meantime.
-		// Currently we assume that 10 pods/second average throughput.
+		// Currently we assume <throughput> pods/second average throughput.
 		// The expected number of created/deleted pods is less than totalPods/3.
-		scalingTime := time.Duration(totalPods/30) * time.Second
+		scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
 		scaleAllRC(configs, scalingTime)
 		By("============================================================================")
 
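To make the pacing concrete (numbers assumed, not from this PR): with totalPods = 3000, the defaults give creatingTime = 300s and scalingTime = 100s, while LOAD_TEST_THROUGHPUT=30 cuts them to 100s and 33s — the kind of speedup the kubemark-500 goal above depends on.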
@@ -173,9 +183,9 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		By("============================================================================")
 
 		// Cleanup all created replication controllers.
-		// Currently we assume 10 pods/second average deletion throughput.
+		// Currently we assume <throughput> pods/second average deletion throughput.
 		// We may want to revisit it in the future.
-		deletingTime := time.Duration(totalPods/10) * time.Second
+		deletingTime := time.Duration(totalPods/throughput) * time.Second
 		deleteAllRC(configs, deletingTime)
 		if createServices == "true" {
 			for _, service := range services {
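Usage note: export LOAD_TEST_THROUGHPUT (for example, LOAD_TEST_THROUGHPUT=30) in the environment of the e2e load test to raise the assumed throughput; unset or non-numeric values fall back to 10 pods/second.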