Merge pull request #42048 from Crassirostris/cluster-logging-load-tests-parameters

Automatic merge from submit-queue (batch tested with PRs 41980, 42192, 42223, 41822, 42048)

Adjust parameters of GCL cluster logging load tests

This PR increases the amount of logs produced in load tests to match the number of nodes and provide a predictable load of 100 KB/sec on each node.

Also, this PR halves the amount of time given for ingesting logs.
This commit is contained in:
Kubernetes Submit Queue 2017-03-02 00:59:23 -08:00 committed by GitHub
commit d6528596a3

View File

@ -40,11 +40,12 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
gclLogsProvider, err := newGclLogsProvider(f)
framework.ExpectNoError(err, "Failed to create GCL logs provider")
podCount := 30
nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
podCount := 30 * nodeCount
loggingDuration := 10 * time.Minute
linesPerSecond := 1000
linesPerSecond := 1000 * nodeCount
linesPerPod := linesPerSecond * int(loggingDuration.Seconds()) / podCount
ingestionTimeout := 1 * time.Hour
ingestionTimeout := 30 * time.Minute
By("Running logs generator pods")
pods := []*loggingPod{}
@ -78,11 +79,12 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
gclLogsProvider, err := newGclLogsProvider(f)
framework.ExpectNoError(err, "Failed to create GCL logs provider")
maxPodCount := 10
nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
maxPodCount := 10 * nodeCount
jobDuration := 1 * time.Minute
linesPerPodPerSecond := 10
testDuration := 1 * time.Hour
ingestionTimeout := 1 * time.Hour
linesPerPodPerSecond := 100
testDuration := 10 * time.Minute
ingestionTimeout := 30 * time.Minute
podRunDelay := time.Duration(int64(jobDuration) / int64(maxPodCount))
podRunCount := int(testDuration.Seconds())/int(podRunDelay.Seconds()) - 1