From ac379b038e9bfbf787cf69c040c3b0739978c3b3 Mon Sep 17 00:00:00 2001
From: Zhou Fang
Date: Mon, 22 Aug 2016 16:04:32 -0700
Subject: [PATCH] add throughput in perf data and disable --cgroups-per-qos

---
 test/e2e_node/benchmark_util.go                  | 37 ++++++++++++++++---
 test/e2e_node/density_test.go                    | 14 +++----
 .../benchmark/jenkins-benchmark.properties       |  2 +-
 3 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go
index 919e79e94ee..60713f29b2c 100644
--- a/test/e2e_node/benchmark_util.go
+++ b/test/e2e_node/benchmark_util.go
@@ -20,6 +20,7 @@ package e2e_node
 
 import (
 	"sort"
+	"time"
 
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -28,9 +29,9 @@ import (
 
 const (
 	// TODO(coufon): be consistent with perf_util.go version
-	currentTimeSeriesVersion = "v1"
-	TimeSeriesTag            = "[Result:TimeSeries]"
-	TimeSeriesEnd            = "[Finish:TimeSeries]"
+	currentDataVersion = "v1"
+	TimeSeriesTag      = "[Result:TimeSeries]"
+	TimeSeriesEnd      = "[Finish:TimeSeries]"
 )
 
 type NodeTimeSeries struct {
@@ -48,7 +49,7 @@ func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]unvers
 			"node": framework.TestContext.NodeName,
 			"test": testName,
 		},
-		Version: currentTimeSeriesVersion,
+		Version: currentDataVersion,
 	}
 	// Attach operation time series.
 	timeSeries.OperationData = map[string][]int64{
@@ -78,10 +79,10 @@ func getCumulatedPodTimeSeries(timePerPod map[string]unversioned.Time) []int64 {
 	return timeSeries
 }
 
-// getLatencyPerfData returns perf data from latency
+// getLatencyPerfData returns perf data of pod startup latency.
 func getLatencyPerfData(latency framework.LatencyMetric, testName string) *perftype.PerfData {
 	return &perftype.PerfData{
-		Version: "v1",
+		Version: currentDataVersion,
 		DataItems: []perftype.DataItem{
 			{
 				Data: map[string]float64{
@@ -103,3 +104,27 @@ func getLatencyPerfData(latency framework.LatencyMetric, testName string) *perft
 		},
 	}
 }
+
+// getThroughputPerfData returns perf data of pod creation throughput.
+func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testName string) *perftype.PerfData {
+	return &perftype.PerfData{
+		Version: currentDataVersion,
+		DataItems: []perftype.DataItem{
+			{
+				Data: map[string]float64{
+					"batch":        float64(podsNr) / batchLag.Minutes(),
+					"single-worst": 1.0 / e2eLags[len(e2eLags)-1].Latency.Minutes(),
+				},
+				Unit: "pods/min",
+				Labels: map[string]string{
+					"datatype":    "throughput",
+					"latencytype": "create-pod",
+				},
+			},
+		},
+		Labels: map[string]string{
+			"node": framework.TestContext.NodeName,
+			"test": testName,
+		},
+	}
+}
diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go
index b9472eddd72..406eb637906 100644
--- a/test/e2e_node/density_test.go
+++ b/test/e2e_node/density_test.go
@@ -343,12 +343,13 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	sort.Sort(framework.LatencySlice(e2eLags))
 
 	batchLag := lastRunning.Time.Sub(firstCreate.Time)
+	testName := testArg.getTestName()
 	// Log time series data.
 	if isLogTimeSeries {
-		logDensityTimeSeries(rc, createTimes, watchTimes, testArg.getTestName())
+		logDensityTimeSeries(rc, createTimes, watchTimes, testName)
 	}
 	// Log throughput data.
-	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr)
+	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testName)
 
 	return batchLag, e2eLags
 }
@@ -376,7 +377,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 
 	// Log throughput data.
-	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr)
+	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testArg.getTestName())
 
 	return batchlag, e2eLags
 }
@@ -526,9 +527,6 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 }
 
 // logThroughput calculates and logs pod creation throughput.
-func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int) {
-	throughputBatch := float64(podsNr) / batchLag.Minutes()
-	framework.Logf("Batch creation throughput is %.1f pods/min", throughputBatch)
-	throughputSequential := 1.0 / e2eLags[len(e2eLags)-1].Latency.Minutes()
-	framework.Logf("Sequential creation throughput is %.1f pods/min", throughputSequential)
+func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testName string) {
+	framework.PrintPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testName))
 }
diff --git a/test/e2e_node/jenkins/benchmark/jenkins-benchmark.properties b/test/e2e_node/jenkins/benchmark/jenkins-benchmark.properties
index 1fe8abd8566..ed589d0f3fe 100644
--- a/test/e2e_node/jenkins/benchmark/jenkins-benchmark.properties
+++ b/test/e2e_node/jenkins/benchmark/jenkins-benchmark.properties
@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]"'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=false
+#TEST_ARGS=--cgroups-per-qos=false
 PARALLELISM=1
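The two throughput figures that getThroughputPerfData reports reduce to simple ratios: "batch" is the number of pods created divided by the batch lag in minutes, and "single-worst" is the reciprocal of the slowest pod's end-to-end startup latency in minutes. A minimal standalone sketch of that arithmetic follows; the pod count and durations are made-up illustrative values, not taken from any real run.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative inputs only; in the benchmark these come from the density test.
	podsNr := 105                    // pods created in one batch
	batchLag := 7 * time.Minute      // lastRunning - firstCreate
	worstLatency := 45 * time.Second // slowest single-pod e2e startup latency

	// "batch": overall creation rate across the whole batch.
	batch := float64(podsNr) / batchLag.Minutes()
	// "single-worst": rate implied by the slowest pod alone.
	singleWorst := 1.0 / worstLatency.Minutes()

	fmt.Printf("batch: %.1f pods/min\n", batch)              // 15.0 pods/min
	fmt.Printf("single-worst: %.1f pods/min\n", singleWorst) // 1.3 pods/min
}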