diff --git a/test/integration/scheduler_perf/README.md b/test/integration/scheduler_perf/README.md
index 0087f50088c..91cf677e4ad 100644
--- a/test/integration/scheduler_perf/README.md
+++ b/test/integration/scheduler_perf/README.md
@@ -48,7 +48,7 @@ as "fast":
 make test-integration WHAT=./test/integration/scheduler_perf ETCD_LOGLEVEL=warn KUBE_TEST_VMODULE="''" KUBE_TEST_ARGS="-run=^$$ -benchtime=1ns -bench=BenchmarkPerfScheduling -perf-scheduling-label-filter=performance,-fast"
 ```
 
-Once the benchmark is finished, JSON file with metrics is available in the current directory (test/integration/scheduler_perf). Look for `BenchmarkPerfScheduling_YYYY-MM-DDTHH:MM:SSZ.json`.
+Once the benchmark is finished, JSON file with metrics is available in the current directory (test/integration/scheduler_perf). Look for `BenchmarkPerfScheduling_benchmark_YYYY-MM-DDTHH:MM:SSZ.json`.
 You can use `-data-items-dir` to generate the metrics file elsewhere.
 
 In case you want to run a specific test in the suite, you can specify the test through `-bench` flag:
diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index 1a151a5c0d7..53023cb441f 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -724,7 +724,7 @@ func BenchmarkPerfScheduling(b *testing.B) {
 			}
 		})
 	}
-	if err := dataItems2JSONFile(dataItems, b.Name()); err != nil {
+	if err := dataItems2JSONFile(dataItems, b.Name()+"_benchmark"); err != nil {
 		b.Fatalf("unable to write measured data %+v: %v", dataItems, err)
 	}
 }
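
For context, here is a minimal sketch of how the README's `BenchmarkPerfScheduling_benchmark_YYYY-MM-DDTHH:MM:SSZ.json` file name relates to the label passed to `dataItems2JSONFile` above. The helper name `buildMetricsFileName` and the exact timestamp format are assumptions for illustration, not the actual scheduler_perf implementation.

```go
package main

import (
	"fmt"
	"time"
)

// buildMetricsFileName is a hypothetical helper showing how a destination
// file name could be derived from the label passed to dataItems2JSONFile.
// Assumption: the timestamp follows the YYYY-MM-DDTHH:MM:SSZ pattern that
// the README tells readers to look for.
func buildMetricsFileName(label string) string {
	timestamp := time.Now().UTC().Format("2006-01-02T15:04:05Z")
	return fmt.Sprintf("%s_%s.json", label, timestamp)
}

func main() {
	// With this patch the label becomes b.Name()+"_benchmark", so for
	// BenchmarkPerfScheduling the result would look like
	// BenchmarkPerfScheduling_benchmark_2024-01-02T15:04:05Z.json.
	fmt.Println(buildMetricsFileName("BenchmarkPerfScheduling" + "_benchmark"))
}
```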