scheduler_perf: report data items as benchmark results

This replaces the pretty useless ns/op metric (useless because it includes
setup and teardown times) with the same values that also get stored in the JSON
file.

The main advantage is that benchstat can be used to analyze and compare
results.
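For illustration only (not part of this commit; the metric name and values are invented), a minimal sketch of the reporting pattern used below: testing.B.ReportMetric with unit "ns/op" overrides the default timing, and any other unit shows up as an extra value on the benchmark result line.

package example

import "testing"

func BenchmarkReportMetric(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// Workload whose wall-clock time is not the interesting result.
	}
	// Reporting 0 under "ns/op" hides the default timing, which would
	// otherwise include setup and teardown.
	b.ReportMetric(0, "ns/op")
	// A custom unit is printed as an additional "value unit" pair,
	// e.g. "... 0 ns/op 123 SchedulingThroughput/Average".
	b.ReportMetric(123, "SchedulingThroughput/Average")
}

Running such a benchmark before and after a change and feeding both outputs to benchstat old.txt new.txt then compares every reported unit, not just the timing.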
Patrick Ohly 2023-01-27 14:17:23 +01:00
parent 961129c5f1
commit cc4bcd1d8e

@@ -661,7 +661,32 @@ func BenchmarkPerfScheduling(b *testing.B) {
 for feature, flag := range tc.FeatureGates {
 	defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
 }
-dataItems.DataItems = append(dataItems.DataItems, runWorkload(ctx, b, tc, w)...)
+results := runWorkload(ctx, b, tc, w)
+dataItems.DataItems = append(dataItems.DataItems, results...)
+if len(results) > 0 {
+	// The default ns/op is not
+	// useful because it includes
+	// the time spent on
+	// initialization and shutdown. Here we suppress it.
+	b.ReportMetric(0, "ns/op")
+	// Instead, report the same
+	// results that also get stored
+	// in the JSON file.
+	for _, result := range results {
+		// For some metrics like
+		// scheduler_framework_extension_point_duration_seconds
+		// the actual value has some
+		// other unit. We patch the key
+		// to make it look right.
+		metric := strings.ReplaceAll(result.Labels["Metric"], "_seconds", "_"+result.Unit)
+		for key, value := range result.Data {
+			b.ReportMetric(value, metric+"/"+key)
+		}
+	}
+}
 // Reset metrics to prevent metrics generated in the current workload from
 // carrying over to the next workload.
 legacyregistry.Reset()
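As a note on the key patching above, here is a hedged, standalone illustration; the label, unit, and numbers are hypothetical stand-ins for result.Labels["Metric"], result.Unit, and result.Data. It shows how a metric whose label claims seconds but whose data is in another unit ends up being reported.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical inputs; the real ones come from the collected data items.
	label := "scheduler_framework_extension_point_duration_seconds"
	unit := "ms"
	data := map[string]float64{"Average": 0.25, "Perc99": 1.7}

	// Same transformation as in the diff: replace the misleading
	// _seconds suffix with the actual unit.
	metric := strings.ReplaceAll(label, "_seconds", "_"+unit)
	for key, value := range data {
		// Each pair would be passed to b.ReportMetric(value, metric+"/"+key).
		fmt.Printf("%v %s/%s\n", value, metric, key)
	}
}

The resulting names, for example scheduler_framework_extension_point_duration_ms/Average, carry the actual unit, so the values reported via b.ReportMetric are not misread as seconds.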