From ac3d43a8a65d5b946109167a1f4063d08c6759d5 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Mon, 18 Nov 2024 12:35:05 +0100
Subject: [PATCH] scheduler_perf: work around incorrect gotestsum failure
 reports

Because Go does not emit a "pass" action for benchmarks
(https://github.com/golang/go/issues/66825#issuecomment-2343229005),
gotestsum reports a successful benchmark run as failed
(https://github.com/gotestyourself/gotestsum/issues/413#issuecomment-2343206787).

We can work around that in each benchmark and sub-benchmark by emitting
the output line that `go test` expects on stdout from the test binary
for success.
---
 .../scheduler_perf/scheduler_perf.go          | 30 +++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/test/integration/scheduler_perf/scheduler_perf.go b/test/integration/scheduler_perf/scheduler_perf.go
index 0aabc57f5cd..a69100bff9e 100644
--- a/test/integration/scheduler_perf/scheduler_perf.go
+++ b/test/integration/scheduler_perf/scheduler_perf.go
@@ -29,6 +29,7 @@ import (
 	"os"
 	"path"
 	"regexp"
+	"slices"
 	"strings"
 	"sync"
 	"testing"
@@ -1115,6 +1116,32 @@ func featureGatesMerge(src map[featuregate.Feature]bool, overrides map[featurega
 	return result
 }
 
+// fixJSONOutput works around Go not emitting a "pass" action for
+// sub-benchmarks
+// (https://github.com/golang/go/issues/66825#issuecomment-2343229005), which
+// causes gotestsum to report a successful benchmark run as failed
+// (https://github.com/gotestyourself/gotestsum/issues/413#issuecomment-2343206787).
+//
+// It does this by printing the missing "PASS" output line that test2json
+// then converts into the "pass" action.
+func fixJSONOutput(b *testing.B) {
+	if !slices.Contains(os.Args, "-test.v=test2json") {
+		// Not printing JSON.
+		return
+	}
+
+	start := time.Now()
+	b.Cleanup(func() {
+		if b.Failed() {
+			// Really has failed, do nothing.
+			return
+		}
+		// SYN gets injected when using -test.v=test2json, see
+		// https://cs.opensource.google/go/go/+/refs/tags/go1.23.3:src/testing/testing.go;drc=87ec2c959c73e62bfae230ef7efca11ec2a90804;l=527
+		fmt.Fprintf(os.Stderr, "%c--- PASS: %s (%.2fs)\n", 22 /* SYN */, b.Name(), time.Since(start).Seconds())
+	})
+}
+
 // RunBenchmarkPerfScheduling runs the scheduler performance benchmark tests.
 //
 // You can pass your own scheduler plugins via outOfTreePluginRegistry.
@@ -1128,6 +1155,7 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	if err = validateTestCases(testCases); err != nil {
 		b.Fatal(err)
 	}
+	fixJSONOutput(b)
 
 	if testing.Short() {
 		PerfSchedulingLabelFilter += ",+short"
@@ -1147,11 +1175,13 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	dataItems := DataItems{Version: "v1"}
 	for _, tc := range testCases {
 		b.Run(tc.Name, func(b *testing.B) {
+			fixJSONOutput(b)
 			for _, w := range tc.Workloads {
 				b.Run(w.Name, func(b *testing.B) {
 					if !enabled(testcaseLabelSelectors, append(tc.Labels, w.Labels...)...) {
 						b.Skipf("disabled by label filter %q", PerfSchedulingLabelFilter)
 					}
+					fixJSONOutput(b)
 					featureGates := featureGatesMerge(tc.FeatureGates, w.FeatureGates)
 					informerFactory, tCtx := setupTestCase(b, tc, featureGates, output, outOfTreePluginRegistry)
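
Note: the same pattern can be tried outside of the Kubernetes tree. Below is a
minimal, self-contained sketch of the technique the patch applies; the file
layout, package name, and the helper name `reportPass` are hypothetical and
chosen only for illustration, while the SYN-prefixed "--- PASS" line and the
`-test.v=test2json` detection come from the patch above. It needs Go 1.21+ for
the `slices` package.

```go
// example_bench_test.go (hypothetical): standalone demonstration of the
// workaround for Go not emitting a "pass" action for benchmarks.
package example

import (
	"fmt"
	"os"
	"slices"
	"testing"
	"time"
)

// reportPass mirrors fixJSONOutput from the patch: when the test binary
// runs under "go test -json" (which passes -test.v=test2json to it),
// register a cleanup that prints the "--- PASS" line which Go omits for
// benchmarks, so that test2json produces a "pass" action.
func reportPass(b *testing.B) {
	if !slices.Contains(os.Args, "-test.v=test2json") {
		// Plain text output; go test reports the result correctly.
		return
	}
	start := time.Now()
	b.Cleanup(func() {
		if b.Failed() {
			// A real failure, do not fake a pass.
			return
		}
		// 22 is SYN (0x16); with -test.v=test2json the testing package
		// prefixes its own framework output with this byte, so we do
		// the same to have test2json treat the line as framework output.
		fmt.Fprintf(os.Stderr, "%c--- PASS: %s (%.2fs)\n",
			22 /* SYN */, b.Name(), time.Since(start).Seconds())
	})
}

func BenchmarkExample(b *testing.B) {
	reportPass(b)
	for i := 0; i < b.N; i++ {
		_ = i * i // trivial workload to benchmark
	}
}
```

With this in place, a run such as `gotestsum -- -bench=. -run='^$' ./...`
(gotestsum forwards the arguments after `--` to `go test -json`) should report
the benchmark as passed rather than failed.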