scheduler_perf: work around incorrect gotestsum failure reports
Because Go does not emit a "pass" action for benchmarks (https://github.com/golang/go/issues/66825#issuecomment-2343229005), gotestsum reports a successful benchmark run as failed (https://github.com/gotestyourself/gotestsum/issues/413#issuecomment-2343206787). We can work around that in each benchmark and sub-benchmark by emitting, from the test binary, the output line that `go test` expects on stdout for success.
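As an illustration (not part of the commit), a minimal sketch of the resulting call pattern, assuming the fixJSONOutput helper from the diff below; the package name, benchmark name, and workload are hypothetical:

package scheduler_perf_example // hypothetical

import "testing"

// BenchmarkExample is hypothetical; fixJSONOutput is the helper added in
// the diff below. Every benchmark and every sub-benchmark must register
// it, because each level needs its own "--- PASS:" line in the JSON stream.
func BenchmarkExample(b *testing.B) {
	fixJSONOutput(b)
	b.Run("sub", func(b *testing.B) {
		fixJSONOutput(b)
		for i := 0; i < b.N; i++ {
			_ = i // hypothetical workload
		}
	})
}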
@@ -29,6 +29,7 @@ import (
 	"os"
 	"path"
 	"regexp"
+	"slices"
 	"strings"
 	"sync"
 	"testing"
@@ -1115,6 +1116,32 @@ func featureGatesMerge(src map[featuregate.Feature]bool, overrides map[featurega
 	return result
 }
 
+// fixJSONOutput works around Go not emitting a "pass" action for
+// sub-benchmarks
+// (https://github.com/golang/go/issues/66825#issuecomment-2343229005), which
+// causes gotestsum to report a successful benchmark run as failed
+// (https://github.com/gotestyourself/gotestsum/issues/413#issuecomment-2343206787).
+//
+// It does this by printing the missing "PASS" output line that test2json
+// then converts into the "pass" action.
+func fixJSONOutput(b *testing.B) {
+	if !slices.Contains(os.Args, "-test.v=test2json") {
+		// Not printing JSON.
+		return
+	}
+
+	start := time.Now()
+	b.Cleanup(func() {
+		if b.Failed() {
+			// Really has failed, do nothing.
+			return
+		}
+		// SYN gets injected when using -test.v=test2json, see
+		// https://cs.opensource.google/go/go/+/refs/tags/go1.23.3:src/testing/testing.go;drc=87ec2c959c73e62bfae230ef7efca11ec2a90804;l=527
+		fmt.Fprintf(os.Stderr, "%c--- PASS: %s (%.2fs)\n", 22 /* SYN */, b.Name(), time.Since(start).Seconds())
+	})
+}
+
 // RunBenchmarkPerfScheduling runs the scheduler performance benchmark tests.
 //
 // You can pass your own scheduler plugins via outOfTreePluginRegistry.
@@ -1128,6 +1155,7 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	if err = validateTestCases(testCases); err != nil {
 		b.Fatal(err)
 	}
+	fixJSONOutput(b)
 
 	if testing.Short() {
 		PerfSchedulingLabelFilter += ",+short"
@@ -1147,11 +1175,13 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	dataItems := DataItems{Version: "v1"}
 	for _, tc := range testCases {
 		b.Run(tc.Name, func(b *testing.B) {
+			fixJSONOutput(b)
 			for _, w := range tc.Workloads {
 				b.Run(w.Name, func(b *testing.B) {
 					if !enabled(testcaseLabelSelectors, append(tc.Labels, w.Labels...)...) {
 						b.Skipf("disabled by label filter %q", PerfSchedulingLabelFilter)
 					}
+					fixJSONOutput(b)
 
 					featureGates := featureGatesMerge(tc.FeatureGates, w.FeatureGates)
 					informerFactory, tCtx := setupTestCase(b, tc, featureGates, output, outOfTreePluginRegistry)
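For reference, a sketch of the interaction the helper relies on, with a hypothetical sub-benchmark name: on success, the cleanup writes a line such as

	\x16--- PASS: BenchmarkPerfScheduling/example (1.23s)

where \x16 is the injected SYN byte, shown escaped here. test2json turns that line into an event along the lines of

	{"Action":"pass","Test":"BenchmarkPerfScheduling/example","Elapsed":1.23}

which is the "pass" action gotestsum needs to count the run as successful; the exact fields and values depend on the actual run.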