Merge pull request #128834 from pohly/scheduler-perf-pass-workaround
scheduler_perf: fix and enhance reporting
commit 40f222b620
@@ -81,6 +81,8 @@ fi
 # Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
 # set.
 KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
+# Set to 'false' to disable reduction of the JUnit file to only the top level tests.
+KUBE_PRUNE_JUNIT_TESTS=${KUBE_PRUNE_JUNIT_TESTS:-true}
 
 kube::test::usage() {
   kube::log::usage_from_stdin <<EOF
@@ -234,7 +236,7 @@ runTests() {
     && rc=$? || rc=$?
 
   if [[ -n "${junit_filename_prefix}" ]]; then
-    prune-junit-xml "${junit_filename_prefix}.xml"
+    prune-junit-xml -prune-tests="${KUBE_PRUNE_JUNIT_TESTS}" "${junit_filename_prefix}.xml"
   fi
 
   if [[ ${KUBE_COVER} =~ ^[yY]$ ]]; then
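With the new environment variable, `prune-junit-xml` receives `-prune-tests="${KUBE_PRUNE_JUNIT_TESTS}"`, defaulting to `true`. As a rough sketch of what "reduction of the JUnit file to only the top level tests" can mean for Go test output, where subtests are named `parent/child`, consider the following Go program; the XML shapes and the `/`-based heuristic are assumptions for illustration, not the actual `prune-junit-xml` implementation.

    package main

    import (
    	"encoding/xml"
    	"fmt"
    	"os"
    	"strings"
    )

    // Minimal JUnit shapes; real files carry more attributes.
    type testSuite struct {
    	XMLName   xml.Name   `xml:"testsuite"`
    	TestCases []testCase `xml:"testcase"`
    }

    type testCase struct {
    	Name string `xml:"name,attr"`
    }

    func main() {
    	data, err := os.ReadFile(os.Args[1])
    	if err != nil {
    		panic(err)
    	}
    	var suite testSuite
    	if err := xml.Unmarshal(data, &suite); err != nil {
    		panic(err)
    	}
    	kept := suite.TestCases[:0]
    	for _, tc := range suite.TestCases {
    		// Go subtests and sub-benchmarks embed "/" in their names,
    		// so names without "/" are the top-level tests.
    		if !strings.Contains(tc.Name, "/") {
    			kept = append(kept, tc)
    		}
    	}
    	fmt.Printf("kept %d top-level test cases\n", len(kept))
    }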
@@ -29,6 +29,7 @@ import (
 	"os"
 	"path"
 	"regexp"
+	"slices"
 	"strings"
 	"sync"
 	"testing"
@@ -207,9 +208,9 @@ var (
 	}
 )
 
-var UseTestingLog *bool
-var PerfSchedulingLabelFilter *string
-var TestSchedulingLabelFilter *string
+var UseTestingLog bool
+var PerfSchedulingLabelFilter string
+var TestSchedulingLabelFilter string
 
 // InitTests should be called in a TestMain in each config subdirectory.
 func InitTests() error {
@@ -235,9 +236,9 @@ func InitTests() error {
 		"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
 			"Options are:\n"+strings.Join(LoggingFeatureGate.KnownFeatures(), "\n"))
 
-	UseTestingLog = flag.Bool("use-testing-log", false, "Write log entries with testing.TB.Log. This is more suitable for unit testing and debugging, but less realistic in real benchmarks.")
-	PerfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
-	TestSchedulingLabelFilter = flag.String("test-scheduling-label-filter", "integration-test,-performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by TestScheduling")
+	flag.BoolVar(&UseTestingLog, "use-testing-log", false, "Write log entries with testing.TB.Log. This is more suitable for unit testing and debugging, but less realistic in real benchmarks.")
+	flag.StringVar(&PerfSchedulingLabelFilter, "perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
+	flag.StringVar(&TestSchedulingLabelFilter, "test-scheduling-label-filter", "integration-test,-performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by TestScheduling")
 
 	// This would fail if we hadn't removed the logging flags above.
 	logsapi.AddGoFlags(LoggingConfig, flag.CommandLine)
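The globals lose a level of indirection because registration switches from `flag.Bool`/`flag.String`, which allocate a new variable and return a pointer, to `flag.BoolVar`/`flag.StringVar`, which bind the flag to an existing variable. A minimal standalone sketch of the two styles (flag names here are illustrative):

    package main

    import (
    	"flag"
    	"fmt"
    )

    // Old style: flag.Bool allocates a bool and returns a pointer that
    // every reader must dereference.
    var useTestingLogPtr = flag.Bool("use-testing-log-old", false, "pointer-based registration")

    // New style: flag.BoolVar binds the flag to an existing variable,
    // so readers use it directly and writers can assign to it.
    var useTestingLog bool

    func main() {
    	flag.BoolVar(&useTestingLog, "use-testing-log", false, "value-based registration")
    	flag.Parse()
    	fmt.Println(*useTestingLogPtr, useTestingLog)
    }

The value-based style also lets later code append to the string filters directly (`PerfSchedulingLabelFilter += ",+short"`) instead of dereferencing first, which is exactly what the hunks below do.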
@@ -988,7 +989,7 @@ func (scm stopCollectingMetricsOp) patchParams(_ *workload) (realOp, error) {
 
 func initTestOutput(tb testing.TB) io.Writer {
 	var output io.Writer
-	if *UseTestingLog {
+	if UseTestingLog {
 		output = framework.NewTBWriter(tb)
 	} else {
 		tmpDir := tb.TempDir()
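`framework.NewTBWriter(tb)` adapts `testing.TB` to the `io.Writer` that the log plumbing expects. A minimal sketch of such an adapter, assuming nothing about the framework's actual implementation:

    package demo

    import (
    	"io"
    	"strings"
    	"testing"
    )

    // tbWriter forwards each Write to tb.Log, so log lines show up as
    // test output attributed to the running test.
    type tbWriter struct{ tb testing.TB }

    func (w tbWriter) Write(p []byte) (int, error) {
    	w.tb.Log(strings.TrimSuffix(string(p), "\n")) // Log appends its own newline
    	return len(p), nil
    }

    func TestTBWriter(t *testing.T) {
    	var out io.Writer = tbWriter{tb: t}
    	_, _ = io.WriteString(out, "hello from the writer\n")
    }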
@@ -1020,9 +1021,9 @@ func initTestOutput(tb testing.TB) io.Writer {
 var specialFilenameChars = regexp.MustCompile(`[^a-zA-Z0-9-_]`)
 
 func setupTestCase(t testing.TB, tc *testCase, featureGates map[featuregate.Feature]bool, output io.Writer, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) {
-	tCtx := ktesting.Init(t, initoption.PerTestOutput(*UseTestingLog))
+	tCtx := ktesting.Init(t, initoption.PerTestOutput(UseTestingLog))
 	artifacts, doArtifacts := os.LookupEnv("ARTIFACTS")
-	if !*UseTestingLog && doArtifacts {
+	if !UseTestingLog && doArtifacts {
 		// Reconfigure logging so that it goes to a separate file per
 		// test instead of stderr. If the test passes, the file gets
 		// deleted. The overall output can be very large (> 200 MB for
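The comment describes logging to a separate file per test under `$ARTIFACTS`, deleted again when the test passes. A hedged sketch of that pattern, reusing the sanitizing regexp idea from `specialFilenameChars`; the helper name and details are assumptions, not the code from this file:

    package demo

    import (
    	"os"
    	"path/filepath"
    	"regexp"
    	"testing"
    )

    var unsafeChars = regexp.MustCompile(`[^a-zA-Z0-9-_]`)

    // perTestLogFile creates $ARTIFACTS/<sanitized test name>.log and
    // removes it on success, so only failing tests leave logs behind.
    func perTestLogFile(tb testing.TB, artifacts string) (*os.File, error) {
    	name := filepath.Join(artifacts, unsafeChars.ReplaceAllString(tb.Name(), "_")+".log")
    	f, err := os.Create(name)
    	if err != nil {
    		return nil, err
    	}
    	tb.Cleanup(func() {
    		_ = f.Close()
    		if !tb.Failed() {
    			_ = os.Remove(name) // passed: drop the potentially huge log
    		}
    	})
    	return f, nil
    }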
@@ -1115,6 +1116,32 @@ func featureGatesMerge(src map[featuregate.Feature]bool, overrides map[featurega
 	return result
 }
 
+// fixJSONOutput works around Go not emitting a "pass" action for
+// sub-benchmarks
+// (https://github.com/golang/go/issues/66825#issuecomment-2343229005), which
+// causes gotestsum to report a successful benchmark run as failed
+// (https://github.com/gotestyourself/gotestsum/issues/413#issuecomment-2343206787).
+//
+// It does this by printing the missing "PASS" output line that test2json
+// then converts into the "pass" action.
+func fixJSONOutput(b *testing.B) {
+	if !slices.Contains(os.Args, "-test.v=test2json") {
+		// Not printing JSON.
+		return
+	}
+
+	start := time.Now()
+	b.Cleanup(func() {
+		if b.Failed() {
+			// Really has failed, do nothing.
+			return
+		}
+		// SYN gets injected when using -test.v=test2json, see
+		// https://cs.opensource.google/go/go/+/refs/tags/go1.23.3:src/testing/testing.go;drc=87ec2c959c73e62bfae230ef7efca11ec2a90804;l=527
+		fmt.Fprintf(os.Stderr, "%c--- PASS: %s (%.2fs)\n", 22 /* SYN */, b.Name(), time.Since(start).Seconds())
+	})
+}
+
 // RunBenchmarkPerfScheduling runs the scheduler performance benchmark tests.
 //
 // You can pass your own scheduler plugins via outOfTreePluginRegistry.
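For context on the workaround: per golang/go#66825, a benchmark that merely wraps sub-benchmarks produces no `--- PASS` line of its own, so under `-test.v=test2json` there is no terminal "pass" action for it and gotestsum reports the run as failed. An assumed minimal reproduction, not code from this PR:

    package demo

    import "testing"

    // Running this with
    //   go test -run=^$ -bench=. -test.v=test2json
    // produces pass events for BenchmarkParent/child but, per the
    // linked issue, no "pass" action for BenchmarkParent itself.
    func BenchmarkParent(b *testing.B) {
    	b.Run("child", func(b *testing.B) {
    		for i := 0; i < b.N; i++ {
    			_ = i * i // trivial workload
    		}
    	})
    }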
@@ -1128,11 +1155,12 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	if err = validateTestCases(testCases); err != nil {
 		b.Fatal(err)
 	}
+	fixJSONOutput(b)
 
 	if testing.Short() {
-		*PerfSchedulingLabelFilter += ",+short"
+		PerfSchedulingLabelFilter += ",+short"
 	}
-	testcaseLabelSelectors := strings.Split(*PerfSchedulingLabelFilter, ",")
+	testcaseLabelSelectors := strings.Split(PerfSchedulingLabelFilter, ",")
 
 	output := initTestOutput(b)
 
@@ -1147,11 +1175,13 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	dataItems := DataItems{Version: "v1"}
 	for _, tc := range testCases {
 		b.Run(tc.Name, func(b *testing.B) {
+			fixJSONOutput(b)
 			for _, w := range tc.Workloads {
 				b.Run(w.Name, func(b *testing.B) {
 					if !enabled(testcaseLabelSelectors, append(tc.Labels, w.Labels...)...) {
-						b.Skipf("disabled by label filter %v", PerfSchedulingLabelFilter)
+						b.Skipf("disabled by label filter %q", PerfSchedulingLabelFilter)
 					}
+					fixJSONOutput(b)
 
 					featureGates := featureGatesMerge(tc.FeatureGates, w.FeatureGates)
 					informerFactory, tCtx := setupTestCase(b, tc, featureGates, output, outOfTreePluginRegistry)
@@ -1244,16 +1274,16 @@ func RunIntegrationPerfScheduling(t *testing.T, configFile string) {
 	}
 
 	if testing.Short() {
-		*TestSchedulingLabelFilter += ",+short"
+		TestSchedulingLabelFilter += ",+short"
 	}
-	testcaseLabelSelectors := strings.Split(*TestSchedulingLabelFilter, ",")
+	testcaseLabelSelectors := strings.Split(TestSchedulingLabelFilter, ",")
 
 	for _, tc := range testCases {
 		t.Run(tc.Name, func(t *testing.T) {
 			for _, w := range tc.Workloads {
 				t.Run(w.Name, func(t *testing.T) {
 					if !enabled(testcaseLabelSelectors, append(tc.Labels, w.Labels...)...) {
-						t.Skipf("disabled by label filter %q", *TestSchedulingLabelFilter)
+						t.Skipf("disabled by label filter %q", TestSchedulingLabelFilter)
 					}
 					featureGates := featureGatesMerge(tc.FeatureGates, w.FeatureGates)
 					informerFactory, tCtx := setupTestCase(t, tc, featureGates, nil, nil)
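The label filters accept selectors such as `integration-test,-performance`: a bare name (or `+name`) must be present on the testcase, `-name` must be absent. A sketch of that matching rule for illustration; this is not the package's actual `enabled` helper:

    package demo

    import "strings"

    // matches reports whether the labels satisfy every selector: a
    // bare name or "+name" requires the label, "-name" forbids it.
    func matches(selectors []string, labels ...string) bool {
    	have := make(map[string]bool, len(labels))
    	for _, l := range labels {
    		have[l] = true
    	}
    	for _, sel := range selectors {
    		if name, negated := strings.CutPrefix(sel, "-"); negated {
    			if have[name] {
    				return false
    			}
    		} else if !have[strings.TrimPrefix(sel, "+")] {
    			return false
    		}
    	}
    	return true
    }

With the default `test-scheduling-label-filter`, a testcase labeled `integration-test` passes the filter, while anything also labeled `performance` is skipped.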