Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-18 08:09:58 +00:00)

Merge pull request #126886 from pohly/scheduler-perf-output

scheduler_perf: output

Commit: 08dd9951f5
@@ -150,7 +150,11 @@ func NewDefaultTestServerOptions() *TestServerInstanceOptions {
 // files that because Golang testing's call to os.Exit will not give a stop channel go routine
 // enough time to remove temporary files.
 func StartTestServer(t ktesting.TB, instanceOptions *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) {
-	tCtx := ktesting.Init(t)
+	// Some callers may have initialized ktesting already.
+	tCtx, ok := t.(ktesting.TContext)
+	if !ok {
+		tCtx = ktesting.Init(t)
+	}
 	if instanceOptions == nil {
 		instanceOptions = NewDefaultTestServerOptions()
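The type assertion added here lets a caller that already holds a ktesting.TContext pass it straight through, instead of paying for a second Init and splitting cancellation across two contexts. A minimal sketch of such a caller; SharedEtcd and TearDownFn are assumed from the surrounding test framework, not part of this diff:

// Hypothetical caller (sketch): it already owns a ktesting.TContext and
// passes it through, so StartTestServer reuses it rather than creating a
// second one.
func TestWithSharedContext(t *testing.T) {
	tCtx := ktesting.Init(t)
	server, err := StartTestServer(tCtx, nil, nil, framework.SharedEtcd())
	if err != nil {
		tCtx.Fatalf("failed to start test server: %v", err)
	}
	// Cleanup stays tied to the one shared context.
	defer server.TearDownFn()
}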
@@ -26,6 +26,7 @@ import (
 	"os/exec"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 	"testing"
 	"time"
@@ -146,7 +147,10 @@ func RunCustomEtcd(dataDir string, customFlags []string, output io.Writer) (url
 	// try to exit etcd gracefully
 	defer cancel()
 	cmd.Process.Signal(syscall.SIGTERM)
+	var wg sync.WaitGroup
+	wg.Add(1)
 	go func() {
+		defer wg.Done()
 		select {
 		case <-ctx.Done():
 			klog.Infof("etcd exited gracefully, context cancelled")
@@ -156,6 +160,7 @@ func RunCustomEtcd(dataDir string, customFlags []string, output io.Writer) (url
 		}
 	}()
 	err := cmd.Wait()
+	wg.Wait()
 	klog.Infof("etcd exit status: %v", err)
 	err = os.RemoveAll(etcdDataDir)
 	if err != nil {
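The reason for the new WaitGroup: cmd.Wait only reaps the etcd process, while the logging goroutine started above may still be running when the caller goes on to remove the data directory, which the race detector can flag. The same shutdown pattern in isolation, as a self-contained sketch:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done() // makes the goroutine's exit observable
		select {
		case <-ctx.Done():
			fmt.Println("worker: context cancelled")
		case <-time.After(5 * time.Second):
			fmt.Println("worker: timed out waiting")
		}
	}()

	cancel()  // corresponds to etcd exiting
	wg.Wait() // without this, cleanup could race with the goroutine above
	fmt.Println("safe to clean up now")
}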
@@ -18,6 +18,7 @@ package framework

 import (
 	"context"
+	"fmt"

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -109,20 +110,19 @@ func (p *IntegrationTestNodePreparer) PrepareNodes(ctx context.Context, nextNode
 			}
 		}
 		if err != nil {
-			klog.Fatalf("Error creating node: %v", err)
+			return fmt.Errorf("creating node: %w", err)
 		}
 	}

 	nodes, err := waitListAllNodes(p.client)
 	if err != nil {
-		klog.Fatalf("Error listing nodes: %v", err)
+		return fmt.Errorf("listing nodes: %w", err)
 	}
 	index := nextNodeIndex
 	for _, v := range p.countToStrategy {
 		for i := 0; i < v.Count; i, index = i+1, index+1 {
 			if err := testutils.DoPrepareNode(ctx, p.client, &nodes.Items[index], v.Strategy); err != nil {
-				klog.Errorf("Aborting node preparation: %v", err)
-				return err
+				return fmt.Errorf("aborting node preparation: %w", err)
 			}
 		}
 	}
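The switch from klog.Fatalf/Errorf to returned errors means PrepareNodes no longer kills the whole test process, and %w keeps the original cause inspectable by the caller. A small, self-contained illustration of what %w preserves (the names here are made up for the example):

package main

import (
	"errors"
	"fmt"
)

var errQuota = errors.New("quota exceeded")

func createNode() error { return errQuota }

func prepareNodes() error {
	if err := createNode(); err != nil {
		// %w wraps the original error instead of flattening it to text.
		return fmt.Errorf("creating node: %w", err)
	}
	return nil
}

func main() {
	err := prepareNodes()
	fmt.Println(err)                      // creating node: quota exceeded
	fmt.Println(errors.Is(err, errQuota)) // true: the cause survives wrapping
}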
@@ -102,6 +102,15 @@ performance.
 During interactive debugging sessions it is possible to enable per-test output
 via -use-testing-log.

+Log output can be quite large, in particular when running the large benchmarks
+and when not using -use-testing-log. For benchmarks, we want to produce that
+log output in a realistic way (= write to disk using the normal logging
+backends) and capture the output of a specific test as part of the job
+results only when that test failed. Therefore, if the ARTIFACTS env variable
+is set, each test redirects its own output to a `$ARTIFACTS/<test name>.log`
+file and removes that file only if the test passed.
+
 ### Integration tests

 To run integration tests, use:
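A minimal sketch of the redirect-and-remove pattern this paragraph describes, reduced to plain testing helpers; the actual change (see the setupTestCase hunk further down) additionally reroutes the klog/logsapi backends into the file and sanitizes the test name:

package example

import (
	"os"
	"path"
	"testing"
)

// redirectOutput sends a test's log output to $ARTIFACTS/<name>.log and
// removes the file again when the test passed. Sketch only: the real code
// also points the logging backends at this file.
func redirectOutput(t *testing.T) *os.File {
	artifacts, ok := os.LookupEnv("ARTIFACTS")
	if !ok {
		return nil // no artifact collection requested
	}
	name := path.Join(artifacts, t.Name()+".log")
	out, err := os.Create(name)
	if err != nil {
		t.Fatalf("creating log file: %v", err)
	}
	t.Cleanup(func() {
		out.Close()
		if !t.Failed() {
			os.Remove(name) // keep logs only for failures
		}
	})
	return out
}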
@@ -23,23 +23,15 @@ import (
 	"strings"
 	"testing"

-	"k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/component-base/featuregate"
 	"k8s.io/component-base/logs"
 	logsapi "k8s.io/component-base/logs/api/v1"
 	_ "k8s.io/component-base/logs/json/register"
 	"k8s.io/kubernetes/test/utils/ktesting"
 )

-// Run with -v=2, this is the default log level in production.
-//
-// In a PR this can be bumped up temporarily to run pull-kubernetes-scheduler-perf
-// with more log output.
-const defaultVerbosity = 2
-
 func TestMain(m *testing.M) {
 	// Run with -v=2, this is the default log level in production.
-	ktesting.SetDefaultVerbosity(defaultVerbosity)
+	ktesting.SetDefaultVerbosity(DefaultLoggingVerbosity)

 	// test/integration/framework/flags.go unconditionally initializes the
 	// logging flags. That's correct for most tests, but in the
@@ -56,21 +48,17 @@ func TestMain(m *testing.M) {
 	})
 	flag.CommandLine = &fs

-	featureGate := featuregate.NewFeatureGate()
-	runtime.Must(logsapi.AddFeatureGates(featureGate))
-	flag.Var(featureGate, "feature-gate",
+	flag.Var(LoggingFeatureGate, "feature-gate",
 		"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
-			"Options are:\n"+strings.Join(featureGate.KnownFeatures(), "\n"))
-	c := logsapi.NewLoggingConfiguration()
-	c.Verbosity = defaultVerbosity
+			"Options are:\n"+strings.Join(LoggingFeatureGate.KnownFeatures(), "\n"))

 	// This would fail if we hadn't removed the logging flags above.
-	logsapi.AddGoFlags(c, flag.CommandLine)
+	logsapi.AddGoFlags(LoggingConfig, flag.CommandLine)

 	flag.Parse()

 	logs.InitLogs()
-	if err := logsapi.ValidateAndApply(c, featureGate); err != nil {
+	if err := logsapi.ValidateAndApply(LoggingConfig, LoggingFeatureGate); err != nil {
 		fmt.Fprintf(os.Stderr, "%v\n", err)
 		os.Exit(1)
 	}
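flag.Var accepts the gate directly because a mutable feature gate implements flag.Value (Set/String); this hunk only swaps the locally built gate for the shared package-level one. A standalone sketch of that mechanism, with a made-up gate name:

package main

import (
	"flag"
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	// A mutable feature gate satisfies flag.Value, so it can be bound
	// with flag.Var and parse "Name=true" pairs itself.
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"MyExperiment": {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	flag.Var(gate, "feature-gate", "comma-separated key=value feature gate pairs")

	flag.CommandLine.Parse([]string{"-feature-gate", "MyExperiment=true"})
	fmt.Println(gate.Enabled("MyExperiment")) // true
}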
@@ -26,6 +26,7 @@ import (
 	"math"
 	"os"
 	"path"
+	"regexp"
 	"strings"
 	"sync"
 	"testing"
@@ -38,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
@@ -48,6 +50,7 @@ import (
 	"k8s.io/client-go/restmapper"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	logsapi "k8s.io/component-base/logs/api/v1"
 	"k8s.io/component-base/metrics/legacyregistry"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -94,6 +97,29 @@ const (
 	pluginLabelName = "plugin"
 )

+// Run with -v=2, this is the default log level in production.
+//
+// In a PR this can be bumped up temporarily to run pull-kubernetes-scheduler-perf
+// with more log output.
+const DefaultLoggingVerbosity = 2
+
+var LoggingFeatureGate FeatureGateFlag
+var LoggingConfig *logsapi.LoggingConfiguration
+
+type FeatureGateFlag interface {
+	featuregate.FeatureGate
+	flag.Value
+}
+
+func init() {
+	f := featuregate.NewFeatureGate()
+	runtime.Must(logsapi.AddFeatureGates(f))
+	LoggingFeatureGate = f
+
+	LoggingConfig = logsapi.NewLoggingConfiguration()
+	LoggingConfig.Verbosity = DefaultLoggingVerbosity
+}
+
 var (
 	defaultMetricsCollectorConfig = metricsCollectorConfig{
 		Metrics: map[string][]*labelValues{
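The FeatureGateFlag interface exists so one exported variable serves two consumers: TestMain binds it as a command-line flag (the flag.Value half) and test setup queries it (the featuregate.FeatureGate half). A brief usage sketch under those assumptions, placed in the same package; LoggingAlphaOptions is one of the gates logsapi registers:

// Sketch: both halves of the FeatureGateFlag interface in use.
func configureLoggingFlags(fs *flag.FlagSet) {
	// flag.Value half: bind the shared gate as a command-line flag.
	fs.Var(LoggingFeatureGate, "feature-gate",
		"A set of key=value pairs that describe feature gates")

	// featuregate.FeatureGate half: query the parsed state.
	if LoggingFeatureGate.Enabled("LoggingAlphaOptions") {
		fmt.Println("alpha logging options are enabled")
	}
}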
@@ -760,8 +786,62 @@ func initTestOutput(tb testing.TB) io.Writer {

 var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")

+var specialFilenameChars = regexp.MustCompile(`[^a-zA-Z0-9-_]`)
+
 func setupTestCase(t testing.TB, tc *testCase, output io.Writer, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) {
 	tCtx := ktesting.Init(t, initoption.PerTestOutput(*useTestingLog))
+	artifacts, doArtifacts := os.LookupEnv("ARTIFACTS")
+	if !*useTestingLog && doArtifacts {
+		// Reconfigure logging so that it goes to a separate file per
+		// test instead of stderr. If the test passes, the file gets
+		// deleted. The overall output can be very large (> 200 MB for
+		// ci-benchmark-scheduler-perf-master). With this approach, we
+		// have log output for failures without having to store large
+		// amounts of data that no-one is looking at. The performance
+		// is the same as writing to stderr.
+		if err := logsapi.ResetForTest(LoggingFeatureGate); err != nil {
+			t.Fatalf("Failed to reset the logging configuration: %v", err)
+		}
+		logfileName := path.Join(artifacts, specialFilenameChars.ReplaceAllString(t.Name(), "_")+".log")
+		out, err := os.Create(logfileName)
+		if err != nil {
+			t.Fatalf("Failed to create per-test log output file: %v", err)
+		}
+		t.Cleanup(func() {
+			// Everything should have stopped by now, checked below
+			// by GoleakCheck (which runs first during test
+			// shutdown!). Therefore we can clean up. Errors get logged
+			// and fail the test, but cleanup tries to continue.
+			//
+			// Note that the race detector will flag any goroutine
+			// as causing a race if there is no explicit wait for
+			// that goroutine to stop. We know that they must have
+			// stopped (GoleakCheck!) but the race detector
+			// doesn't.
+			//
+			// This is a major issue because many Kubernetes goroutines get
+			// started without waiting for them to stop :-(
+			if err := logsapi.ResetForTest(LoggingFeatureGate); err != nil {
+				t.Errorf("Failed to reset the logging configuration: %v", err)
+			}
+			if err := out.Close(); err != nil {
+				t.Errorf("Failed to close the per-test log output file: %s: %v", logfileName, err)
+			}
+			if !t.Failed() {
+				if err := os.Remove(logfileName); err != nil {
+					t.Errorf("Failed to remove the per-test log output file: %v", err)
+				}
+			}
+		})
+		opts := &logsapi.LoggingOptions{
+			ErrorStream: out,
+			InfoStream:  out,
+		}
+		if err := logsapi.ValidateAndApplyWithOptions(LoggingConfig, opts, LoggingFeatureGate); err != nil {
+			t.Fatalf("Failed to apply the per-test logging configuration: %v", err)
+		}
+	}

 	// Ensure that there are no leaked
 	// goroutines. They could influence
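specialFilenameChars matters because subtest names contain "/" and often spaces or brackets, none of which belong in file names. A quick runnable check of the substitution:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the hunk above: everything outside [a-zA-Z0-9-_]
// gets replaced.
var specialFilenameChars = regexp.MustCompile(`[^a-zA-Z0-9-_]`)

func main() {
	name := "BenchmarkPerfScheduling/SchedulingBasic/5000Nodes_10000Pods"
	fmt.Println(specialFilenameChars.ReplaceAllString(name, "_"))
	// Output: BenchmarkPerfScheduling_SchedulingBasic_5000Nodes_10000Pods
}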
@@ -59,6 +59,14 @@ func withTimeout(ctx context.Context, tb TB, timeout time.Duration, timeoutCause
 		// No need to set a cause here. The cause or error of
 		// the parent context will be used.
 	case <-after.C:
+		// Code using this tCtx may or may not log the
+		// information above when it runs into the
+		// cancellation. It's better if we do it, just to be on
+		// the safe side.
+		//
+		// Would be nice to log this with the source code location
+		// of our caller, but testing.Logf does not support that.
+		tb.Logf("\nWARNING: %s\n", timeoutCause)
 		cancel(canceledError(timeoutCause))
 	}
 }()
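The added Logf reports the timeout cause even if the code under test swallows the cancellation. The underlying mechanism, cancellation with an explicit cause, in a standalone sketch (standard library only, Go 1.21+):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	timeoutCause := errors.New("test timed out after 30s")
	timer := time.AfterFunc(50*time.Millisecond, func() {
		// Log before cancelling, in case the cancelled code never
		// inspects the cause itself.
		fmt.Printf("WARNING: %s\n", timeoutCause)
		cancel(timeoutCause)
	})
	defer timer.Stop()

	<-ctx.Done()
	fmt.Println("cause:", context.Cause(ctx)) // test timed out after 30s
}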