make scheduler_perf usable from other repositories

Kensei Nakada 2023-12-01 12:42:10 +00:00
parent 1f07da7575
commit 5310abe14a
7 changed files with 1623 additions and 1560 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,104 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"context"
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestScheduling(t *testing.T) {
	testCases, err := getTestCases(configFile)
	if err != nil {
		t.Fatal(err)
	}
	if err = validateTestCases(testCases); err != nil {
		t.Fatal(err)
	}

	// Check for leaks at the very end.
	framework.GoleakCheck(t)

	// All integration test cases share the same etcd, similar to
	// https://github.com/kubernetes/kubernetes/blob/18d05b646d09b2971dc5400bc288062b0414e8cf/test/integration/framework/etcd.go#L186-L222.
	framework.StartEtcd(t, nil)

	// Workloads with the same configuration share the same apiserver. For that
	// we first need to determine what those different configs are.
	var configs []schedulerConfig
	for _, tc := range testCases {
		tcEnabled := false
		for _, w := range tc.Workloads {
			if enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
				tcEnabled = true
				break
			}
		}
		if !tcEnabled {
			continue
		}
		exists := false
		for _, config := range configs {
			if config.equals(tc) {
				exists = true
				break
			}
		}
		if !exists {
			configs = append(configs, schedulerConfig{schedulerConfigPath: tc.SchedulerConfigPath, featureGates: tc.FeatureGates})
		}
	}
	for _, config := range configs {
		// Not a sub test because we don't have a good name for it.
		func() {
			_, ctx := ktesting.NewTestContext(t)

			// No timeout here because the `go test -timeout` will ensure that
			// the test doesn't get stuck forever.
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			for feature, flag := range config.featureGates {
				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, flag)()
			}
			informerFactory, client, dynClient := setupClusterForWorkload(ctx, t, config.schedulerConfigPath, config.featureGates, nil)

			for _, tc := range testCases {
				if !config.equals(tc) {
					// Runs with some other config.
					continue
				}
				t.Run(tc.Name, func(t *testing.T) {
					for _, w := range tc.Workloads {
						t.Run(w.Name, func(t *testing.T) {
							if !enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
								t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
							}
							_, ctx := ktesting.NewTestContext(t)
							runWorkload(ctx, t, tc, w, informerFactory, client, dynClient, true)
						})
					}
				})
			}
		}()
	}
}
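
The grouping above hinges on schedulerConfig.equals, which is not shown in this diff. A plausible sketch, assuming it simply compares the scheduler config path and the feature-gate map (maps.Equal is in the standard library from Go 1.21):

	// Plausible sketch of schedulerConfig.equals (not part of this diff):
	// two test cases can share an apiserver iff they agree on the scheduler
	// configuration file and on every feature gate.
	func (c schedulerConfig) equals(tc *testCase) bool {
		return c.schedulerConfigPath == tc.SchedulerConfigPath &&
			maps.Equal(c.featureGates, tc.FeatureGates)
	}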

@@ -49,6 +49,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
+	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/test/integration/framework"
 	"k8s.io/kubernetes/test/integration/util"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -82,7 +83,7 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
 // remove resources after finished.
 // Notes on rate limiter:
 // - client rate limit is set to 5000.
-func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
+func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
 	// Run API server with minimal logging by default. Can be raised with -v.
 	framework.MinVerbosity = 0
@@ -126,7 +127,7 @@ func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSch
 	// Not all config options will be effective but only those mostly related with scheduler performance will
 	// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
-	_, informerFactory := util.StartScheduler(ctx, client, cfg, config)
+	_, informerFactory := util.StartScheduler(ctx, client, cfg, config, outOfTreePluginRegistry)
 	util.StartFakePVController(ctx, client, informerFactory)
 	runGC := util.CreateGCController(ctx, tb, *cfg, informerFactory)
 	runNS := util.CreateNamespaceController(ctx, tb, *cfg, informerFactory)
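
mustSetupCluster now threads an out-of-tree registry down to util.StartScheduler, which is what lets a consuming repository benchmark its own plugins. A minimal sketch of such a plugin, assuming a hypothetical no-op filter named CustomFilter (all names here are illustrative, not from this commit):

	package example // illustrative; would live in the consuming repository

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/pkg/scheduler/framework"
	)

	// customFilter is a hypothetical out-of-tree plugin that accepts every node.
	type customFilter struct{}

	var _ framework.FilterPlugin = &customFilter{}

	func (p *customFilter) Name() string { return "CustomFilter" }

	// Filter returns nil (success) for every node; a real plugin would apply
	// its own policy here.
	func (p *customFilter) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
		return nil
	}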

@@ -67,6 +67,7 @@ import (
 	configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption"
+	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/profile"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -82,7 +83,7 @@ type ShutdownFunc func()
 // StartScheduler configures and starts a scheduler given a handle to the clientSet interface
 // and event broadcaster. It returns the running scheduler and podInformer. Background goroutines
 // will keep running until the context is canceled.
-func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, informers.SharedInformerFactory) {
+func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration, outOfTreePluginRegistry frameworkruntime.Registry) (*scheduler.Scheduler, informers.SharedInformerFactory) {
 	informerFactory := scheduler.NewInformerFactory(clientSet, 0)
 	evtBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
 		Interface: clientSet.EventsV1()})
@@ -107,7 +108,9 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf
 		scheduler.WithPodMaxBackoffSeconds(cfg.PodMaxBackoffSeconds),
 		scheduler.WithPodInitialBackoffSeconds(cfg.PodInitialBackoffSeconds),
 		scheduler.WithExtenders(cfg.Extenders...),
-		scheduler.WithParallelism(cfg.Parallelism))
+		scheduler.WithParallelism(cfg.Parallelism),
+		scheduler.WithFrameworkOutOfTreeRegistry(outOfTreePluginRegistry),
+	)
 	if err != nil {
 		logger.Error(err, "Error creating scheduler")
 		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
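
With the new parameter in place, an out-of-tree repository can register the hypothetical plugin sketched above and hand it to StartScheduler. A hedged sketch of the wiring; the PluginFactory signature shown matches v1.29-era Kubernetes and may differ in other releases (older ones omit the context argument):

	// Build a registry mapping plugin names to factories. frameworkruntime is
	// k8s.io/kubernetes/pkg/scheduler/framework/runtime; runtime is
	// k8s.io/apimachinery/pkg/runtime.
	registry := frameworkruntime.Registry{
		"CustomFilter": func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
			return &customFilter{}, nil
		},
	}

	// Pass the registry through. Note that the scheduler configuration (cfg)
	// must also enable "CustomFilter" in a profile's plugin set, otherwise the
	// registered plugin is never invoked.
	sched, informerFactory := util.StartScheduler(ctx, clientSet, kubeConfig, cfg, registry)
	_, _ = sched, informerFactory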