Merge pull request #117232 from pohly/scheduler-perf-code-cleanups

scheduler_perf: code cleanups
Kubernetes Prow Robot, 2023-05-03 09:54:13 -07:00, committed by GitHub
commit aece6838e8
3 changed files with 27 additions and 16 deletions
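At its core this is a plumbing change: mustSetupScheduler and StartScheduler now return the whole informers.SharedInformerFactory instead of a single pod informer, and callers derive the informers they need from it (podInformer := informerFactory.Core().V1().Pods()). Below is a minimal, self-contained sketch of that client-go pattern; the fake clientset stands in for the benchmark's real API server and is not part of this PR.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// One factory is shared by everything that needs informers.
	client := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(client, 0)

	// Deriving a typed informer registers it with the factory but does
	// not start it.
	podInformer := informerFactory.Core().V1().Pods()

	// Start launches all informers derived so far. Calling Start again
	// later only starts informers that are not yet running, which is why
	// the comment in the first hunk notes the factory does not need to
	// be started again for the pod informer.
	informerFactory.Start(ctx.Done())
	informerFactory.WaitForCacheSync(ctx.Done())

	pods, err := podInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("pods in cache: %d\n", len(pods))
}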

File 1 of 3

@@ -760,7 +760,13 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 			b.Fatalf("validate scheduler config file failed: %v", err)
 		}
 	}
-	podInformer, client, dynClient := mustSetupScheduler(ctx, b, cfg)
+	informerFactory, client, dynClient := mustSetupScheduler(ctx, b, cfg)
+
+	// Additional informers needed for testing. The pod informer was
+	// already created before (scheduler.NewInformerFactory) and the
+	// factory was started for it (mustSetupScheduler), therefore we don't
+	// need to start again.
+	podInformer := informerFactory.Core().V1().Pods()

 	var mu sync.Mutex
 	var dataItems []DataItem
@@ -768,6 +774,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 	// numPodsScheduledPerNamespace has all namespaces created in workload and the number of pods they (will) have.
 	// All namespaces listed in numPodsScheduledPerNamespace will be cleaned up.
 	numPodsScheduledPerNamespace := make(map[string]int)
 	b.Cleanup(func() {
 		for namespace := range numPodsScheduledPerNamespace {
 			if err := client.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil {
@@ -826,15 +833,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 			if concreteOp.Namespace != nil {
 				namespace = *concreteOp.Namespace
 			}
-			if _, ok := numPodsScheduledPerNamespace[namespace]; !ok {
-				// The namespace has not created yet.
-				// So, creat that and register it to numPodsScheduledPerNamespace.
-				_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
-				if err != nil {
-					b.Fatalf("failed to create namespace for Pod: %v", namespace)
-				}
-				numPodsScheduledPerNamespace[namespace] = 0
-			}
+			createNamespaceIfNotPresent(ctx, b, client, namespace, &numPodsScheduledPerNamespace)
 			if concreteOp.PodTemplatePath == nil {
 				concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath
 			}
@@ -1030,6 +1029,18 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 	return dataItems
 }

+func createNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
+	if _, ok := (*podsPerNamespace)[namespace]; !ok {
+		// The namespace has not been created yet,
+		// so create it and register it.
+		_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
+		if err != nil {
+			b.Fatalf("failed to create namespace for Pod: %v", namespace)
+		}
+		(*podsPerNamespace)[namespace] = 0
+	}
+}
+
 type testDataCollector interface {
 	run(ctx context.Context)
 	collect() []DataItem
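A hypothetical usage sketch for the new helper, assuming createNamespaceIfNotPresent from the hunk above is in scope; the package name, benchmark name, and fake clientset are stand-ins so the snippet runs without a cluster.

package benchmarks // hypothetical package for this sketch

import (
	"context"
	"testing"

	"k8s.io/client-go/kubernetes/fake"
)

// BenchmarkNamespaceSetup (a made-up name) drives the helper the way
// runWorkload does: the map both registers namespaces for later cleanup
// and counts the pods scheduled into them.
func BenchmarkNamespaceSetup(b *testing.B) {
	ctx := context.Background()
	client := fake.NewSimpleClientset()
	numPodsScheduledPerNamespace := make(map[string]int)

	// Only the first call issues a Create; the second finds the entry
	// already registered in the map and returns immediately.
	createNamespaceIfNotPresent(ctx, b, client, "benchmark-ns", &numPodsScheduledPerNamespace)
	createNamespaceIfNotPresent(ctx, b, client, "benchmark-ns", &numPodsScheduledPerNamespace)
}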

File 2 of 3

@@ -34,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
@@ -75,7 +76,7 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
 // remove resources after finished.
 // Notes on rate limiter:
 //   - client rate limit is set to 5000.
-func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration) (coreinformers.PodInformer, clientset.Interface, dynamic.Interface) {
+func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
 	// Run API server with minimal logging by default. Can be raised with -v.
 	framework.MinVerbosity = 0
@@ -112,10 +113,10 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc
 	// Not all config options will be effective but only those mostly related to scheduler performance will
 	// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
-	_, podInformer := util.StartScheduler(ctx, client, cfg, config)
+	_, informerFactory := util.StartScheduler(ctx, client, cfg, config)
 	util.StartFakePVController(ctx, client)

-	return podInformer, client, dynClient
+	return informerFactory, client, dynClient
 }

 // Returns the list of scheduled pods in the specified namespaces.
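The trailing context line above refers to a helper that lists scheduled pods; its body is outside this diff. A hypothetical sketch of how such a lookup can be written against the factory that mustSetupScheduler now returns (the function name and shape are assumptions, not the file's actual code):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
)

// getScheduledPodsSketch lists pods from the shared informer cache and
// keeps only those already bound to a node: a scheduled pod has
// Spec.NodeName set by the scheduler.
func getScheduledPodsSketch(informerFactory informers.SharedInformerFactory, namespaces ...string) ([]*v1.Pod, error) {
	var scheduled []*v1.Pod
	lister := informerFactory.Core().V1().Pods().Lister()
	for _, namespace := range namespaces {
		pods, err := lister.Pods(namespace).List(labels.Everything())
		if err != nil {
			return nil, err
		}
		for _, pod := range pods {
			if pod.Spec.NodeName != "" {
				scheduled = append(scheduled, pod)
			}
		}
	}
	return scheduled, nil
}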

File 3 of 3

@@ -34,7 +34,6 @@ import (
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
-	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	restclient "k8s.io/client-go/rest"
@@ -68,7 +67,7 @@ type ShutdownFunc func()
 // StartScheduler configures and starts a scheduler given a handle to the clientSet interface
 // and event broadcaster. It returns the running scheduler and the shared informer factory. Background goroutines
 // will keep running until the context is canceled.
-func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, coreinformers.PodInformer) {
+func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, informers.SharedInformerFactory) {
 	informerFactory := scheduler.NewInformerFactory(clientSet, 0)
 	evtBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
 		Interface: clientSet.EventsV1()})
@@ -100,7 +99,7 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf
 	informerFactory.WaitForCacheSync(ctx.Done())
 	go sched.Run(ctx)

-	return sched, informerFactory.Core().V1().Pods()
+	return sched, informerFactory
 }

 // StartFakePVController is a simplified pv controller logic that sets PVC VolumeName and annotation for each PV binding.
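Taken together, the signature changes mean an integration test gets the scheduler and any informer it needs from one place. A hypothetical caller sketch, assuming the StartScheduler from the hunk above is in scope; the function name is made up for illustration:

package sketch

import (
	"context"

	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

// startForTestSketch shows a hypothetical caller of the changed API:
// one factory serves both the scheduler and the test's own informers.
func startForTestSketch(ctx context.Context, client clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) {
	sched, informerFactory := StartScheduler(ctx, client, kubeConfig, cfg)
	_ = sched

	// The pod informer that used to be returned directly is one call away.
	podLister := informerFactory.Core().V1().Pods().Lister()
	_ = podLister

	// Informers derived after StartScheduler has already started the
	// factory need another Start; informers that are running are skipped,
	// so calling Start repeatedly is safe.
	_ = informerFactory.Core().V1().Nodes().Informer()
	informerFactory.Start(ctx.Done())
	informerFactory.WaitForCacheSync(ctx.Done())
}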