From 969d28b12b7402e760c6acaa65f2e6886bb3e261 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 25 Jan 2023 20:38:04 +0100
Subject: [PATCH 1/2] scheduler_perf: refactor common code

---
 .../scheduler_perf/scheduler_perf_test.go | 23 +++++++++++--------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index e560f5a8a03..6f0f39bf6dc 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -758,6 +758,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 	// numPodsScheduledPerNamespace has all namespaces created in workload and the number of pods they (will) have.
 	// All namespaces listed in numPodsScheduledPerNamespace will be cleaned up.
 	numPodsScheduledPerNamespace := make(map[string]int)
+
 	b.Cleanup(func() {
 		for namespace := range numPodsScheduledPerNamespace {
 			if err := client.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil {
@@ -816,15 +817,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 			if concreteOp.Namespace != nil {
 				namespace = *concreteOp.Namespace
 			}
-			if _, ok := numPodsScheduledPerNamespace[namespace]; !ok {
-				// The namespace has not created yet.
-				// So, creat that and register it to numPodsScheduledPerNamespace.
-				_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
-				if err != nil {
-					b.Fatalf("failed to create namespace for Pod: %v", namespace)
-				}
-				numPodsScheduledPerNamespace[namespace] = 0
-			}
+			createNamespaceIfNotPresent(ctx, b, client, namespace, &numPodsScheduledPerNamespace)
 			if concreteOp.PodTemplatePath == nil {
 				concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath
 			}
@@ -1020,6 +1013,18 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 	return dataItems
 }
 
+func createNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
+	if _, ok := (*podsPerNamespace)[namespace]; !ok {
+		// The namespace has not been created yet.
+		// So, create it and register it.
+		_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
+		if err != nil {
+			b.Fatalf("failed to create namespace for Pod: %v", namespace)
+		}
+		(*podsPerNamespace)[namespace] = 0
+	}
+}
+
 type testDataCollector interface {
 	run(ctx context.Context)
 	collect() []DataItem
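Note on the extracted helper's signature: Go maps are reference types, so the `*map[string]int` indirection is not required for the insertion to be visible at the call site; the patch keeps the pointer, but a plain map parameter would behave identically. A minimal equivalent sketch (illustrative only, not part of the commit):

```go
// Same behavior with a plain map parameter: a Go map value is a handle to
// shared storage, so the write to podsPerNamespace is visible to the
// caller. A pointer would only matter for replacing the map itself.
func createNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace map[string]int) {
	if _, ok := podsPerNamespace[namespace]; !ok {
		// The namespace has not been created yet, so create and register it.
		_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
		if err != nil {
			b.Fatalf("failed to create namespace for Pod: %v", namespace)
		}
		podsPerNamespace[namespace] = 0
	}
}
```

The call site would then pass numPodsScheduledPerNamespace directly instead of &numPodsScheduledPerNamespace.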
From b3e0bc88640dd8df2cc976817d4c055bc84a9760 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Fri, 3 Feb 2023 16:13:20 +0100
Subject: [PATCH 2/2] scheduler_perf: let the test decide which informers are needed

Which informers are needed will change when dynamic resource allocation
test cases get added. Instead of changing mustSetupScheduler and
StartScheduler for that, let's return the informer factory and create
informers as needed in the test.
---
 test/integration/scheduler_perf/scheduler_perf_test.go | 8 +++++++-
 test/integration/scheduler_perf/util.go                | 7 ++++---
 test/integration/util/util.go                          | 5 ++---
 3 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index 6f0f39bf6dc..596954c258d 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -750,7 +750,13 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 			b.Fatalf("validate scheduler config file failed: %v", err)
 		}
 	}
-	podInformer, client, dynClient := mustSetupScheduler(ctx, b, cfg)
+	informerFactory, client, dynClient := mustSetupScheduler(ctx, b, cfg)
+
+	// Additional informers needed for testing. The pod informer was
+	// already created before (scheduler.NewInformerFactory) and the
+	// factory was started for it (mustSetupScheduler), therefore we don't
+	// need to start it again.
+	podInformer := informerFactory.Core().V1().Pods()
 
 	var mu sync.Mutex
 	var dataItems []DataItem
diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go
index ca94f5196b4..cc7e028e93d 100644
--- a/test/integration/scheduler_perf/util.go
+++ b/test/integration/scheduler_perf/util.go
@@ -34,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
@@ -75,7 +76,7 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
 // remove resources after finished.
 // Notes on rate limiter:
 // - client rate limit is set to 5000.
-func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration) (coreinformers.PodInformer, clientset.Interface, dynamic.Interface) {
+func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
 	// Run API server with minimimal logging by default. Can be raised with -v.
 	framework.MinVerbosity = 0
 
@@ -112,10 +113,10 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc
 
 	// Not all config options will be effective but only those mostly related with scheduler performance will
 	// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
-	_, podInformer := util.StartScheduler(ctx, client, cfg, config)
+	_, informerFactory := util.StartScheduler(ctx, client, cfg, config)
 	util.StartFakePVController(ctx, client)
 
-	return podInformer, client, dynClient
+	return informerFactory, client, dynClient
 }
 
 // Returns the list of scheduled pods in the specified namespaces.
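The pattern this enables, sketched below with a hypothetical helper (not part of the series): a test case that needs an extra informer can request it from the returned factory and call Start again. SharedInformerFactory.Start only launches informers that have not been started yet, so the pod informer already running for the scheduler is unaffected. The node informer merely stands in for whatever a future test case (e.g. dynamic resource allocation) might request.

```go
import (
	"context"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"
)

// startExtraInformer requests one more informer from a factory that
// mustSetupScheduler has already started, then starts and syncs it.
func startExtraInformer(ctx context.Context, factory informers.SharedInformerFactory) cache.SharedIndexInformer {
	// Requesting the informer only registers it with the factory.
	nodeInformer := factory.Core().V1().Nodes().Informer()
	// Start spawns goroutines only for informers not yet running.
	factory.Start(ctx.Done())
	// Block until the new informer has completed its initial list.
	factory.WaitForCacheSync(ctx.Done())
	return nodeInformer
}
```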
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index b39ca1725c9..68442ab89e0 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
-	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	restclient "k8s.io/client-go/rest"
@@ -68,7 +67,7 @@ type ShutdownFunc func()
 // StartScheduler configures and starts a scheduler given a handle to the clientSet interface
 // and event broadcaster. It returns the running scheduler and podInformer. Background goroutines
 // will keep running until the context is canceled.
-func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, coreinformers.PodInformer) {
+func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, informers.SharedInformerFactory) {
 	informerFactory := scheduler.NewInformerFactory(clientSet, 0)
 	evtBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
 		Interface: clientSet.EventsV1()})
@@ -100,7 +99,7 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interf
 	informerFactory.WaitForCacheSync(ctx.Done())
 	go sched.Run(ctx)
 
-	return sched, informerFactory.Core().V1().Pods()
+	return sched, informerFactory
 }
 
 // StartFakePVController is a simplified pv controller logic that sets PVC VolumeName and annotation for each PV binding.
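One property that makes returning the whole factory safe (a standalone sketch using a fake client, not code from this series): SharedInformerFactory memoizes informers per type, so the podInformer that scheduler_perf_test.go now derives from the factory is the very informer the scheduler already uses; no second watch is opened.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake clientset suffices to demonstrate the memoization.
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)

	// Both accessor chains return the same shared informer instance.
	first := factory.Core().V1().Pods().Informer()
	second := factory.Core().V1().Pods().Informer()
	fmt.Println(first == second) // true: one informer, one watch connection
}
```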