Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-13 13:55:41 +00:00
Merge pull request #117232 from pohly/scheduler-perf-code-cleanups

scheduler_perf: code cleanups

Commit: aece6838e8

@@ -760,7 +760,13 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 			b.Fatalf("validate scheduler config file failed: %v", err)
 		}
 	}
-	podInformer, client, dynClient := mustSetupScheduler(ctx, b, cfg)
+	informerFactory, client, dynClient := mustSetupScheduler(ctx, b, cfg)
 
+	// Additional informers needed for testing. The pod informer was
+	// already created before (scheduler.NewInformerFactory) and the
+	// factory was started for it (mustSetupScheduler), therefore we don't
+	// need to start again.
+	podInformer := informerFactory.Core().V1().Pods()
+
 	var mu sync.Mutex
 	var dataItems []DataItem
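
Context for the hunk above: runWorkload now receives the whole informers.SharedInformerFactory from mustSetupScheduler and derives the typed pod informer from it, instead of being handed a pod informer directly. The following is a minimal, self-contained sketch of that client-go pattern, not part of the commit; it uses the fake clientset so it runs without a cluster.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Fake clientset pre-loaded with one pod; stands in for a real cluster.
	client := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})

	// One shared factory per client; typed informers are derived from it.
	informerFactory := informers.NewSharedInformerFactory(client, 0)
	podInformer := informerFactory.Core().V1().Pods()

	// Start all informers requested so far and wait for their caches to sync.
	stopCh := make(chan struct{})
	defer close(stopCh)
	informerFactory.Start(stopCh)
	informerFactory.WaitForCacheSync(stopCh)

	pods, err := podInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("pods in cache:", len(pods))
}
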
@@ -768,6 +774,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 	// numPodsScheduledPerNamespace has all namespaces created in workload and the number of pods they (will) have.
 	// All namespaces listed in numPodsScheduledPerNamespace will be cleaned up.
 	numPodsScheduledPerNamespace := make(map[string]int)
+
 	b.Cleanup(func() {
 		for namespace := range numPodsScheduledPerNamespace {
 			if err := client.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil {
@@ -826,15 +833,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 			if concreteOp.Namespace != nil {
 				namespace = *concreteOp.Namespace
 			}
-			if _, ok := numPodsScheduledPerNamespace[namespace]; !ok {
-				// The namespace has not created yet.
-				// So, creat that and register it to numPodsScheduledPerNamespace.
-				_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
-				if err != nil {
-					b.Fatalf("failed to create namespace for Pod: %v", namespace)
-				}
-				numPodsScheduledPerNamespace[namespace] = 0
-			}
+			createNamespaceIfNotPresent(ctx, b, client, namespace, &numPodsScheduledPerNamespace)
 			if concreteOp.PodTemplatePath == nil {
 				concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath
 			}
@@ -1030,6 +1029,18 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload) [
 	return dataItems
 }
 
+func createNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
+	if _, ok := (*podsPerNamespace)[namespace]; !ok {
+		// The namespace has not created yet.
+		// So, create that and register it.
+		_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
+		if err != nil {
+			b.Fatalf("failed to create namespace for Pod: %v", namespace)
+		}
+		(*podsPerNamespace)[namespace] = 0
+	}
+}
+
 type testDataCollector interface {
 	run(ctx context.Context)
 	collect() []DataItem
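
A hedged, standalone sketch of how the new createNamespaceIfNotPresent helper (hunk above) is meant to be used: repeated calls for the same namespace issue only one Create and register the namespace exactly once. Everything here is illustrative rather than taken from the test code: demoCreateNamespaceIfNotPresent mirrors the helper, and the fake clientset plus BenchmarkNamespaceHelper stand in for the real integration setup.

package demo

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// demoCreateNamespaceIfNotPresent creates the namespace once and registers it
// with a pod count of zero; later calls for the same namespace are no-ops.
func demoCreateNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
	if _, ok := (*podsPerNamespace)[namespace]; ok {
		return
	}
	if _, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil {
		b.Fatalf("failed to create namespace %q: %v", namespace, err)
	}
	(*podsPerNamespace)[namespace] = 0
}

func BenchmarkNamespaceHelper(b *testing.B) {
	client := fake.NewSimpleClientset()
	counts := map[string]int{}
	for i := 0; i < 2; i++ {
		// The second iteration finds "ns-1" already registered and returns early.
		demoCreateNamespaceIfNotPresent(context.Background(), b, client, "ns-1", &counts)
	}
	if len(counts) != 1 {
		b.Fatalf("expected exactly one namespace, got %d", len(counts))
	}
}
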
@@ -34,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
@@ -75,7 +76,7 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
 // remove resources after finished.
 // Notes on rate limiter:
 // - client rate limit is set to 5000.
-func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration) (coreinformers.PodInformer, clientset.Interface, dynamic.Interface) {
+func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
 	// Run API server with minimimal logging by default. Can be raised with -v.
 	framework.MinVerbosity = 0
 
@@ -112,10 +113,10 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc
 
 	// Not all config options will be effective but only those mostly related with scheduler performance will
 	// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
-	_, podInformer := util.StartScheduler(ctx, client, cfg, config)
+	_, informerFactory := util.StartScheduler(ctx, client, cfg, config)
 	util.StartFakePVController(ctx, client)
 
-	return podInformer, client, dynClient
+	return informerFactory, client, dynClient
 }
 
 // Returns the list of scheduled pods in the specified namespaces.
@@ -34,7 +34,6 @@ import (
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
-	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	restclient "k8s.io/client-go/rest"
@@ -68,7 +67,7 @@ type ShutdownFunc func()
 // StartScheduler configures and starts a scheduler given a handle to the clientSet interface
 // and event broadcaster. It returns the running scheduler and podInformer. Background goroutines
 // will keep running until the context is canceled.
-func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, coreinformers.PodInformer) {
+func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConfig *restclient.Config, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, informers.SharedInformerFactory) {
 	informerFactory := scheduler.NewInformerFactory(clientSet, 0)
 	evtBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
 		Interface: clientSet.EventsV1()})
@@ -100,7 +99,7 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf
 	informerFactory.WaitForCacheSync(ctx.Done())
 	go sched.Run(ctx)
 
-	return sched, informerFactory.Core().V1().Pods()
+	return sched, informerFactory
 }
 
 // StartFakePVController is a simplified pv controller logic that sets PVC VolumeName and annotation for each PV binding.
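
One practical effect of StartScheduler (and mustSetupScheduler) returning the whole SharedInformerFactory instead of a single pod informer is that callers can request additional typed informers later; calling Start again is safe because it only starts informers that are not yet running. Below is a hedged sketch of that pattern, not part of the commit, with the fake clientset standing in for the test apiserver.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(client, 0)

	// The scheduler side would already have requested and started the pod
	// informer; here we model that by starting the factory once.
	podInformer := informerFactory.Core().V1().Pods()
	stopCh := make(chan struct{})
	defer close(stopCh)
	informerFactory.Start(stopCh)
	informerFactory.WaitForCacheSync(stopCh)

	// A test that later needs node objects derives one more informer and
	// starts the factory again; only the new node informer is started.
	nodeInformer := informerFactory.Core().V1().Nodes()
	informerFactory.Start(stopCh)
	informerFactory.WaitForCacheSync(stopCh)

	pods, _ := podInformer.Lister().List(labels.Everything())
	nodes, _ := nodeInformer.Lister().List(labels.Everything())
	fmt.Println("pods:", len(pods), "nodes:", len(nodes))
}
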