Switch scheduler to use generated listers/informers
Where possible, switch the scheduler to use generated listers and informers. There are still some places where it probably makes more sense to use one-off reflectors/informers (listing/watching just a single node, listing/watching scheduled & unscheduled pods using a field selector).
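
For context, the sketch below illustrates the generated lister/informer pattern the scheduler is being moved to: a single shared informer factory hands out typed informers and listers that read from a watch-backed cache. It is written against the standalone k8s.io/client-go packages rather than the in-tree informers_generated paths used in this diff, and the kubeconfig path is a placeholder, so treat it as an illustration of the pattern rather than code from this commit.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset; the kubeconfig path here is only a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// One shared informer factory backs all generated informers and listers,
	// so every consumer shares a single watch and cache per resource type.
	factory := informers.NewSharedInformerFactory(client, 0)
	podInformer := factory.Core().V1().Pods()
	podLister := podInformer.Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// Listers read from the informer cache, so wait for the initial sync.
	if !cache.WaitForCacheSync(stop, podInformer.Informer().HasSynced) {
		panic("timed out waiting for pod cache to sync")
	}

	// Generated listers are typed and take a label selector, which is why
	// the benchmark code below switches to List(labels.Everything()).
	pods, err := podLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("observed %d pods in the cache\n", len(pods))
}
```

The benefit over one-off reflectors is that every consumer of a resource type shares one watch and one cache, which is why a shared factory is plumbed into NewConfigFactory further down in this diff.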
@@ -16,6 +16,7 @@ go_library(
         "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//pkg/client/informers/informers_generated/externalversions:go_default_library",
         "//plugin/pkg/scheduler:go_default_library",
         "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
         "//plugin/pkg/scheduler/factory:go_default_library",
@@ -43,6 +44,7 @@ go_test(
         "//test/utils:go_default_library",
         "//vendor:github.com/golang/glog",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
+        "//vendor:k8s.io/apimachinery/pkg/labels",
     ],
 )
 
@@ -20,6 +20,7 @@ import (
 	"testing"
 	"time"
 
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 
@@ -74,7 +75,10 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
 	podCreator.CreatePods()
 
 	for {
-		scheduled := schedulerConfigFactory.GetScheduledPodListerIndexer().List()
+		scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
+		if err != nil {
+			glog.Fatalf("%v", err)
+		}
 		if len(scheduled) >= numScheduledPods {
 			break
 		}
@@ -89,7 +93,10 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
 	for {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
-		scheduled := schedulerConfigFactory.GetScheduledPodListerIndexer().List()
+		scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
+		if err != nil {
+			glog.Fatalf("%v", err)
+		}
 		if len(scheduled) >= numScheduledPods+b.N {
 			break
 		}
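
The two loops above poll the scheduled-pod lister until the expected count is reached, and the TODO notes that a watch would avoid repeatedly taking the List() path. As a hedged aside, the same polling can also be expressed with the wait helpers from apimachinery; waitForScheduled, podLister, and want are illustrative names, not identifiers from this commit.

```go
package example

import (
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// waitForScheduled is a hypothetical helper: it polls a pod lister until at
// least want pods are visible in the cache or the timeout expires.
func waitForScheduled(podLister corelisters.PodLister, want int) {
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Minute, func() (bool, error) {
		scheduled, err := podLister.List(labels.Everything())
		if err != nil {
			return false, err
		}
		return len(scheduled) >= want, nil
	})
	if err != nil {
		glog.Fatalf("pods were not scheduled in time: %v", err)
	}
}
```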
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -209,7 +210,10 @@ func schedulePods(config *testConfig) int32 {
 	// Bake in time for the first pod scheduling event.
 	for {
 		time.Sleep(50 * time.Millisecond)
-		scheduled := config.schedulerSupportFunctions.GetScheduledPodListerIndexer().List()
+		scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
+		if err != nil {
+			glog.Fatalf("%v", err)
+		}
 		// 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
 		// TODO Find out why sometimes there may be scheduling blips in the beggining.
 		if len(scheduled) > config.numPods/100 {
@@ -224,7 +228,10 @@ func schedulePods(config *testConfig) int32 {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
-		scheduled := config.schedulerSupportFunctions.GetScheduledPodListerIndexer().List()
+		scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
+		if err != nil {
+			glog.Fatalf("%v", err)
+		}
 
 		// We will be completed when all pods are done being scheduled.
 		// return the worst-case-scenario interval that was seen during this time.
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
 	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
@@ -58,7 +59,19 @@ func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroy
 		Burst: 5000,
 	})
 
-	schedulerConfigurator = factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight)
+	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+
+	schedulerConfigurator = factory.NewConfigFactory(
+		v1.DefaultSchedulerName,
+		clientSet,
+		informerFactory.Core().V1().Nodes(),
+		informerFactory.Core().V1().PersistentVolumes(),
+		informerFactory.Core().V1().PersistentVolumeClaims(),
+		informerFactory.Core().V1().ReplicationControllers(),
+		informerFactory.Extensions().V1beta1().ReplicaSets(),
+		informerFactory.Core().V1().Services(),
+		v1.DefaultHardPodAffinitySymmetricWeight,
+	)
 
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
@@ -70,11 +83,15 @@ func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroy
 		glog.Fatalf("Error creating scheduler: %v", err)
 	}
 
+	stop := make(chan struct{})
+	informerFactory.Start(stop)
+
 	sched.Run()
 
 	destroyFunc = func() {
 		glog.Infof("destroying")
 		sched.StopEverything()
+		close(stop)
 		s.Close()
 		glog.Infof("destroyed")
 	}
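
The final two hunks tie the shared informer factory to the benchmark's lifecycle: the factory is started with a stop channel once the scheduler has been created, and destroyFunc closes that channel (after sched.StopEverything()) so the informer goroutines exit with the rest of the test fixture. A minimal sketch of that start/teardown pattern, using the standalone client-go factory and an invented newInformerHarness name, looks like this:

```go
package example

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newInformerHarness is an invented name for illustration. It mirrors the
// shape of mustSetupScheduler above: start the shared informers with a stop
// channel and return a teardown func that closes the channel so every
// informer goroutine the factory started shuts down.
func newInformerHarness(client kubernetes.Interface) (informers.SharedInformerFactory, func()) {
	factory := informers.NewSharedInformerFactory(client, 0)

	stop := make(chan struct{})
	factory.Start(stop)

	teardown := func() {
		close(stop)
	}
	return factory, teardown
}
```

Note that Start only launches informers that have already been requested from the factory, which is why the commit constructs the factory and hands the individual informers to NewConfigFactory before calling Start.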