Move users of factory.NewConfigFactory to scheduler.New

Guoliang Wang
2018-12-08 11:26:41 +08:00
parent 13e59ab9ad
commit 3c24c99b08
10 changed files with 131 additions and 258 deletions
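The gist of the change, as the hunks below show: everything that was previously assembled on a schedulerappconfig.Config and completed via schedulerapp.NewSchedulerConfig now goes to scheduler.New directly, either as a positional argument or as a functional option. A rough field-to-argument map, written out as a reading aid (the stop-channel reading of the trailing nil is an inference from the call site, not something this excerpt states):

// Old schedulerappconfig.Config field            -> new scheduler.New input
//
// Client                                         -> clientSet (first argument)
// InformerFactory                                -> individual typed informers
//                                                   (Nodes, PersistentVolumes, ..., StorageClasses)
// PodInformer                                    -> pod informer argument (factory.NewPodInformer)
// Recorder                                       -> event recorder argument
// ComponentConfig.AlgorithmSource                -> kubeschedulerconfig.SchedulerAlgorithmSource argument
// ComponentConfig.SchedulerName                  -> scheduler.WithName(...)
// ComponentConfig.HardPodAffinitySymmetricWeight -> scheduler.WithHardPodAffinitySymmetricWeight(...)
// ComponentConfig.BindTimeoutSeconds             -> scheduler.WithBindTimeoutSeconds(...)
// EventClient, Broadcaster                       -> no longer passed to the constructor
// (the trailing nil appears to fill a stop-channel parameter)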


@@ -36,8 +36,6 @@ import (
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"
-    schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
-    schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/scheduler"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
@@ -179,33 +177,38 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
     eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
     defaultBindTimeout := int64(30)
-    ss := &schedulerappconfig.Config{
-        ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
-            HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-            SchedulerName:                  v1.DefaultSchedulerName,
-            AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
-                Policy: &kubeschedulerconfig.SchedulerPolicySource{
-                    ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
-                        Namespace: policyConfigMap.Namespace,
-                        Name:      policyConfigMap.Name,
-                    },
-                },
-            },
-            BindTimeoutSeconds: &defaultBindTimeout,
-        },
-        Client:          clientSet,
-        InformerFactory: informerFactory,
-        PodInformer:     factory.NewPodInformer(clientSet, 0),
-        EventClient:     clientSet.CoreV1(),
-        Recorder:        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
-        Broadcaster:     eventBroadcaster,
-    }
-    config, err := schedulerapp.NewSchedulerConfig(ss.Complete())
+    sched, err := scheduler.New(clientSet,
+        informerFactory.Core().V1().Nodes(),
+        factory.NewPodInformer(clientSet, 0),
+        informerFactory.Core().V1().PersistentVolumes(),
+        informerFactory.Core().V1().PersistentVolumeClaims(),
+        informerFactory.Core().V1().ReplicationControllers(),
+        informerFactory.Apps().V1().ReplicaSets(),
+        informerFactory.Apps().V1().StatefulSets(),
+        informerFactory.Core().V1().Services(),
+        informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
+        informerFactory.Storage().V1().StorageClasses(),
+        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
+        kubeschedulerconfig.SchedulerAlgorithmSource{
+            Policy: &kubeschedulerconfig.SchedulerPolicySource{
+                ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
+                    Namespace: policyConfigMap.Namespace,
+                    Name:      policyConfigMap.Name,
+                },
+            },
+        },
+        nil,
+        scheduler.WithName(v1.DefaultSchedulerName),
+        scheduler.WithHardPodAffinitySymmetricWeight(v1.DefaultHardPodAffinitySymmetricWeight),
+        scheduler.WithBindTimeoutSeconds(defaultBindTimeout),
+    )
     if err != nil {
         t.Fatalf("couldn't make scheduler config: %v", err)
     }
+    config := sched.Config()
     // Verify that the config is applied correctly.
     schedPredicates := sets.NewString()
     for k := range config.Algorithm.Predicates() {
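For readers unfamiliar with the scheduler.With* calls above: they are functional options, variadic arguments applied to an internal options struct before the scheduler is built. A minimal sketch of the pattern, with illustrative internals (the real field and type names inside pkg/scheduler are not shown in this diff):

package scheduler

// schedulerOptions is an illustrative assumption, not code from this commit:
// a struct holding the tunables that the With* options override.
type schedulerOptions struct {
    schedulerName      string
    bindTimeoutSeconds int64
}

// Option mutates the options struct before construction.
type Option func(*schedulerOptions)

// WithName overrides the scheduler's name.
func WithName(name string) Option {
    return func(o *schedulerOptions) { o.schedulerName = name }
}

// WithBindTimeoutSeconds overrides the bind timeout.
func WithBindTimeoutSeconds(seconds int64) Option {
    return func(o *schedulerOptions) { o.bindTimeoutSeconds = seconds }
}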
@@ -242,29 +245,32 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
     eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
     defaultBindTimeout := int64(30)
-    ss := &schedulerappconfig.Config{
-        ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
-            SchedulerName: v1.DefaultSchedulerName,
-            AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
-                Policy: &kubeschedulerconfig.SchedulerPolicySource{
-                    ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
-                        Namespace: "non-existent-config",
-                        Name:      "non-existent-config",
-                    },
-                },
-            },
-            HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-            BindTimeoutSeconds:             &defaultBindTimeout,
-        },
-        Client:          clientSet,
-        InformerFactory: informerFactory,
-        PodInformer:     factory.NewPodInformer(clientSet, 0),
-        EventClient:     clientSet.CoreV1(),
-        Recorder:        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
-        Broadcaster:     eventBroadcaster,
-    }
-    _, err := schedulerapp.NewSchedulerConfig(ss.Complete())
+    _, err := scheduler.New(clientSet,
+        informerFactory.Core().V1().Nodes(),
+        factory.NewPodInformer(clientSet, 0),
+        informerFactory.Core().V1().PersistentVolumes(),
+        informerFactory.Core().V1().PersistentVolumeClaims(),
+        informerFactory.Core().V1().ReplicationControllers(),
+        informerFactory.Apps().V1().ReplicaSets(),
+        informerFactory.Apps().V1().StatefulSets(),
+        informerFactory.Core().V1().Services(),
+        informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
+        informerFactory.Storage().V1().StorageClasses(),
+        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
+        kubeschedulerconfig.SchedulerAlgorithmSource{
+            Policy: &kubeschedulerconfig.SchedulerPolicySource{
+                ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
+                    Namespace: "non-existent-config",
+                    Name:      "non-existent-config",
+                },
+            },
+        },
+        nil,
+        scheduler.WithName(v1.DefaultSchedulerName),
+        scheduler.WithHardPodAffinitySymmetricWeight(v1.DefaultHardPodAffinitySymmetricWeight),
+        scheduler.WithBindTimeoutSeconds(defaultBindTimeout))
     if err == nil {
         t.Fatalf("Creation of scheduler didn't fail while the policy ConfigMap didn't exist.")
     }
@@ -536,7 +542,7 @@ func TestMultiScheduler(t *testing.T) {
     go podInformer2.Informer().Run(stopCh)
     informerFactory2.Start(stopCh)
-    sched2, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig2}, nil...)
+    sched2 := scheduler.NewFromConfig(schedulerConfig2)
     sched2.Run()
     // 6. **check point-2**:
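The last hunk is the same cleanup in miniature: NewFromConfigurator required wrapping an existing *scheduler.Config in a FakeConfigurator just to satisfy the Configurator interface, and returned an error to discard, while NewFromConfig consumes the config directly. The before/after at the call site, annotated (both lines taken from the hunk above):

// Before: wrap the config in a fake Configurator and ignore the error.
sched2, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig2}, nil...)

// After: build the scheduler from the config directly; NewFromConfig returns
// no error, so the blank identifier disappears as well.
sched2 := scheduler.NewFromConfig(schedulerConfig2)
sched2.Run()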