Mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-01-05 23:47:50 +00:00
update scheduler to use schedulerName selector
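This change drops the scheduler's client-side "is this pod mine?" check and instead pushes the schedulerName test into the pod informer's field selector, so the API server only ever sends each scheduler the pods it is responsible for. As a rough sketch of the idea (not code from this commit; the helper name is made up, and the List signature matches the pre-context-aware client-go of this era):

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listResponsiblePods is a hypothetical helper: it asks the API server for
// only the non-terminal pods whose spec.schedulerName matches, instead of
// listing everything and filtering locally. This relies on the apiserver
// supporting spec.schedulerName as a pod field selector.
func listResponsiblePods(client kubernetes.Interface, schedulerName string) error {
	pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
		FieldSelector: "spec.schedulerName=" + schedulerName +
			",status.phase!=Succeeded,status.phase!=Failed",
	})
	if err != nil {
		return err
	}
	for i := range pods.Items {
		fmt.Printf("%s/%s\n", pods.Items[i].Namespace, pods.Items[i].Name)
	}
	return nil
}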
@@ -79,8 +79,8 @@ func Run(s *options.SchedulerServer) error {
 
 	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
 	// cache only non-terminal pods
-	podInformer := factory.NewPodInformer(kubeClient, 0)
+	podInformer := factory.NewPodInformer(kubeClient, 0, s.SchedulerName)
 
 	// Apply algorithms based on feature gates.
 	algorithmprovider.ApplyFeatureGates()
@@ -901,17 +901,9 @@ func (f *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
 }
 
 func (f *configFactory) getNextPod() *v1.Pod {
-	for {
-		pod := cache.Pop(f.podQueue).(*v1.Pod)
-		if f.ResponsibleForPod(pod) {
-			glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
-			return pod
-		}
-	}
-}
-
-func (f *configFactory) ResponsibleForPod(pod *v1.Pod) bool {
-	return f.schedulerName == pod.Spec.SchedulerName
+	pod := cache.Pop(f.podQueue).(*v1.Pod)
+	glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
+	return pod
 }
 
 // unassignedNonTerminatedPod selects pods that are unassigned and non-terminal.
@@ -1008,8 +1000,11 @@ func (i *podInformer) Lister() corelisters.PodLister {
 }
 
 // NewPodInformer creates a shared index informer that returns only non-terminal pods.
-func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) coreinformers.PodInformer {
-	selector := fields.ParseSelectorOrDie("status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
+func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration, schedulerName string) coreinformers.PodInformer {
+	selector := fields.ParseSelectorOrDie(
+		"spec.schedulerName=" + schedulerName +
+			",status.phase!=" + string(v1.PodSucceeded) +
+			",status.phase!=" + string(v1.PodFailed))
 	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), string(v1.ResourcePods), metav1.NamespaceAll, selector)
 	return &podInformer{
 		informer: cache.NewSharedIndexInformer(lw, &v1.Pod{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
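For context, a standalone sketch (not part of the diff) of what the combined selector built by the new NewPodInformer admits. fields.Set and Selector.Matches come from k8s.io/apimachinery; the pod field values are invented for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// The same selector NewPodInformer builds, here for a scheduler
	// named "default-scheduler".
	selector := fields.ParseSelectorOrDie(
		"spec.schedulerName=default-scheduler,status.phase!=Succeeded,status.phase!=Failed")

	for _, podFields := range []fields.Set{
		{"spec.schedulerName": "default-scheduler", "status.phase": "Pending"},   // kept
		{"spec.schedulerName": "foo-scheduler", "status.phase": "Pending"},       // dropped: other scheduler
		{"spec.schedulerName": "default-scheduler", "status.phase": "Succeeded"}, // dropped: terminal
	} {
		fmt.Println(podFields, "->", selector.Matches(podFields))
	}
}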
@@ -363,97 +363,6 @@ func TestBind(t *testing.T) {
 	}
 }
 
-// TestResponsibleForPod tests that a pod whose spec.schedulerName selects the
-// default scheduler is in fact picked up by the default scheduler.
-// Two schedulers are created in the test: the default scheduler and one named
-// "foo-scheduler". A pod must be picked up by at most one of the two schedulers.
-func TestResponsibleForPod(t *testing.T) {
-	handler := utiltesting.FakeHandler{
-		StatusCode:   500,
-		ResponseBody: "",
-		T:            t,
-	}
-	server := httptest.NewServer(&handler)
-	defer server.Close()
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	// factory of "default-scheduler"
-	informerFactory := informers.NewSharedInformerFactory(client, 0)
-	factoryDefaultScheduler := NewConfigFactory(
-		v1.DefaultSchedulerName,
-		client,
-		informerFactory.Core().V1().Nodes(),
-		informerFactory.Core().V1().Pods(),
-		informerFactory.Core().V1().PersistentVolumes(),
-		informerFactory.Core().V1().PersistentVolumeClaims(),
-		informerFactory.Core().V1().ReplicationControllers(),
-		informerFactory.Extensions().V1beta1().ReplicaSets(),
-		informerFactory.Apps().V1beta1().StatefulSets(),
-		informerFactory.Core().V1().Services(),
-		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
-		v1.DefaultHardPodAffinitySymmetricWeight,
-		enableEquivalenceCache,
-	)
-	// factory of "foo-scheduler"
-	factoryFooScheduler := NewConfigFactory(
-		"foo-scheduler",
-		client,
-		informerFactory.Core().V1().Nodes(),
-		informerFactory.Core().V1().Pods(),
-		informerFactory.Core().V1().PersistentVolumes(),
-		informerFactory.Core().V1().PersistentVolumeClaims(),
-		informerFactory.Core().V1().ReplicationControllers(),
-		informerFactory.Extensions().V1beta1().ReplicaSets(),
-		informerFactory.Apps().V1beta1().StatefulSets(),
-		informerFactory.Core().V1().Services(),
-		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
-		v1.DefaultHardPodAffinitySymmetricWeight,
-		enableEquivalenceCache,
-	)
-	// scheduler names to be tested
-	schedulerFitsDefault := "default-scheduler"
-	schedulerFitsFoo := "foo-scheduler"
-	schedulerFitsNone := "bar-scheduler"
-
-	tests := []struct {
-		pod             *v1.Pod
-		pickedByDefault bool
-		pickedByFoo     bool
-	}{
-		{
-			// A pod with spec.schedulerName "default-scheduler" should be picked by the
-			// scheduler named "default-scheduler", NOT by the one named "foo-scheduler".
-			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: v1.PodSpec{SchedulerName: schedulerFitsDefault}},
-			pickedByDefault: true,
-			pickedByFoo:     false,
-		},
-		{
-			// A pod with spec.schedulerName "foo-scheduler" should NOT be picked by the
-			// scheduler named "default-scheduler", but by the one named "foo-scheduler".
-			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: v1.PodSpec{SchedulerName: schedulerFitsFoo}},
-			pickedByDefault: false,
-			pickedByFoo:     true,
-		},
-		{
-			// A pod with spec.schedulerName "bar-scheduler" should be picked by neither
-			// the scheduler named "default-scheduler" nor the one named "foo-scheduler".
-			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: v1.PodSpec{SchedulerName: schedulerFitsNone}},
-			pickedByDefault: false,
-			pickedByFoo:     false,
-		},
-	}
-
-	for _, test := range tests {
-		podOfDefault := factoryDefaultScheduler.ResponsibleForPod(test.pod)
-		podOfFoo := factoryFooScheduler.ResponsibleForPod(test.pod)
-		results := []bool{podOfDefault, podOfFoo}
-		expected := []bool{test.pickedByDefault, test.pickedByFoo}
-		if !reflect.DeepEqual(results, expected) {
-			t.Errorf("expected: {%v, %v}, got {%v, %v}", test.pickedByDefault, test.pickedByFoo, podOfDefault, podOfFoo)
-		}
-	}
-}
-
 func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
 	handler := utiltesting.FakeHandler{
 		StatusCode: 500,
@@ -82,9 +82,6 @@ type Configurator interface {
 	GetSchedulerName() string
 	MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error)
 
-	// Probably doesn't need to be public. But exposed for now in case.
-	ResponsibleForPod(pod *v1.Pod) bool
-
 	// Needs to be exposed for things like integration tests where we want to make fake nodes.
 	GetNodeLister() corelisters.NodeLister
 	GetClient() clientset.Interface
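With ResponsibleForPod gone from the Configurator interface, any caller that still needs the check can reproduce it with a one-line comparison. A minimal sketch, assuming the caller already has the pod and its scheduler's name (the function name is hypothetical, not from this commit):

package example

import v1 "k8s.io/api/core/v1"

// responsibleForPod is a hypothetical local replacement for the removed
// interface method: the check was only ever a plain field comparison.
func responsibleForPod(pod *v1.Pod, schedulerName string) bool {
	return pod.Spec.SchedulerName == schedulerName
}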