Remove pvcLister from genericScheduler

PVCLister can be fetched from sharedInformerFactory.
Author: Wei Huang (2020-07-06 15:40:10 -07:00), committed by Wei Huang
parent 1b8c7585f3
commit 42cfda2f94
6 changed files with 23 additions and 25 deletions
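For context on the commit message: a SharedInformerFactory hands out listers on demand, so callers do not need to cache one. Below is a minimal, self-contained sketch of that pattern using the client-go fake clientset; it is illustrative only and not code from this commit.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/labels"
        "k8s.io/client-go/informers"
        clientsetfake "k8s.io/client-go/kubernetes/fake"
    )

    func main() {
        // A fake clientset stands in for a real API server connection.
        client := clientsetfake.NewSimpleClientset()
        factory := informers.NewSharedInformerFactory(client, 0)

        // Lister() is a cheap accessor over the informer's shared cache, so it
        // can be fetched at each use instead of being stored in a struct field.
        pvcLister := factory.Core().V1().PersistentVolumeClaims().Lister()
        pvcs, err := pvcLister.List(labels.Everything())
        fmt.Println(len(pvcs), err) // prints "0 <nil>": the cache is empty until informers run
    }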

pkg/scheduler/core/BUILD

@@ -48,7 +48,6 @@ go_test(
         "//pkg/scheduler/framework/plugins/selectorspread:go_default_library",
         "//pkg/scheduler/framework/runtime:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/profile:go_default_library",

pkg/scheduler/core/extender_test.go

@@ -272,6 +272,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
             fwk, err := st.NewFramework(
                 test.registerPlugins,
                 runtime.WithClientSet(client),
+                runtime.WithInformerFactory(informerFactory),
                 runtime.WithPodNominator(internalqueue.NewPodNominator()),
             )
             if err != nil {
@@ -285,7 +286,6 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
                 cache,
                 emptySnapshot,
                 extenders,
-                informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
                 schedulerapi.DefaultPercentageOfNodesToScore)
             podIgnored := &v1.Pod{}
             result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), podIgnored)

pkg/scheduler/core/generic_scheduler.go

@@ -119,7 +119,6 @@ type genericScheduler struct {
     cache                    internalcache.Cache
     extenders                []framework.Extender
     nodeInfoSnapshot         *internalcache.Snapshot
-    pvcLister                corelisters.PersistentVolumeClaimLister
     percentageOfNodesToScore int32
     nextStartNodeIndex       int
 }
@@ -138,7 +137,8 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
     trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name})
     defer trace.LogIfLong(100 * time.Millisecond)

-    if err := podPassesBasicChecks(pod, g.pvcLister); err != nil {
+    pvcLister := prof.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister()
+    if err := podPassesBasicChecks(pod, pvcLister); err != nil {
         return result, err
     }
     trace.Step("Basic checks done")
@@ -628,13 +628,11 @@ func NewGenericScheduler(
     cache internalcache.Cache,
     nodeInfoSnapshot *internalcache.Snapshot,
     extenders []framework.Extender,
-    pvcLister corelisters.PersistentVolumeClaimLister,
     percentageOfNodesToScore int32) ScheduleAlgorithm {
     return &genericScheduler{
         cache:                    cache,
         extenders:                extenders,
         nodeInfoSnapshot:         nodeInfoSnapshot,
-        pvcLister:                pvcLister,
         percentageOfNodesToScore: percentageOfNodesToScore,
     }
 }
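Note on the Schedule change above: the lister is now resolved on every scheduling cycle through the profile's framework handle (prof.SharedInformerFactory()), and since Lister() only returns a view over the informer's existing cache, the per-call lookup is effectively free. For readers unfamiliar with podPassesBasicChecks, the sketch below is a hypothetical reconstruction of the kind of check it performs (every PVC the pod references must exist and must not be terminating); it is not the file's actual code.

    package sketch

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        corelisters "k8s.io/client-go/listers/core/v1"
    )

    // checkPodPVCs is a hypothetical stand-in for podPassesBasicChecks.
    func checkPodPVCs(pod *v1.Pod, pvcLister corelisters.PersistentVolumeClaimLister) error {
        for _, vol := range pod.Spec.Volumes {
            if vol.PersistentVolumeClaim == nil {
                continue // only PVC-backed volumes need the lookup
            }
            pvc, err := pvcLister.PersistentVolumeClaims(pod.Namespace).Get(vol.PersistentVolumeClaim.ClaimName)
            if err != nil {
                return err // covers the "PVC not found" case
            }
            if pvc.DeletionTimestamp != nil {
                return fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name)
            }
        }
        return nil
    }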

pkg/scheduler/core/generic_scheduler_test.go

@@ -20,7 +20,6 @@ import (
     "context"
     "fmt"
     "math"
-    "reflect"
     "strconv"
     "testing"
     "time"
@@ -42,7 +41,6 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread"
     frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
     internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
     "k8s.io/kubernetes/pkg/scheduler/profile"
@@ -422,9 +420,9 @@ func TestGenericScheduler(t *testing.T) {
                 st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
             },
             nodes: []string{"machine1", "machine2"},
-            pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}},
+            pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault}}},
             pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
+                ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
                 Spec: v1.PodSpec{
                     Volumes: []v1.Volume{
                         {
@@ -474,9 +472,9 @@ func TestGenericScheduler(t *testing.T) {
                 st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
             },
             nodes: []string{"machine1", "machine2"},
-            pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}},
+            pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
             pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
+                ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
                 Spec: v1.PodSpec{
                     Volumes: []v1.Volume{
                         {
@@ -728,10 +726,16 @@ func TestGenericScheduler(t *testing.T) {
                 cache.AddNode(node)
             }

+            cs := clientsetfake.NewSimpleClientset()
+            informerFactory := informers.NewSharedInformerFactory(cs, 0)
+            for i := range test.pvcs {
+                informerFactory.Core().V1().PersistentVolumeClaims().Informer().GetStore().Add(&test.pvcs[i])
+            }
             snapshot := internalcache.NewSnapshot(test.pods, nodes)
             fwk, err := st.NewFramework(
                 test.registerPlugins,
                 frameworkruntime.WithSnapshotSharedLister(snapshot),
+                frameworkruntime.WithInformerFactory(informerFactory),
                 frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()),
             )
             if err != nil {
@@ -741,19 +745,14 @@ func TestGenericScheduler(t *testing.T) {
                 Framework: fwk,
             }

-            var pvcs []v1.PersistentVolumeClaim
-            pvcs = append(pvcs, test.pvcs...)
-            pvcLister := fakeframework.PersistentVolumeClaimLister(pvcs)
-
             scheduler := NewGenericScheduler(
                 cache,
                 snapshot,
                 []framework.Extender{},
-                pvcLister,
                 schedulerapi.DefaultPercentageOfNodesToScore)
             result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), test.pod)
-            if !reflect.DeepEqual(err, test.wErr) {
-                t.Errorf("want: %v, got: %v", test.wErr, err)
+            if err != test.wErr && err.Error() != test.wErr.Error() {
+                t.Errorf("Unexpected error: %v, expected: %v", err.Error(), test.wErr)
             }
             if test.expectedHosts != nil && !test.expectedHosts.Has(result.SuggestedHost) {
                 t.Errorf("Expected: %s, got: %s", test.expectedHosts, result.SuggestedHost)
@@ -775,7 +774,7 @@ func makeScheduler(nodes []*v1.Node) *genericScheduler {
     s := NewGenericScheduler(
         cache,
         emptySnapshot,
-        nil, nil,
+        nil,
         schedulerapi.DefaultPercentageOfNodesToScore)
     cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
     return s.(*genericScheduler)
@@ -1069,7 +1068,6 @@ func TestZeroRequest(t *testing.T) {
         nil,
         emptySnapshot,
         []framework.Extender{},
-        nil,
         schedulerapi.DefaultPercentageOfNodesToScore).(*genericScheduler)
     scheduler.nodeInfoSnapshot = snapshot
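Two details in the test diff above are worth calling out. First, objects added directly to an informer's store are immediately visible to its lister, with no informer start or cache sync needed. Second, PVC listers are namespace-scoped, which is why the fixtures gained explicit Namespace (and UID) fields: without a namespace, the seeded claims would not be found. A self-contained sketch of that wiring (a hypothetical test, not part of the commit):

    package sketch

    import (
        "testing"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/informers"
        clientsetfake "k8s.io/client-go/kubernetes/fake"
    )

    func TestSeededPVCLister(t *testing.T) {
        client := clientsetfake.NewSimpleClientset()
        factory := informers.NewSharedInformerFactory(client, 0)

        pvc := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
            Name:      "existingPVC",
            Namespace: v1.NamespaceDefault, // the lister looks objects up by namespace/name
        }}
        // Seeding the store directly bypasses informer start/sync entirely.
        if err := factory.Core().V1().PersistentVolumeClaims().Informer().GetStore().Add(pvc); err != nil {
            t.Fatal(err)
        }

        got, err := factory.Core().V1().PersistentVolumeClaims().Lister().
            PersistentVolumeClaims(v1.NamespaceDefault).Get("existingPVC")
        if err != nil {
            t.Fatalf("lister lookup failed: %v", err)
        }
        if got.Name != "existingPVC" {
            t.Errorf("got %q, want %q", got.Name, "existingPVC")
        }
    }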

pkg/scheduler/factory.go

@@ -180,7 +180,6 @@ func (c *Configurator) create() (*Scheduler, error) {
         c.schedulerCache,
         c.nodeInfoSnapshot,
         extenders,
-        c.informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
         c.percentageOfNodesToScore,
     )

pkg/scheduler/scheduler_test.go

@@ -807,7 +807,12 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
         return true, b, nil
     })

-    fwk, _ := st.NewFramework(fns, frameworkruntime.WithClientSet(client), frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()))
+    fwk, _ := st.NewFramework(
+        fns,
+        frameworkruntime.WithClientSet(client),
+        frameworkruntime.WithInformerFactory(informerFactory),
+        frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()),
+    )
     prof := &profile.Profile{
         Framework: fwk,
         Recorder:  &events.FakeRecorder{},
@@ -824,7 +829,6 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
         scache,
         internalcache.NewEmptySnapshot(),
         []framework.Extender{},
-        informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
         schedulerapi.DefaultPercentageOfNodesToScore,
     )
@@ -858,6 +862,7 @@ func setupTestSchedulerWithVolumeBinding(volumeBinder scheduling.SchedulerVolume
     testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
     client := clientsetfake.NewSimpleClientset(&testNode, &testPVC)
     informerFactory := informers.NewSharedInformerFactory(client, 0)
+    informerFactory.Core().V1().PersistentVolumeClaims().Informer().GetStore().Add(&testPVC)

     fns := []st.RegisterPluginFunc{
         st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
@@ -1172,7 +1177,6 @@ func TestSchedulerBinding(t *testing.T) {
         scache,
         nil,
         test.extenders,
-        nil,
         0,
     )
     sched := Scheduler{
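A closing observation, not part of the commit itself: because Schedule now calls prof.SharedInformerFactory() unconditionally, any framework handed to the generic scheduler must be constructed with an informer factory, which is exactly what the WithInformerFactory additions in the test diffs above provide. As a fragment (reusing the st, frameworkruntime, and internalqueue imports from those files), the required wiring looks roughly like:

    fwk, err := st.NewFramework(
        fns,
        frameworkruntime.WithClientSet(client),
        // Without this option, SharedInformerFactory() returns nil and the
        // Schedule-time lister fetch would panic.
        frameworkruntime.WithInformerFactory(informerFactory),
        frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()),
    )
    if err != nil {
        t.Fatal(err)
    }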