Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)
Remove pvcLister from genericScheduler

The PVCLister can be fetched from the SharedInformerFactory, so genericScheduler no longer needs to store its own copy.

commit 42cfda2f94 (parent 1b8c7585f3)
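The core of the change: the PersistentVolumeClaimLister that was previously injected through NewGenericScheduler is now looked up on demand from the profile's SharedInformerFactory. A rough standalone sketch of that lookup, using client-go's fake clientset purely for illustration (none of this code is part of the commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// pvcListerFrom mirrors what Schedule() now does via prof.SharedInformerFactory():
// the factory caches one informer per type, so requesting the lister on each
// scheduling cycle is cheap.
func pvcListerFrom(factory informers.SharedInformerFactory) corelisters.PersistentVolumeClaimLister {
	return factory.Core().V1().PersistentVolumeClaims().Lister()
}

func main() {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)
	lister := pvcListerFrom(factory)
	pvcs, err := lister.List(labels.Everything())
	fmt.Println(len(pvcs), err) // 0 <nil>: the fake clientset is empty
}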
@@ -48,7 +48,6 @@ go_test(
         "//pkg/scheduler/framework/plugins/selectorspread:go_default_library",
         "//pkg/scheduler/framework/runtime:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/profile:go_default_library",
@@ -272,6 +272,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 			fwk, err := st.NewFramework(
 				test.registerPlugins,
 				runtime.WithClientSet(client),
+				runtime.WithInformerFactory(informerFactory),
 				runtime.WithPodNominator(internalqueue.NewPodNominator()),
 			)
 			if err != nil {
@@ -285,7 +286,6 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 				cache,
 				emptySnapshot,
 				extenders,
-				informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
 				schedulerapi.DefaultPercentageOfNodesToScore)
 			podIgnored := &v1.Pod{}
 			result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), podIgnored)
@@ -119,7 +119,6 @@ type genericScheduler struct {
 	cache                    internalcache.Cache
 	extenders                []framework.Extender
 	nodeInfoSnapshot         *internalcache.Snapshot
-	pvcLister                corelisters.PersistentVolumeClaimLister
 	percentageOfNodesToScore int32
 	nextStartNodeIndex       int
 }
@@ -138,7 +137,8 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 	trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name})
 	defer trace.LogIfLong(100 * time.Millisecond)
 
-	if err := podPassesBasicChecks(pod, g.pvcLister); err != nil {
+	pvcLister := prof.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister()
+	if err := podPassesBasicChecks(pod, pvcLister); err != nil {
 		return result, err
 	}
 	trace.Step("Basic checks done")
@@ -628,13 +628,11 @@ func NewGenericScheduler(
 	cache internalcache.Cache,
 	nodeInfoSnapshot *internalcache.Snapshot,
 	extenders []framework.Extender,
-	pvcLister corelisters.PersistentVolumeClaimLister,
 	percentageOfNodesToScore int32) ScheduleAlgorithm {
 	return &genericScheduler{
 		cache:                    cache,
 		extenders:                extenders,
 		nodeInfoSnapshot:         nodeInfoSnapshot,
-		pvcLister:                pvcLister,
 		percentageOfNodesToScore: percentageOfNodesToScore,
 	}
 }
@@ -20,7 +20,6 @@ import (
 	"context"
 	"fmt"
 	"math"
-	"reflect"
 	"strconv"
 	"testing"
 	"time"
@@ -42,7 +41,6 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	"k8s.io/kubernetes/pkg/scheduler/profile"
@@ -422,9 +420,9 @@ func TestGenericScheduler(t *testing.T) {
 				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 			},
 			nodes: []string{"machine1", "machine2"},
-			pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}},
+			pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault}}},
 			pod: &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
+				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
 				Spec: v1.PodSpec{
 					Volumes: []v1.Volume{
 						{
@@ -474,9 +472,9 @@ func TestGenericScheduler(t *testing.T) {
 				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 			},
 			nodes: []string{"machine1", "machine2"},
-			pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}},
+			pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
 			pod: &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
+				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
 			Spec: v1.PodSpec{
 					Volumes: []v1.Volume{
 						{
@@ -728,10 +726,16 @@ func TestGenericScheduler(t *testing.T) {
 				cache.AddNode(node)
 			}
 
+			cs := clientsetfake.NewSimpleClientset()
+			informerFactory := informers.NewSharedInformerFactory(cs, 0)
+			for i := range test.pvcs {
+				informerFactory.Core().V1().PersistentVolumeClaims().Informer().GetStore().Add(&test.pvcs[i])
+			}
 			snapshot := internalcache.NewSnapshot(test.pods, nodes)
 			fwk, err := st.NewFramework(
 				test.registerPlugins,
 				frameworkruntime.WithSnapshotSharedLister(snapshot),
+				frameworkruntime.WithInformerFactory(informerFactory),
 				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()),
 			)
 			if err != nil {
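The test setup above seeds PVCs directly into the informer's local store via GetStore().Add instead of starting the informer, which is sufficient when only the lister is exercised. A minimal standalone sketch of that pattern (fake clientset and object names are illustrative, not from this commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(client, 0)

	// Inject the object straight into the informer's store; no factory.Start()
	// or cache sync is needed when only the lister is read.
	pvc := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", Namespace: v1.NamespaceDefault}}
	informerFactory.Core().V1().PersistentVolumeClaims().Informer().GetStore().Add(&pvc)

	// The lister reads from the same store and now sees the object.
	got, err := informerFactory.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(v1.NamespaceDefault).Get("existingPVC")
	fmt.Println(got.Name, err) // existingPVC <nil>
}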
@@ -741,19 +745,14 @@ func TestGenericScheduler(t *testing.T) {
 				Framework: fwk,
 			}
 
-			var pvcs []v1.PersistentVolumeClaim
-			pvcs = append(pvcs, test.pvcs...)
-			pvcLister := fakeframework.PersistentVolumeClaimLister(pvcs)
-
 			scheduler := NewGenericScheduler(
 				cache,
 				snapshot,
 				[]framework.Extender{},
-				pvcLister,
 				schedulerapi.DefaultPercentageOfNodesToScore)
 			result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), test.pod)
-			if !reflect.DeepEqual(err, test.wErr) {
-				t.Errorf("want: %v, got: %v", test.wErr, err)
+			if err != test.wErr && err.Error() != test.wErr.Error() {
+				t.Errorf("Unexpected error: %v, expected: %v", err.Error(), test.wErr)
 			}
 			if test.expectedHosts != nil && !test.expectedHosts.Has(result.SuggestedHost) {
 				t.Errorf("Expected: %s, got: %s", test.expectedHosts, result.SuggestedHost)
@@ -775,7 +774,7 @@ func makeScheduler(nodes []*v1.Node) *genericScheduler {
 	s := NewGenericScheduler(
 		cache,
 		emptySnapshot,
-		nil, nil,
+		nil,
 		schedulerapi.DefaultPercentageOfNodesToScore)
 	cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
 	return s.(*genericScheduler)
@@ -1069,7 +1068,6 @@ func TestZeroRequest(t *testing.T) {
 			nil,
 			emptySnapshot,
 			[]framework.Extender{},
-			nil,
 			schedulerapi.DefaultPercentageOfNodesToScore).(*genericScheduler)
 		scheduler.nodeInfoSnapshot = snapshot
 
@@ -180,7 +180,6 @@ func (c *Configurator) create() (*Scheduler, error) {
 		c.schedulerCache,
 		c.nodeInfoSnapshot,
 		extenders,
-		c.informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
 		c.percentageOfNodesToScore,
 	)
 
@@ -807,7 +807,12 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
 		return true, b, nil
 	})
 
-	fwk, _ := st.NewFramework(fns, frameworkruntime.WithClientSet(client), frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()))
+	fwk, _ := st.NewFramework(
+		fns,
+		frameworkruntime.WithClientSet(client),
+		frameworkruntime.WithInformerFactory(informerFactory),
+		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator()),
+	)
 	prof := &profile.Profile{
 		Framework: fwk,
 		Recorder:  &events.FakeRecorder{},
@@ -824,7 +829,6 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
 		scache,
 		internalcache.NewEmptySnapshot(),
 		[]framework.Extender{},
-		informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
 		schedulerapi.DefaultPercentageOfNodesToScore,
 	)
 
@@ -858,6 +862,7 @@ func setupTestSchedulerWithVolumeBinding(volumeBinder scheduling.SchedulerVolume
 	testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
 	client := clientsetfake.NewSimpleClientset(&testNode, &testPVC)
 	informerFactory := informers.NewSharedInformerFactory(client, 0)
+	informerFactory.Core().V1().PersistentVolumeClaims().Informer().GetStore().Add(&testPVC)
 
 	fns := []st.RegisterPluginFunc{
 		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
@@ -1172,7 +1177,6 @@ func TestSchedulerBinding(t *testing.T) {
 			scache,
 			nil,
 			test.extenders,
-			nil,
 			0,
 		)
 		sched := Scheduler{