chore: call Close on the framework when finishing

Signed-off-by: googs1025 <googs1025@gmail.com>
This commit is contained in:
googs1025 2024-07-03 14:18:25 +08:00
parent 7e1a5a0ea8
commit d4627f16a5
14 changed files with 98 additions and 42 deletions

View File

@ -482,7 +482,7 @@ func TestAddAllEventHandlers(t *testing.T) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0) informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(), 0)
schedulingQueue := queue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory) schedulingQueue := queue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
testSched := Scheduler{ testSched := Scheduler{
StopEverything: ctx.Done(), StopEverything: ctx.Done(),

View File

@ -317,7 +317,7 @@ func TestSchedulerWithExtenders(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
var extenders []framework.Extender var extenders []framework.Extender

View File

@ -704,7 +704,7 @@ type Handle interface {
SharedInformerFactory() informers.SharedInformerFactory SharedInformerFactory() informers.SharedInformerFactory
// ResourceClaimInfos returns an assume cache of ResourceClaim objects // ResourceClaimCache returns an assume cache of ResourceClaim objects
// which gets populated by the shared informer factory and the dynamic resources // which gets populated by the shared informer factory and the dynamic resources
// plugin. // plugin.
ResourceClaimCache() *assumecache.AssumeCache ResourceClaimCache() *assumecache.AssumeCache

View File

@ -58,7 +58,7 @@ func TestDefaultBinder(t *testing.T) {
defer cancel() defer cancel()
var gotBinding *v1.Binding var gotBinding *v1.Binding
client := fake.NewSimpleClientset(testPod) client := fake.NewClientset(testPod)
client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
if action.GetSubresource() != "binding" { if action.GetSubresource() != "binding" {
return false, nil, nil return false, nil, nil

View File

@ -340,7 +340,7 @@ func TestPostFilter(t *testing.T) {
for _, pod := range tt.pods { for _, pod := range tt.pods {
podItems = append(podItems, *pod) podItems = append(podItems, *pod)
} }
cs := clientsetfake.NewSimpleClientset(&v1.PodList{Items: podItems}) cs := clientsetfake.NewClientset(&v1.PodList{Items: podItems})
informerFactory := informers.NewSharedInformerFactory(cs, 0) informerFactory := informers.NewSharedInformerFactory(cs, 0)
podInformer := informerFactory.Core().V1().Pods().Informer() podInformer := informerFactory.Core().V1().Pods().Informer()
podInformer.GetStore().Add(tt.pod) podInformer.GetStore().Add(tt.pod)
@ -1087,7 +1087,7 @@ func TestDryRunPreemption(t *testing.T) {
for _, n := range nodes { for _, n := range nodes {
objs = append(objs, n) objs = append(objs, n)
} }
informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objs...), 0) informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
parallelism := parallelize.DefaultParallelism parallelism := parallelize.DefaultParallelism
if tt.disableParallelism { if tt.disableParallelism {
// We need disableParallelism because of the non-deterministic nature // We need disableParallelism because of the non-deterministic nature
@ -1347,7 +1347,7 @@ func TestSelectBestCandidate(t *testing.T) {
for _, pod := range tt.pods { for _, pod := range tt.pods {
objs = append(objs, pod) objs = append(objs, pod)
} }
cs := clientsetfake.NewSimpleClientset(objs...) cs := clientsetfake.NewClientset(objs...)
informerFactory := informers.NewSharedInformerFactory(cs, 0) informerFactory := informers.NewSharedInformerFactory(cs, 0)
snapshot := internalcache.NewSnapshot(tt.pods, nodes) snapshot := internalcache.NewSnapshot(tt.pods, nodes)
logger, ctx := ktesting.NewTestContext(t) logger, ctx := ktesting.NewTestContext(t)
@ -1685,7 +1685,7 @@ func TestPreempt(t *testing.T) {
labelKeys := []string{"hostname", "zone", "region"} labelKeys := []string{"hostname", "zone", "region"}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
podInformer := informerFactory.Core().V1().Pods().Informer() podInformer := informerFactory.Core().V1().Pods().Informer()
podInformer.GetStore().Add(test.pod) podInformer.GetStore().Add(test.pod)

View File

@ -89,7 +89,7 @@ func TestPreScoreSkip(t *testing.T) {
_, ctx := ktesting.NewTestContext(t) _, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0) informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(tt.objs...), 0)
f, err := frameworkruntime.NewFramework(ctx, nil, nil, f, err := frameworkruntime.NewFramework(ctx, nil, nil,
frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)), frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
frameworkruntime.WithInformerFactory(informerFactory)) frameworkruntime.WithInformerFactory(informerFactory))
@ -576,7 +576,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
_, ctx := ktesting.NewTestContext(t) _, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0) informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(tt.objs...), 0)
f, err := frameworkruntime.NewFramework(ctx, nil, nil, f, err := frameworkruntime.NewFramework(ctx, nil, nil,
frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)), frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
frameworkruntime.WithInformerFactory(informerFactory)) frameworkruntime.WithInformerFactory(informerFactory))

View File

@ -42,7 +42,7 @@ func SetupPluginWithInformers(
objs []runtime.Object, objs []runtime.Object,
) framework.Plugin { ) framework.Plugin {
objs = append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, objs...) objs = append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, objs...)
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(objs...), 0) informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0)
fh, err := frameworkruntime.NewFramework(ctx, nil, nil, fh, err := frameworkruntime.NewFramework(ctx, nil, nil,
frameworkruntime.WithSnapshotSharedLister(sharedLister), frameworkruntime.WithSnapshotSharedLister(sharedLister),
frameworkruntime.WithInformerFactory(informerFactory)) frameworkruntime.WithInformerFactory(informerFactory))

View File

@ -333,7 +333,7 @@ func TestDryRunPreemption(t *testing.T) {
for _, n := range tt.nodes { for _, n := range tt.nodes {
objs = append(objs, n) objs = append(objs, n)
} }
informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objs...), 0) informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
parallelism := parallelize.DefaultParallelism parallelism := parallelize.DefaultParallelism
_, ctx := ktesting.NewTestContext(t) _, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
@ -437,7 +437,7 @@ func TestSelectCandidate(t *testing.T) {
for _, pod := range tt.testPods { for _, pod := range tt.testPods {
objs = append(objs, pod) objs = append(objs, pod)
} }
informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objs...), 0) informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
snapshot := internalcache.NewSnapshot(tt.testPods, nodes) snapshot := internalcache.NewSnapshot(tt.testPods, nodes)
_, ctx := ktesting.NewTestContext(t) _, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)

View File

@ -904,6 +904,11 @@ func TestNewFrameworkMultiPointExpansion(t *testing.T) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
fw, err := NewFramework(ctx, registry, &config.KubeSchedulerProfile{Plugins: tc.plugins}) fw, err := NewFramework(ctx, registry, &config.KubeSchedulerProfile{Plugins: tc.plugins})
defer func() {
if fw != nil {
_ = fw.Close()
}
}()
if err != nil { if err != nil {
if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) {
t.Fatalf("Unexpected error, got %v, expect: %s", err, tc.wantErr) t.Fatalf("Unexpected error, got %v, expect: %s", err, tc.wantErr)
@ -913,7 +918,6 @@ func TestNewFrameworkMultiPointExpansion(t *testing.T) {
t.Fatalf("Unexpected error, got %v, expect: %s", err, tc.wantErr) t.Fatalf("Unexpected error, got %v, expect: %s", err, tc.wantErr)
} }
} }
if tc.wantErr == "" { if tc.wantErr == "" {
if diff := cmp.Diff(tc.wantPlugins, fw.ListPlugins()); diff != "" { if diff := cmp.Diff(tc.wantPlugins, fw.ListPlugins()); diff != "" {
t.Fatalf("Unexpected eventToPlugin map (-want,+got):%s", diff) t.Fatalf("Unexpected eventToPlugin map (-want,+got):%s", diff)
@ -969,7 +973,9 @@ func TestPreEnqueuePlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
defer func() {
_ = f.Close()
}()
got := f.PreEnqueuePlugins() got := f.PreEnqueuePlugins()
if !reflect.DeepEqual(got, tt.want) { if !reflect.DeepEqual(got, tt.want) {
t.Errorf("PreEnqueuePlugins(): want %v, but got %v", tt.want, got) t.Errorf("PreEnqueuePlugins(): want %v, but got %v", tt.want, got)
@ -1092,7 +1098,9 @@ func TestRunPreScorePlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
status := f.RunPreScorePlugins(ctx, state, nil, nil) status := f.RunPreScorePlugins(ctx, state, nil, nil)
if status.Code() != tt.wantStatusCode { if status.Code() != tt.wantStatusCode {
@ -1486,6 +1494,9 @@ func TestRunScorePlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
state.SkipScorePlugins = tt.skippedPlugins state.SkipScorePlugins = tt.skippedPlugins
@ -1530,6 +1541,9 @@ func TestPreFilterPlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
f.RunPreFilterPlugins(ctx, state, nil) f.RunPreFilterPlugins(ctx, state, nil)
@ -1719,6 +1733,9 @@ func TestRunPreFilterPlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
result, status := f.RunPreFilterPlugins(ctx, state, nil) result, status := f.RunPreFilterPlugins(ctx, state, nil)
@ -1809,6 +1826,9 @@ func TestRunPreFilterExtensionRemovePod(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
state.SkipFilterPlugins = tt.skippedPluginNames state.SkipFilterPlugins = tt.skippedPluginNames
@ -1893,6 +1913,9 @@ func TestRunPreFilterExtensionAddPod(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
state.SkipFilterPlugins = tt.skippedPluginNames state.SkipFilterPlugins = tt.skippedPluginNames
@ -2096,6 +2119,9 @@ func TestFilterPlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
defer func() {
_ = f.Close()
}()
state := framework.NewCycleState() state := framework.NewCycleState()
state.SkipFilterPlugins = tt.skippedPlugins state.SkipFilterPlugins = tt.skippedPlugins
gotStatus := f.RunFilterPlugins(ctx, state, pod, nil) gotStatus := f.RunFilterPlugins(ctx, state, pod, nil)
@ -2220,6 +2246,9 @@ func TestPostFilterPlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
defer func() {
_ = f.Close()
}()
_, gotStatus := f.RunPostFilterPlugins(ctx, nil, pod, nil) _, gotStatus := f.RunPostFilterPlugins(ctx, nil, pod, nil)
if !reflect.DeepEqual(gotStatus, tt.wantStatus) { if !reflect.DeepEqual(gotStatus, tt.wantStatus) {
t.Errorf("Unexpected status. got: %v, want: %v", gotStatus, tt.wantStatus) t.Errorf("Unexpected status. got: %v, want: %v", gotStatus, tt.wantStatus)
@ -2371,6 +2400,9 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
defer func() {
_ = f.Close()
}()
tt.nodeInfo.SetNode(tt.node) tt.nodeInfo.SetNode(tt.node)
gotStatus := f.RunFilterPluginsWithNominatedPods(ctx, framework.NewCycleState(), tt.pod, tt.nodeInfo) gotStatus := f.RunFilterPluginsWithNominatedPods(ctx, framework.NewCycleState(), tt.pod, tt.nodeInfo)
if diff := cmp.Diff(gotStatus, tt.wantStatus, cmpOpts...); diff != "" { if diff := cmp.Diff(gotStatus, tt.wantStatus, cmpOpts...); diff != "" {
@ -2528,6 +2560,9 @@ func TestPreBindPlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
defer func() {
_ = f.Close()
}()
status := f.RunPreBindPlugins(ctx, nil, pod, "") status := f.RunPreBindPlugins(ctx, nil, pod, "")
@ -2683,6 +2718,9 @@ func TestReservePlugins(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile) f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
defer func() {
_ = f.Close()
}()
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
@ -2811,6 +2849,9 @@ func TestPermitPlugins(t *testing.T) {
f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile, f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile,
WithWaitingPods(NewWaitingPodsMap()), WithWaitingPods(NewWaitingPodsMap()),
) )
defer func() {
_ = f.Close()
}()
if err != nil { if err != nil {
t.Fatalf("fail to create framework: %s", err) t.Fatalf("fail to create framework: %s", err)
} }
@ -3000,6 +3041,9 @@ func TestRecordingMetrics(t *testing.T) {
cancel() cancel()
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
tt.action(f) tt.action(f)
@ -3113,6 +3157,9 @@ func TestRunBindPlugins(t *testing.T) {
cancel() cancel()
t.Fatal(err) t.Fatal(err)
} }
defer func() {
_ = fwk.Close()
}()
st := fwk.RunBindPlugins(context.Background(), state, pod, "") st := fwk.RunBindPlugins(context.Background(), state, pod, "")
if st.Code() != tt.wantStatus { if st.Code() != tt.wantStatus {
@ -3171,6 +3218,9 @@ func TestPermitWaitDurationMetric(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
f.RunPermitPlugins(ctx, nil, pod, "") f.RunPermitPlugins(ctx, nil, pod, "")
f.WaitOnPermit(ctx, pod) f.WaitOnPermit(ctx, pod)
@ -3229,6 +3279,9 @@ func TestWaitOnPermit(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
runPermitPluginsStatus := f.RunPermitPlugins(ctx, nil, pod, "") runPermitPluginsStatus := f.RunPermitPlugins(ctx, nil, pod, "")
if runPermitPluginsStatus.Code() != framework.Wait { if runPermitPluginsStatus.Code() != framework.Wait {
@ -3283,6 +3336,9 @@ func TestListPlugins(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err) t.Fatalf("Failed to create framework for testing: %v", err)
} }
defer func() {
_ = f.Close()
}()
got := f.ListPlugins() got := f.ListPlugins()
if diff := cmp.Diff(tt.want, got); diff != "" { if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("unexpected plugins (-want,+got):\n%s", diff) t.Errorf("unexpected plugins (-want,+got):\n%s", diff)

View File

@ -2039,7 +2039,7 @@ func TestPriorityQueue_NominatedPodDeleted(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
logger, _ := ktesting.NewTestContext(t) logger, _ := ktesting.NewTestContext(t)
cs := fake.NewSimpleClientset(tt.podInfo.Pod) cs := fake.NewClientset(tt.podInfo.Pod)
informerFactory := informers.NewSharedInformerFactory(cs, 0) informerFactory := informers.NewSharedInformerFactory(cs, 0)
podLister := informerFactory.Core().V1().Pods().Lister() podLister := informerFactory.Core().V1().Pods().Lister()

View File

@ -38,7 +38,7 @@ func NewTestQueueWithObjects(
objs []runtime.Object, objs []runtime.Object,
opts ...Option, opts ...Option,
) *PriorityQueue { ) *PriorityQueue {
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(objs...), 0) informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0)
return NewTestQueueWithInformerFactory(ctx, lessFn, informerFactory, opts...) return NewTestQueueWithInformerFactory(ctx, lessFn, informerFactory, opts...)
} }

View File

@ -407,7 +407,7 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
// profiles, each with a different node in the filter configuration. // profiles, each with a different node in the filter configuration.
objs := append([]runtime.Object{ objs := append([]runtime.Object{
&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...) &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
client := clientsetfake.NewSimpleClientset(objs...) client := clientsetfake.NewClientset(objs...)
broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -547,7 +547,7 @@ func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
objs := []runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: fakeNamespace}}} objs := []runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: fakeNamespace}}}
objs = append(objs, initialNodes...) objs = append(objs, initialNodes...)
objs = append(objs, initialPods...) objs = append(objs, initialPods...)
client := clientsetfake.NewSimpleClientset(objs...) client := clientsetfake.NewClientset(objs...)
broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
@ -645,7 +645,7 @@ func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
func TestSchedulerScheduleOne(t *testing.T) { func TestSchedulerScheduleOne(t *testing.T) {
testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}} testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
client := clientsetfake.NewSimpleClientset(&testNode) client := clientsetfake.NewClientset(&testNode)
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
errS := errors.New("scheduler") errS := errors.New("scheduler")
errB := errors.New("binder") errB := errors.New("binder")
@ -760,7 +760,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
return pod.UID == gotAssumedPod.UID return pod.UID == gotAssumedPod.UID
}, },
} }
client := clientsetfake.NewSimpleClientset(item.sendPod) client := clientsetfake.NewClientset(item.sendPod)
client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
if action.GetSubresource() != "binding" { if action.GetSubresource() != "binding" {
return false, nil, nil return false, nil, nil
@ -1031,7 +1031,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
tf.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"), tf.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
} }
informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objects...), 0) informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objects...), 0)
scheduler, _, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, nil, fns...) scheduler, _, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, nil, fns...)
queuedPodStore.Add(podWithTooBigResourceRequests) queuedPodStore.Add(podWithTooBigResourceRequests)
@ -1061,7 +1061,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
findErr := fmt.Errorf("find err") findErr := fmt.Errorf("find err")
assumeErr := fmt.Errorf("assume err") assumeErr := fmt.Errorf("assume err")
bindErr := fmt.Errorf("bind err") bindErr := fmt.Errorf("bind err")
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
@ -1253,7 +1253,7 @@ func TestSchedulerBinding(t *testing.T) {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
pod := st.MakePod().Name(test.podName).Obj() pod := st.MakePod().Name(test.podName).Obj()
defaultBound := false defaultBound := false
client := clientsetfake.NewSimpleClientset(pod) client := clientsetfake.NewClientset(pod)
client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "binding" { if action.GetSubresource() == "binding" {
defaultBound = true defaultBound = true
@ -2466,7 +2466,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
cache.AddNode(logger, node) cache.AddNode(logger, node)
} }
cs := clientsetfake.NewSimpleClientset() cs := clientsetfake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(cs, 0) informerFactory := informers.NewSharedInformerFactory(cs, 0)
for _, pvc := range test.pvcs { for _, pvc := range test.pvcs {
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true") metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
@ -2784,7 +2784,7 @@ func TestZeroRequest(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
snapshot := internalcache.NewSnapshot(test.pods, test.nodes) snapshot := internalcache.NewSnapshot(test.pods, test.nodes)
@ -3187,7 +3187,7 @@ func Test_prioritizeNodes(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -3383,7 +3383,7 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
// create three nodes in the cluster. // create three nodes in the cluster.
nodes := makeNodeList([]string{"node1", "node2", "node3"}) nodes := makeNodeList([]string{"node1", "node2", "node3"})
client := clientsetfake.NewSimpleClientset(test.pod) client := clientsetfake.NewClientset(test.pod)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
cache := internalcache.New(ctx, time.Duration(0)) cache := internalcache.New(ctx, time.Duration(0))
for _, n := range nodes { for _, n := range nodes {
@ -3534,7 +3534,7 @@ func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queue
// scache: scheduler cache that might contain assumed pods. // scache: scheduler cache that might contain assumed pods.
func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) { func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
bindingChan := make(chan *v1.Binding, 1) bindingChan := make(chan *v1.Binding, 1)
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
var b *v1.Binding var b *v1.Binding
if action.GetSubresource() == "binding" { if action.GetSubresource() == "binding" {
@ -3552,7 +3552,7 @@ func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clien
} }
if informerFactory == nil { if informerFactory == nil {
informerFactory = informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0) informerFactory = informers.NewSharedInformerFactory(clientsetfake.NewClientset(), 0)
} }
schedulingQueue := internalqueue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory) schedulingQueue := internalqueue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
waitingPods := frameworkruntime.NewWaitingPodsMap() waitingPods := frameworkruntime.NewWaitingPodsMap()
@ -3604,7 +3604,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volu
scache := internalcache.New(ctx, 10*time.Minute) scache := internalcache.New(ctx, 10*time.Minute)
scache.AddNode(logger, &testNode) scache.AddNode(logger, &testNode)
testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}} testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
client := clientsetfake.NewSimpleClientset(&testNode, &testPVC) client := clientsetfake.NewClientset(&testNode, &testPVC)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims() pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
pvcInformer.Informer().GetStore().Add(&testPVC) pvcInformer.Informer().GetStore().Add(&testPVC)

View File

@ -180,7 +180,7 @@ func TestSchedulerCreation(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
client := fake.NewSimpleClientset() client := fake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
@ -277,7 +277,7 @@ func TestFailureHandler(t *testing.T) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}}) client := fake.NewClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
podInformer := informerFactory.Core().V1().Pods() podInformer := informerFactory.Core().V1().Pods()
// Need to add/update/delete testPod to the store. // Need to add/update/delete testPod to the store.
@ -337,7 +337,7 @@ func TestFailureHandler_PodAlreadyBound(t *testing.T) {
nodeFoo := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} nodeFoo := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Node("foo").Obj() testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Node("foo").Obj()
client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: []v1.Node{nodeFoo}}) client := fake.NewClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: []v1.Node{nodeFoo}})
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
podInformer := informerFactory.Core().V1().Pods() podInformer := informerFactory.Core().V1().Pods()
// Need to add testPod to the store. // Need to add testPod to the store.
@ -384,7 +384,7 @@ func TestWithPercentageOfNodesToScore(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
client := fake.NewSimpleClientset() client := fake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
_, ctx := ktesting.NewTestContext(t) _, ctx := ktesting.NewTestContext(t)
@ -910,7 +910,7 @@ func Test_UnionedGVKs(t *testing.T) {
func newFramework(ctx context.Context, r frameworkruntime.Registry, profile schedulerapi.KubeSchedulerProfile) (framework.Framework, error) { func newFramework(ctx context.Context, r frameworkruntime.Registry, profile schedulerapi.KubeSchedulerProfile) (framework.Framework, error) {
return frameworkruntime.NewFramework(ctx, r, &profile, return frameworkruntime.NewFramework(ctx, r, &profile,
frameworkruntime.WithSnapshotSharedLister(internalcache.NewSnapshot(nil, nil)), frameworkruntime.WithSnapshotSharedLister(internalcache.NewSnapshot(nil, nil)),
frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)), frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(fake.NewClientset(), 0)),
) )
} }
@ -994,7 +994,7 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
// Set up scheduler for the 3 nodes. // Set up scheduler for the 3 nodes.
objs := append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...) objs := append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
fakeClient := fake.NewSimpleClientset(objs...) fakeClient := fake.NewClientset(objs...)
informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) informerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: fakeClient.EventsV1()}) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: fakeClient.EventsV1()})
defer eventBroadcaster.Shutdown() defer eventBroadcaster.Shutdown()

View File

@ -226,7 +226,7 @@ func TestPatchPodStatus(t *testing.T) {
}{ }{
{ {
name: "Should update pod conditions successfully", name: "Should update pod conditions successfully",
client: clientsetfake.NewSimpleClientset(), client: clientsetfake.NewClientset(),
pod: v1.Pod{ pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Namespace: "ns", Namespace: "ns",
@ -250,7 +250,7 @@ func TestPatchPodStatus(t *testing.T) {
// which would fail the 2-way merge patch generation on Pod patches // which would fail the 2-way merge patch generation on Pod patches
// due to the mergeKey being the name field // due to the mergeKey being the name field
name: "Should update pod conditions successfully on a pod Spec with secrets with empty name", name: "Should update pod conditions successfully on a pod Spec with secrets with empty name",
client: clientsetfake.NewSimpleClientset(), client: clientsetfake.NewClientset(),
pod: v1.Pod{ pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Namespace: "ns", Namespace: "ns",
@ -273,7 +273,7 @@ func TestPatchPodStatus(t *testing.T) {
{ {
name: "retry patch request when an 'connection refused' error is returned", name: "retry patch request when an 'connection refused' error is returned",
client: func() *clientsetfake.Clientset { client: func() *clientsetfake.Clientset {
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
reqcount := 0 reqcount := 0
client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
@ -314,7 +314,7 @@ func TestPatchPodStatus(t *testing.T) {
{ {
name: "only 4 retries at most", name: "only 4 retries at most",
client: func() *clientsetfake.Clientset { client: func() *clientsetfake.Clientset {
client := clientsetfake.NewSimpleClientset() client := clientsetfake.NewClientset()
reqcount := 0 reqcount := 0
client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {