From 2d866ec2fc81d67103a7dea6d82c3b4b08d3b38d Mon Sep 17 00:00:00 2001 From: Kante Yin Date: Mon, 24 Apr 2023 14:22:51 +0800 Subject: [PATCH] Teardown only scheduler in integration tests Signed-off-by: Kante Yin --- test/integration/node/lifecycle_test.go | 4 - test/integration/podgc/podgc_test.go | 2 - test/integration/scheduler/bind/bind_test.go | 4 +- .../scheduler/extender/extender_test.go | 3 +- .../scheduler/filters/filters_test.go | 4 - .../scheduler/plugins/plugins_test.go | 714 ++++++++---------- .../scheduler/preemption/preemption_test.go | 22 +- test/integration/scheduler/queue_test.go | 8 +- test/integration/scheduler/scheduler_test.go | 8 +- .../scheduler/scoring/priorities_test.go | 12 +- .../integration/scheduler/taint/taint_test.go | 3 +- test/integration/util/util.go | 80 +- .../volumescheduling/volume_binding_test.go | 5 +- .../volume_capacity_priority_test.go | 3 +- 14 files changed, 377 insertions(+), 495 deletions(-) diff --git a/test/integration/node/lifecycle_test.go b/test/integration/node/lifecycle_test.go index 497517fda5f..2c92d9e6cb5 100644 --- a/test/integration/node/lifecycle_test.go +++ b/test/integration/node/lifecycle_test.go @@ -104,9 +104,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)() testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil) - - // Build clientset and informers for controllers. - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet // Build clientset and informers for controllers. @@ -263,7 +260,6 @@ func TestTaintBasedEvictions(t *testing.T) { podTolerations.SetExternalKubeClientSet(externalClientset) podTolerations.SetExternalKubeInformerFactory(externalInformers) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet // Start NodeLifecycleController for taint. 
diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go index c3e5d15ecf1..58798943c06 100644 --- a/test/integration/podgc/podgc_test.go +++ b/test/integration/podgc/podgc_test.go @@ -77,7 +77,6 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { t.Run(name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)() testCtx := setup(t, "podgc-orphaned") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet node := &v1.Node{ @@ -180,7 +179,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)() testCtx := setup(t, "podgc-out-of-service") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet node := &v1.Node{ diff --git a/test/integration/scheduler/bind/bind_test.go b/test/integration/scheduler/bind/bind_test.go index a633660aaea..c89346dfafe 100644 --- a/test/integration/scheduler/bind/bind_test.go +++ b/test/integration/scheduler/bind/bind_test.go @@ -28,9 +28,7 @@ import ( // TestDefaultBinder tests the binding process in the scheduler. func TestDefaultBinder(t *testing.T) { testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, "", nil), 0) - testutil.SyncInformerFactory(testCtx) - // Do not start scheduler routine. - defer testutil.CleanupTest(t, testCtx) + testutil.SyncSchedulerInformerFactory(testCtx) // Add a node. 
node, err := testutil.CreateNode(testCtx.ClientSet, st.MakeNode().Name("testnode").Obj()) diff --git a/test/integration/scheduler/extender/extender_test.go b/test/integration/scheduler/extender/extender_test.go index 76698aa40ae..1ba4a5614cb 100644 --- a/test/integration/scheduler/extender/extender_test.go +++ b/test/integration/scheduler/extender/extender_test.go @@ -354,9 +354,8 @@ func TestSchedulerExtender(t *testing.T) { } testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithExtenders(extenders...)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) DoTestPodScheduling(testCtx.NS, t, clientSet) } diff --git a/test/integration/scheduler/filters/filters_test.go b/test/integration/scheduler/filters/filters_test.go index a2f5aee00c3..475a065b281 100644 --- a/test/integration/scheduler/filters/filters_test.go +++ b/test/integration/scheduler/filters/filters_test.go @@ -66,7 +66,6 @@ var ( // anti-affinity predicate functions works correctly. 
func TestInterPodAffinity(t *testing.T) { testCtx := initTest(t, "") - defer testutils.CleanupTest(t, testCtx) // Add a few nodes with labels nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2) @@ -990,7 +989,6 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testCtx := initTest(t, "") - defer testutils.CleanupTest(t, testCtx) // Add a few nodes with labels nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2) @@ -1492,7 +1490,6 @@ func TestPodTopologySpreadFilter(t *testing.T) { testCtx := initTest(t, "pts-predicate") cs := testCtx.ClientSet ns := testCtx.NS.Name - defer testutils.CleanupTest(t, testCtx) for i := range tt.nodes { if _, err := createNode(cs, tt.nodes[i]); err != nil { @@ -1761,7 +1758,6 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tt.enableReadWriteOncePod)() testCtx := initTest(t, "scheduler-informer") - defer testutils.CleanupTest(t, testCtx) if tt.init != nil { if err := tt.init(testCtx.ClientSet, testCtx.NS.Name); err != nil { diff --git a/test/integration/scheduler/plugins/plugins_test.go b/test/integration/scheduler/plugins/plugins_test.go index df7e4b5ae1c..70496b40823 100644 --- a/test/integration/scheduler/plugins/plugins_test.go +++ b/test/integration/scheduler/plugins/plugins_test.go @@ -214,13 +214,6 @@ func (sp *ScorePlugin) Name() string { return scorePluginName } -// reset returns name of the score plugin. -func (sp *ScorePlugin) reset() { - sp.failScore = false - sp.numScoreCalled = 0 - sp.highScoreNode = "" -} - // Score returns the score of scheduling a pod on a specific node. 
func (sp *ScorePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) { curCalled := atomic.AddInt32(&sp.numScoreCalled, 1) @@ -246,12 +239,6 @@ func (sp *ScoreWithNormalizePlugin) Name() string { return scoreWithNormalizePluginName } -// reset returns name of the score plugin. -func (sp *ScoreWithNormalizePlugin) reset() { - sp.numScoreCalled = 0 - sp.numNormalizeScoreCalled = 0 -} - // Score returns the score of scheduling a pod on a specific node. func (sp *ScoreWithNormalizePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) { sp.numScoreCalled++ @@ -273,15 +260,6 @@ func (fp *FilterPlugin) Name() string { return filterPluginName } -// reset is used to reset filter plugin. -func (fp *FilterPlugin) reset() { - fp.numFilterCalled = 0 - fp.failFilter = false - if fp.numCalledPerPod != nil { - fp.numCalledPerPod = make(map[string]int) - } -} - // Filter is a test function that returns an error or nil, depending on the // value of "failFilter". func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { @@ -328,13 +306,6 @@ func (rp *ReservePlugin) Unreserve(ctx context.Context, state *framework.CycleSt } } -// reset used to reset internal counters. -func (rp *ReservePlugin) reset() { - rp.numReserveCalled = 0 - rp.numUnreserveCalled = 0 - rp.failReserve = false -} - // Name returns name of the plugin. func (*PreScorePlugin) Name() string { return preScorePluginName @@ -350,12 +321,6 @@ func (pfp *PreScorePlugin) PreScore(ctx context.Context, _ *framework.CycleState return nil } -// reset used to reset prescore plugin. -func (pfp *PreScorePlugin) reset() { - pfp.numPreScoreCalled = 0 - pfp.failPreScore = false -} - // Name returns name of the plugin. 
func (pp *PreBindPlugin) Name() string { return preBindPluginName @@ -377,15 +342,6 @@ func (pp *PreBindPlugin) PreBind(ctx context.Context, state *framework.CycleStat return nil } -// reset used to reset prebind plugin. -func (pp *PreBindPlugin) reset() { - pp.numPreBindCalled = 0 - pp.failPreBind = false - pp.rejectPreBind = false - pp.succeedOnRetry = false - pp.podUIDs = make(map[types.UID]struct{}) -} - const bindPluginAnnotation = "bindPluginName" func (bp *BindPlugin) Name() string { @@ -411,11 +367,6 @@ func (bp *BindPlugin) Bind(ctx context.Context, state *framework.CycleState, p * return bp.bindStatus } -// reset used to reset numBindCalled. -func (bp *BindPlugin) reset() { - bp.numBindCalled = 0 -} - // Name returns name of the plugin. func (pp *PostBindPlugin) Name() string { return pp.name @@ -429,11 +380,6 @@ func (pp *PostBindPlugin) PostBind(ctx context.Context, state *framework.CycleSt } } -// reset used to reset postbind plugin. -func (pp *PostBindPlugin) reset() { - pp.numPostBindCalled = 0 -} - // Name returns name of the plugin. func (pp *PreFilterPlugin) Name() string { return prefilterPluginName @@ -456,13 +402,6 @@ func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state *framework.Cycle return nil, nil } -// reset used to reset prefilter plugin. -func (pp *PreFilterPlugin) reset() { - pp.numPreFilterCalled = 0 - pp.failPreFilter = false - pp.rejectPreFilter = false -} - // Name returns name of the plugin. func (pp *PostFilterPlugin) Name() string { return pp.name @@ -551,31 +490,9 @@ func (pp *PermitPlugin) rejectAllPods() { pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { wp.Reject(pp.name, "rejectAllPods") }) } -// reset used to reset permit plugin. 
-func (pp *PermitPlugin) reset() { - pp.numPermitCalled = 0 - pp.failPermit = false - pp.rejectPermit = false - pp.timeoutPermit = false - pp.waitAndRejectPermit = false - pp.waitAndAllowPermit = false - pp.cancelled = false - pp.waitingPod = "" - pp.allowingPod = "" - pp.rejectingPod = "" -} - // TestPreFilterPlugin tests invocation of prefilter plugins. func TestPreFilterPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a pre-filter plugin. - preFilterPlugin := &PreFilterPlugin{} - registry, prof := initRegistryAndConfig(t, preFilterPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "prefilter-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "prefilter-plugin", nil) tests := []struct { name string @@ -601,6 +518,15 @@ func TestPreFilterPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a pre-filter plugin. + preFilterPlugin := &PreFilterPlugin{} + registry, prof := initRegistryAndConfig(t, preFilterPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + preFilterPlugin.failPreFilter = test.fail preFilterPlugin.rejectPreFilter = test.reject // Create a best effort pod. 
@@ -627,9 +553,6 @@ func TestPreFilterPlugin(t *testing.T) { if preFilterPlugin.numPreFilterCalled == 0 { t.Errorf("Expected the prefilter plugin to be called.") } - - preFilterPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -713,8 +636,9 @@ func TestPostFilterPlugin(t *testing.T) { } var postFilterPluginName2 = postfilterPluginName + "2" + testContext := testutils.InitTestAPIServer(t, "post-filter", nil) - for i, tt := range tests { + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create a plugin registry for testing. Register a combination of filter and postFilter plugin. var ( @@ -771,15 +695,11 @@ func TestPostFilterPlugin(t *testing.T) { }, }}}) - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, fmt.Sprintf("postfilter%v-", i), nil), - int(tt.numNodes), + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, int(tt.numNodes), scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry), ) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name})) @@ -820,14 +740,7 @@ func TestPostFilterPlugin(t *testing.T) { // TestScorePlugin tests invocation of score plugins. func TestScorePlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a score plugin. 
- scorePlugin := &ScorePlugin{} - registry, prof := initRegistryAndConfig(t, scorePlugin) - - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "score-plugin", nil), 10, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "score-plugin", nil) tests := []struct { name string @@ -845,6 +758,15 @@ func TestScorePlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a score plugin. + scorePlugin := &ScorePlugin{} + registry, prof := initRegistryAndConfig(t, scorePlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 10, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + scorePlugin.failScore = test.fail // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -873,9 +795,6 @@ func TestScorePlugin(t *testing.T) { if numScoreCalled := atomic.LoadInt32(&scorePlugin.numScoreCalled); numScoreCalled == 0 { t.Errorf("Expected the score plugin to be called.") } - - scorePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -886,12 +805,10 @@ func TestNormalizeScorePlugin(t *testing.T) { scoreWithNormalizePlugin := &ScoreWithNormalizePlugin{} registry, prof := initRegistryAndConfig(t, scoreWithNormalizePlugin) - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "score-plugin", nil), 10, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "score-plugin", nil), 10, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) - // Create a best effort pod. 
pod, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name})) @@ -909,21 +826,11 @@ func TestNormalizeScorePlugin(t *testing.T) { if scoreWithNormalizePlugin.numNormalizeScoreCalled == 0 { t.Error("Expected the normalize score plugin to be called") } - - scoreWithNormalizePlugin.reset() } // TestReservePlugin tests invocation of reserve plugins. func TestReservePluginReserve(t *testing.T) { - // Create a plugin registry for testing. Register only a reserve plugin. - reservePlugin := &ReservePlugin{} - registry, prof := initRegistryAndConfig(t, reservePlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "reserve-plugin-reserve", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "reserve-plugin-reserve", nil) tests := []struct { name string @@ -941,6 +848,15 @@ func TestReservePluginReserve(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a reserve plugin. + reservePlugin := &ReservePlugin{} + registry, prof := initRegistryAndConfig(t, reservePlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + reservePlugin.failReserve = test.fail // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -963,57 +879,15 @@ func TestReservePluginReserve(t *testing.T) { if reservePlugin.numReserveCalled == 0 { t.Errorf("Expected the reserve plugin to be called.") } - - reservePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPrebindPlugin tests invocation of prebind plugins. 
func TestPrebindPlugin(t *testing.T) { - // Create a plugin registry for testing. Register a prebind and a filter plugin. - preBindPlugin := &PreBindPlugin{podUIDs: make(map[types.UID]struct{})} - filterPlugin := &FilterPlugin{} - registry := frameworkruntime.Registry{ - preBindPluginName: newPlugin(preBindPlugin), - filterPluginName: newPlugin(filterPlugin), - } + testContext := testutils.InitTestAPIServer(t, "prebind-plugin", nil) - // Setup initial prebind and filter plugin in different profiles. - // The second profile ensures the embedded filter plugin is exclusively called, and hence - // we can use its internal `numFilterCalled` to perform some precise checking logic. - cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ - Profiles: []configv1.KubeSchedulerProfile{ - { - SchedulerName: pointer.String(v1.DefaultSchedulerName), - Plugins: &configv1.Plugins{ - PreBind: configv1.PluginSet{ - Enabled: []configv1.Plugin{ - {Name: preBindPluginName}, - }, - }, - }, - }, - { - SchedulerName: pointer.String("2nd-scheduler"), - Plugins: &configv1.Plugins{ - Filter: configv1.PluginSet{ - Enabled: []configv1.Plugin{ - {Name: filterPluginName}, - }, - }, - }, - }, - }, - }) - - // Create the API server and the scheduler with the test plugin set. 
nodesNum := 2 - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "prebind-plugin", nil), nodesNum, - scheduler.WithProfiles(cfg.Profiles...), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) tests := []struct { name string @@ -1051,19 +925,59 @@ func TestPrebindPlugin(t *testing.T) { { name: "failure on preBind moves unschedulable pods", fail: true, - unschedulablePod: st.MakePod().Name("unschedulable-pod").Namespace(testCtx.NS.Name).Container(imageutils.GetPauseImageName()).Obj(), + unschedulablePod: st.MakePod().Name("unschedulable-pod").Namespace(testContext.NS.Name).Container(imageutils.GetPauseImageName()).Obj(), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register a prebind and a filter plugin. + preBindPlugin := &PreBindPlugin{podUIDs: make(map[types.UID]struct{})} + filterPlugin := &FilterPlugin{} + registry := frameworkruntime.Registry{ + preBindPluginName: newPlugin(preBindPlugin), + filterPluginName: newPlugin(filterPlugin), + } + + // Setup initial prebind and filter plugin in different profiles. + // The second profile ensures the embedded filter plugin is exclusively called, and hence + // we can use its internal `numFilterCalled` to perform some precise checking logic. 
+ cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{ + { + SchedulerName: pointer.String(v1.DefaultSchedulerName), + Plugins: &configv1.Plugins{ + PreBind: configv1.PluginSet{ + Enabled: []configv1.Plugin{ + {Name: preBindPluginName}, + }, + }, + }, + }, + { + SchedulerName: pointer.String("2nd-scheduler"), + Plugins: &configv1.Plugins{ + Filter: configv1.PluginSet{ + Enabled: []configv1.Plugin{ + {Name: filterPluginName}, + }, + }, + }, + }, + }, + }) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, nodesNum, + scheduler.WithProfiles(cfg.Profiles...), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + if p := test.unschedulablePod; p != nil { p.Spec.SchedulerName = "2nd-scheduler" filterPlugin.rejectFilter = true if _, err := createPausePod(testCtx.ClientSet, p); err != nil { t.Fatalf("Error while creating an unschedulable pod: %v", err) } - defer filterPlugin.reset() } preBindPlugin.failPreBind = test.fail @@ -1105,9 +1019,6 @@ func TestPrebindPlugin(t *testing.T) { t.Errorf("Timed out waiting for the unschedulable Pod to be retried at least twice.") } } - - preBindPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -1201,6 +1112,8 @@ func TestUnReserveReservePlugins(t *testing.T) { }, } + testContext := testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil) + for _, test := range tests { t.Run(test.name, func(t *testing.T) { var pls []framework.Plugin @@ -1209,14 +1122,10 @@ func TestUnReserveReservePlugins(t *testing.T) { } registry, prof := initRegistryAndConfig(t, pls...) - // Create the API server and the scheduler with the test plugin set. 
- testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil), - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. podName := "test-pod" @@ -1260,13 +1169,14 @@ func TestUnReserveReservePlugins(t *testing.T) { } } } - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestUnReservePermitPlugins tests unreserve of Permit plugins. func TestUnReservePermitPlugins(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil) + tests := []struct { name string plugin *PermitPlugin @@ -1305,14 +1215,10 @@ func TestUnReservePermitPlugins(t *testing.T) { } registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...) - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil), - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(profile), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. podName := "test-pod" @@ -1345,14 +1251,14 @@ func TestUnReservePermitPlugins(t *testing.T) { if test.plugin.numPermitCalled != 1 { t.Errorf("Expected the Permit plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestUnReservePreBindPlugins tests unreserve of Prebind plugins. 
func TestUnReservePreBindPlugins(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "unreserve-prebind-plugin", nil) + tests := []struct { name string plugin *PreBindPlugin @@ -1381,14 +1287,10 @@ func TestUnReservePreBindPlugins(t *testing.T) { } registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...) - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, "unreserve-prebind-plugin", nil), - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(profile), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a pause pod. podName := "test-pod" @@ -1421,14 +1323,14 @@ func TestUnReservePreBindPlugins(t *testing.T) { if test.plugin.numPreBindCalled != 1 { t.Errorf("Expected the Prebind plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestUnReserveBindPlugins tests unreserve of Bind plugins. func TestUnReserveBindPlugins(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "unreserve-bind-plugin", nil) + tests := []struct { name string plugin *BindPlugin @@ -1454,17 +1356,12 @@ func TestUnReserveBindPlugins(t *testing.T) { } registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...) - apiCtx := testutils.InitTestAPIServer(t, "unreserve-bind-plugin", nil) - test.plugin.client = apiCtx.ClientSet + test.plugin.client = testContext.ClientSet - // Create the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - apiCtx, - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(profile), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a pause pod. 
podName := "test-pod" @@ -1497,8 +1394,6 @@ func TestUnReserveBindPlugins(t *testing.T) { if test.plugin.numBindCalled != 1 { t.Errorf("Expected the Bind plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -1508,61 +1403,16 @@ type pluginInvokeEvent struct { val int } -// TestBindPlugin tests invocation of bind plugins. func TestBindPlugin(t *testing.T) { + + var ( + bindPlugin1Name = "bind-plugin-1" + bindPlugin2Name = "bind-plugin-2" + reservePluginName = "mock-reserve-plugin" + postBindPluginName = "mock-post-bind-plugin" + ) + testContext := testutils.InitTestAPIServer(t, "bind-plugin", nil) - bindPlugin1 := &BindPlugin{name: "bind-plugin-1", client: testContext.ClientSet} - bindPlugin2 := &BindPlugin{name: "bind-plugin-2", client: testContext.ClientSet} - reservePlugin := &ReservePlugin{name: "mock-reserve-plugin"} - postBindPlugin := &PostBindPlugin{name: "mock-post-bind-plugin"} - // Create a plugin registry for testing. Register reserve, bind, and - // postBind plugins. - - registry := frameworkruntime.Registry{ - reservePlugin.Name(): newPlugin(reservePlugin), - bindPlugin1.Name(): newPlugin(bindPlugin1), - bindPlugin2.Name(): newPlugin(bindPlugin2), - postBindPlugin.Name(): newPlugin(postBindPlugin), - } - - // Setup initial unreserve and bind plugins for testing. - cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ - Profiles: []configv1.KubeSchedulerProfile{{ - SchedulerName: pointer.String(v1.DefaultSchedulerName), - Plugins: &configv1.Plugins{ - MultiPoint: configv1.PluginSet{ - Disabled: []configv1.Plugin{ - {Name: defaultbinder.Name}, - }, - }, - Reserve: configv1.PluginSet{ - Enabled: []configv1.Plugin{{Name: reservePlugin.Name()}}, - }, - Bind: configv1.PluginSet{ - // Put DefaultBinder last. 
- Enabled: []configv1.Plugin{{Name: bindPlugin1.Name()}, {Name: bindPlugin2.Name()}, {Name: defaultbinder.Name}}, - Disabled: []configv1.Plugin{{Name: defaultbinder.Name}}, - }, - PostBind: configv1.PluginSet{ - Enabled: []configv1.Plugin{{Name: postBindPlugin.Name()}}, - }, - }, - }}, - }) - - // Create the scheduler with the test plugin set. - testCtx := testutils.InitTestSchedulerWithOptions(t, testContext, 0, - scheduler.WithProfiles(cfg.Profiles...), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - testutils.SyncInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) - - // Add a few nodes. - _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode(), 2) - if err != nil { - t.Fatal(err) - } tests := []struct { name string @@ -1576,32 +1426,77 @@ func TestBindPlugin(t *testing.T) { name: "bind plugins skipped to bind the pod and scheduler bond the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Skip, ""), framework.NewStatus(framework.Skip, "")}, expectBoundByScheduler: true, - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: bindPlugin2.Name(), val: 1}, {pluginName: postBindPlugin.Name(), val: 1}}, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: bindPlugin2Name, val: 1}, {pluginName: postBindPluginName, val: 1}}, }, { name: "bindplugin2 succeeded to bind the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Skip, ""), framework.NewStatus(framework.Success, "")}, expectBoundByPlugin: true, - expectBindPluginName: bindPlugin2.Name(), - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: bindPlugin2.Name(), val: 1}, {pluginName: postBindPlugin.Name(), val: 1}}, + expectBindPluginName: bindPlugin2Name, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: 
bindPlugin2Name, val: 1}, {pluginName: postBindPluginName, val: 1}}, }, { name: "bindplugin1 succeeded to bind the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Success, ""), framework.NewStatus(framework.Success, "")}, expectBoundByPlugin: true, - expectBindPluginName: bindPlugin1.Name(), - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: postBindPlugin.Name(), val: 1}}, + expectBindPluginName: bindPlugin1Name, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: postBindPluginName, val: 1}}, }, { name: "bind plugin fails to bind the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Error, "failed to bind"), framework.NewStatus(framework.Success, "")}, - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: reservePlugin.Name(), val: 1}}, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: reservePluginName, val: 1}}, }, } var pluginInvokeEventChan chan pluginInvokeEvent for _, test := range tests { t.Run(test.name, func(t *testing.T) { + bindPlugin1 := &BindPlugin{name: bindPlugin1Name, client: testContext.ClientSet} + bindPlugin2 := &BindPlugin{name: bindPlugin2Name, client: testContext.ClientSet} + reservePlugin := &ReservePlugin{name: reservePluginName} + postBindPlugin := &PostBindPlugin{name: postBindPluginName} + + // Create a plugin registry for testing. Register reserve, bind, and + // postBind plugins. + registry := frameworkruntime.Registry{ + reservePlugin.Name(): newPlugin(reservePlugin), + bindPlugin1.Name(): newPlugin(bindPlugin1), + bindPlugin2.Name(): newPlugin(bindPlugin2), + postBindPlugin.Name(): newPlugin(postBindPlugin), + } + + // Setup initial unreserve and bind plugins for testing. 
+ cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{{ + SchedulerName: pointer.String(v1.DefaultSchedulerName), + Plugins: &configv1.Plugins{ + MultiPoint: configv1.PluginSet{ + Disabled: []configv1.Plugin{ + {Name: defaultbinder.Name}, + }, + }, + Reserve: configv1.PluginSet{ + Enabled: []configv1.Plugin{{Name: reservePlugin.Name()}}, + }, + Bind: configv1.PluginSet{ + // Put DefaultBinder last. + Enabled: []configv1.Plugin{{Name: bindPlugin1.Name()}, {Name: bindPlugin2.Name()}, {Name: defaultbinder.Name}}, + Disabled: []configv1.Plugin{{Name: defaultbinder.Name}}, + }, + PostBind: configv1.PluginSet{ + Enabled: []configv1.Plugin{{Name: postBindPlugin.Name()}}, + }, + }, + }}, + }) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(cfg.Profiles...), + scheduler.WithFrameworkOutOfTreeRegistry(registry), + ) + defer teardown() + pluginInvokeEventChan = make(chan pluginInvokeEvent, 10) bindPlugin1.bindStatus = test.bindPluginStatuses[0] @@ -1678,17 +1573,14 @@ func TestBindPlugin(t *testing.T) { t.Errorf("Waiting for invoke event %d timeout.", j) } } - postBindPlugin.reset() - bindPlugin1.reset() - bindPlugin2.reset() - reservePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPostBindPlugin tests invocation of postbind plugins. func TestPostBindPlugin(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "postbind-plugin", nil) + tests := []struct { name string preBindFail bool @@ -1716,11 +1608,10 @@ func TestPostBindPlugin(t *testing.T) { } registry, prof := initRegistryAndConfig(t, preBindPlugin, postBindPlugin) - // Create the API server and the scheduler with the test plugin set. 
- testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "postbind-plugin", nil), 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -1749,23 +1640,13 @@ func TestPostBindPlugin(t *testing.T) { t.Errorf("Expected the postbind plugin to be called, was called %d times.", postBindPlugin.numPostBindCalled) } } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPermitPlugin tests invocation of permit plugins. func TestPermitPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a permit plugin. - perPlugin := &PermitPlugin{name: permitPluginName} - registry, prof := initRegistryAndConfig(t, perPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "permit-plugin", nil) tests := []struct { name string @@ -1813,6 +1694,16 @@ func TestPermitPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + + // Create a plugin registry for testing. Register only a permit plugin. 
+ perPlugin := &PermitPlugin{name: permitPluginName} + registry, prof := initRegistryAndConfig(t, perPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + perPlugin.failPermit = test.fail perPlugin.rejectPermit = test.reject perPlugin.timeoutPermit = test.timeout @@ -1844,9 +1735,6 @@ func TestPermitPlugin(t *testing.T) { if perPlugin.numPermitCalled == 0 { t.Errorf("Expected the permit plugin to be called.") } - - perPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -1859,10 +1747,9 @@ func TestMultiplePermitPlugins(t *testing.T) { registry, prof := initRegistryAndConfig(t, perPlugin1, perPlugin2) // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "multi-permit-plugin", nil), 2, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "multi-permit-plugin", nil), 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) // Both permit plugins will return Wait for permitting perPlugin1.timeoutPermit = true @@ -1902,8 +1789,6 @@ func TestMultiplePermitPlugins(t *testing.T) { if perPlugin1.numPermitCalled == 0 || perPlugin2.numPermitCalled == 0 { t.Errorf("Expected the permit plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) } // TestPermitPluginsCancelled tests whether all permit plugins are cancelled when pod is rejected. @@ -1914,10 +1799,9 @@ func TestPermitPluginsCancelled(t *testing.T) { registry, prof := initRegistryAndConfig(t, perPlugin1, perPlugin2) // Create the API server and the scheduler with the test plugin set. 
- testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugins", nil), 2, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugins", nil), 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) // Both permit plugins will return Wait for permitting perPlugin1.timeoutPermit = true @@ -1950,16 +1834,7 @@ func TestPermitPluginsCancelled(t *testing.T) { // TestCoSchedulingWithPermitPlugin tests invocation of permit plugins. func TestCoSchedulingWithPermitPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a permit plugin. - permitPlugin := &PermitPlugin{name: permitPluginName} - registry, prof := initRegistryAndConfig(t, permitPlugin) - - // Create the API server and the scheduler with the test plugin set. - // TODO Make the subtests not share scheduler instances. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "permit-plugin", nil) tests := []struct { name string @@ -1980,6 +1855,16 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + + // Create a plugin registry for testing. Register only a permit plugin. 
+ permitPlugin := &PermitPlugin{name: permitPluginName} + registry, prof := initRegistryAndConfig(t, permitPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + permitPlugin.failPermit = false permitPlugin.rejectPermit = false permitPlugin.timeoutPermit = false @@ -2028,24 +1913,13 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) { if permitPlugin.numPermitCalled == 0 { t.Errorf("Expected the permit plugin to be called.") } - - permitPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{podA, podB}) }) } } // TestFilterPlugin tests invocation of filter plugins. func TestFilterPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a filter plugin. - filterPlugin := &FilterPlugin{} - registry, prof := initRegistryAndConfig(t, filterPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "filter-plugin", nil), 1, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "filter-plugin", nil) tests := []struct { name string @@ -2063,6 +1937,15 @@ func TestFilterPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a filter plugin. + filterPlugin := &FilterPlugin{} + registry, prof := initRegistryAndConfig(t, filterPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 1, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + filterPlugin.failFilter = test.fail // Create a best effort pod. 
pod, err := createPausePod(testCtx.ClientSet, @@ -2086,24 +1969,13 @@ func TestFilterPlugin(t *testing.T) { t.Errorf("Expected the filter plugin to be called 1 time, but got %v.", filterPlugin.numFilterCalled) } } - - filterPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPreScorePlugin tests invocation of pre-score plugins. func TestPreScorePlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a pre-score plugin. - preScorePlugin := &PreScorePlugin{} - registry, prof := initRegistryAndConfig(t, preScorePlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "pre-score-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "pre-score-plugin", nil) tests := []struct { name string @@ -2121,6 +1993,15 @@ func TestPreScorePlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a pre-score plugin. + preScorePlugin := &PreScorePlugin{} + registry, prof := initRegistryAndConfig(t, preScorePlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + preScorePlugin.failPreScore = test.fail // Create a best effort pod. 
pod, err := createPausePod(testCtx.ClientSet, @@ -2142,9 +2023,6 @@ func TestPreScorePlugin(t *testing.T) { if preScorePlugin.numPreScoreCalled == 0 { t.Errorf("Expected the pre-score plugin to be called.") } - - preScorePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -2153,17 +2031,7 @@ func TestPreScorePlugin(t *testing.T) { func TestPreEnqueuePlugin(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodSchedulingReadiness, true)() - // Create a plugin registry for testing. Register only a filter plugin. - enqueuePlugin := &PreEnqueuePlugin{} - // Plumb a preFilterPlugin to verify if it's called or not. - preFilterPlugin := &PreFilterPlugin{} - registry, prof := initRegistryAndConfig(t, enqueuePlugin, preFilterPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "enqueue-plugin", nil), 1, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "enqueue-plugin", nil) tests := []struct { name string @@ -2172,18 +2040,29 @@ func TestPreEnqueuePlugin(t *testing.T) { }{ { name: "pod is admitted to enqueue", - pod: st.MakePod().Name("p").Namespace(testCtx.NS.Name).Container("pause").Obj(), + pod: st.MakePod().Name("p").Namespace(testContext.NS.Name).Container("pause").Obj(), admitEnqueue: true, }, { name: "pod is not admitted to enqueue", - pod: st.MakePod().Name("p").Namespace(testCtx.NS.Name).SchedulingGates([]string{"foo"}).Container("pause").Obj(), + pod: st.MakePod().Name("p").Namespace(testContext.NS.Name).SchedulingGates([]string{"foo"}).Container("pause").Obj(), admitEnqueue: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a filter plugin. 
+ enqueuePlugin := &PreEnqueuePlugin{} + // Plumb a preFilterPlugin to verify if it's called or not. + preFilterPlugin := &PreFilterPlugin{} + registry, prof := initRegistryAndConfig(t, enqueuePlugin, preFilterPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 1, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + enqueuePlugin.admit = tt.admitEnqueue // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, tt.pod) @@ -2208,9 +2087,6 @@ func TestPreEnqueuePlugin(t *testing.T) { t.Errorf("Expected the preFilter plugin not to be called, but got %v", preFilterPlugin.numPreFilterCalled) } } - - preFilterPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -2223,61 +2099,9 @@ func TestPreEnqueuePlugin(t *testing.T) { // // - when waitingPods get deleted externally, it'd trigger moving unschedulable Pods func TestPreemptWithPermitPlugin(t *testing.T) { - // Create a plugin registry for testing. Register a permit and a filter plugin. - permitPlugin := &PermitPlugin{} - // Inject a fake filter plugin to use its internal `numFilterCalled` to verify - // how many times a Pod gets tried scheduling. - filterPlugin := &FilterPlugin{numCalledPerPod: make(map[string]int)} - registry := frameworkruntime.Registry{ - permitPluginName: newPlugin(permitPlugin), - filterPluginName: newPlugin(filterPlugin), - } + testContext := testutils.InitTestAPIServer(t, "preempt-with-permit-plugin", nil) - // Setup initial permit and filter plugins in the profile. 
- cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ - Profiles: []configv1.KubeSchedulerProfile{ - { - SchedulerName: pointer.String(v1.DefaultSchedulerName), - Plugins: &configv1.Plugins{ - Permit: configv1.PluginSet{ - Enabled: []configv1.Plugin{ - {Name: permitPluginName}, - }, - }, - Filter: configv1.PluginSet{ - // Ensure the fake filter plugin is always called; otherwise noderesources - // would fail first and exit the Filter phase. - Enabled: []configv1.Plugin{ - {Name: filterPluginName}, - {Name: noderesources.Name}, - }, - Disabled: []configv1.Plugin{ - {Name: noderesources.Name}, - }, - }, - }, - }, - }, - }) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "preempt-with-permit-plugin", nil), 0, - scheduler.WithProfiles(cfg.Profiles...), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) - - // Add one node. 
- nodeRes := map[v1.ResourceName]string{ - v1.ResourcePods: "32", - v1.ResourceCPU: "500m", - v1.ResourceMemory: "500", - } - _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode().Capacity(nodeRes), 1) - if err != nil { - t.Fatal(err) - } - - ns := testCtx.NS.Name + ns := testContext.NS.Name lowPriority, highPriority := int32(100), int32(300) resReq := map[v1.ResourceName]string{ v1.ResourceCPU: "200m", @@ -2288,6 +2112,12 @@ func TestPreemptWithPermitPlugin(t *testing.T) { v1.ResourceMemory: "400", } + nodeRes := map[v1.ResourceName]string{ + v1.ResourcePods: "32", + v1.ResourceCPU: "500m", + v1.ResourceMemory: "500", + } + tests := []struct { name string deleteWaitingPod bool @@ -2320,17 +2150,53 @@ func TestPreemptWithPermitPlugin(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - defer func() { - permitPlugin.reset() - filterPlugin.reset() - var pods []*v1.Pod - for _, p := range []*v1.Pod{tt.runningPod, tt.waitingPod, tt.preemptor} { - if p != nil { - pods = append(pods, p) - } - } - testutils.CleanupPods(testCtx.ClientSet, t, pods) - }() + // Create a plugin registry for testing. Register a permit and a filter plugin. + permitPlugin := &PermitPlugin{} + // Inject a fake filter plugin to use its internal `numFilterCalled` to verify + // how many times a Pod gets tried scheduling. + filterPlugin := &FilterPlugin{numCalledPerPod: make(map[string]int)} + registry := frameworkruntime.Registry{ + permitPluginName: newPlugin(permitPlugin), + filterPluginName: newPlugin(filterPlugin), + } + + // Setup initial permit and filter plugins in the profile. 
+ cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{ + { + SchedulerName: pointer.String(v1.DefaultSchedulerName), + Plugins: &configv1.Plugins{ + Permit: configv1.PluginSet{ + Enabled: []configv1.Plugin{ + {Name: permitPluginName}, + }, + }, + Filter: configv1.PluginSet{ + // Ensure the fake filter plugin is always called; otherwise noderesources + // would fail first and exit the Filter phase. + Enabled: []configv1.Plugin{ + {Name: filterPluginName}, + {Name: noderesources.Name}, + }, + Disabled: []configv1.Plugin{ + {Name: noderesources.Name}, + }, + }, + }, + }, + }, + }) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 0, + scheduler.WithProfiles(cfg.Profiles...), + scheduler.WithFrameworkOutOfTreeRegistry(registry), + ) + defer teardown() + + _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode().Capacity(nodeRes), 1) + if err != nil { + t.Fatal(err) + } permitPlugin.waitAndAllowPermit = true permitPlugin.waitingPod = "waiting-pod" @@ -2502,10 +2368,9 @@ func TestActivatePods(t *testing.T) { }) // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "job-plugin", nil), 1, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "job-plugin", nil), 1, scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet ns := testCtx.NS.Name @@ -2549,10 +2414,17 @@ func TestActivatePods(t *testing.T) { } } -func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestContext, nodeCount int, opts ...scheduler.Option) *testutils.TestContext { +// The returned shutdown func will delete created resources and scheduler, resources should be those +// that will affect the scheduling result, like nodes, pods, etc.. 
Namespaces should not be +// deleted here because they're created together with the apiserver; they should be deleted +// simultaneously or we'll have no namespace. +// This should only be called when you want to kill the scheduler alone, away from apiserver. +// For example, in scheduler integration tests, recreating the apiserver is time-consuming, +// so shutting down the scheduler and recreating it between test cases is a better approach. +func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestContext, nodeCount int, opts ...scheduler.Option) (*testutils.TestContext, testutils.ShutdownFunc) { testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, opts...) - testutils.SyncInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.Ctx) + testutils.SyncSchedulerInformerFactory(testCtx) + go testCtx.Scheduler.Run(testCtx.SchedulerCtx) if nodeCount > 0 { if _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode(), nodeCount); err != nil { @@ -2561,7 +2433,27 @@ func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestCont t.Fatal(err) } } - return testCtx + + teardown := func() { + err := testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.SchedulerCtx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}) + if err != nil { + t.Errorf("error while deleting all nodes: %v", err) + } + err = testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).DeleteCollection(testCtx.SchedulerCtx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}) + if err != nil { + t.Errorf("error while deleting pod: %v", err) + } + // Wait for all pods to be deleted, or it will fail to create pods with the same names + // required in other test cases. + if err := wait.Poll(time.Millisecond, wait.ForeverTestTimeout, + testutils.PodsCleanedUp(testCtx.SchedulerCtx, testCtx.ClientSet, testCtx.NS.Name)); err != nil { + t.Errorf("error while waiting for all pods to be deleted: %v", err) + } + // Kill the scheduler. 
+ testCtx.SchedulerCloseFn() + } + + return testCtx, teardown } // initRegistryAndConfig returns registry and plugins config based on give plugins. diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go index ceac2107db1..b19b8f8ef86 100644 --- a/test/integration/scheduler/preemption/preemption_test.go +++ b/test/integration/scheduler/preemption/preemption_test.go @@ -178,10 +178,9 @@ func TestPreemption(t *testing.T) { 0, scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ @@ -501,7 +500,6 @@ func TestNonPreemption(t *testing.T) { var preemptNever = v1.PreemptNever // Initialize scheduler. testCtx := initTest(t, "non-preemption") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { name string @@ -579,7 +577,6 @@ func TestNonPreemption(t *testing.T) { func TestDisablePreemption(t *testing.T) { // Initialize scheduler, and disable preemption. testCtx := initTestDisablePreemption(t, "disable-preemption") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { @@ -659,7 +656,6 @@ func TestDisablePreemption(t *testing.T) { func TestPodPriorityResolution(t *testing.T) { admission := priority.NewPlugin() testCtx := testutils.InitTestScheduler(t, testutils.InitTestAPIServer(t, "preemption", admission)) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet // Build clientset and informers for controllers. 
@@ -671,7 +667,7 @@ func TestPodPriorityResolution(t *testing.T) { admission.SetExternalKubeInformerFactory(externalInformers) // Waiting for all controllers to sync - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) externalInformers.Start(testCtx.Ctx.Done()) externalInformers.WaitForCacheSync(testCtx.Ctx.Done()) @@ -780,7 +776,6 @@ func mkPriorityPodWithGrace(tc *testutils.TestContext, name string, priority int func TestPreemptionStarvation(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "preemption") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { @@ -879,7 +874,6 @@ func TestPreemptionStarvation(t *testing.T) { func TestPreemptionRaces(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "preemption-race") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { @@ -1136,9 +1130,6 @@ func TestNominatedNodeCleanUp(t *testing.T) { scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(tt.outOfTreeRegistry), ) - t.Cleanup(func() { - testutils.CleanupTest(t, testCtx) - }) cs, ns := testCtx.ClientSet, testCtx.NS.Name // Create a node with the specified capacity. @@ -1227,7 +1218,6 @@ func addPodConditionReady(pod *v1.Pod) { func TestPDBInPreemption(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "preemption-pdb") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet initDisruptionController(t, testCtx) @@ -1480,7 +1470,7 @@ func TestPDBInPreemption(t *testing.T) { func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testutils.TestContext { testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, nsPrefix, nil), 0, opts...) 
- testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) // wraps the NextPod() method to make it appear the preemption has been done already and the nominated node has been set. f := testCtx.Scheduler.NextPod testCtx.Scheduler.NextPod = func() (podInfo *framework.QueuedPodInfo) { @@ -1561,9 +1551,6 @@ func TestPreferNominatedNode(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testCtx := initTestPreferNominatedNode(t, "perfer-nominated-node") - t.Cleanup(func() { - testutils.CleanupTest(t, testCtx) - }) cs := testCtx.ClientSet nsName := testCtx.NS.Name var err error @@ -1637,10 +1624,9 @@ func TestReadWriteOncePodPreemption(t *testing.T) { testutils.InitTestAPIServer(t, "preemption", nil), 0, scheduler.WithProfiles(cfg.Profiles...)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet storage := v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}} diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go index 05a788d20df..83ab1b1ccd5 100644 --- a/test/integration/scheduler/queue_test.go +++ b/test/integration/scheduler/queue_test.go @@ -124,8 +124,7 @@ func TestSchedulingGates(t *testing.T) { scheduler.WithPodInitialBackoffSeconds(0), scheduler.WithPodMaxBackoffSeconds(0), ) - testutils.SyncInformerFactory(testCtx) - defer testutils.CleanupTest(t, testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx for _, p := range tt.pods { @@ -186,9 +185,8 @@ func TestCoreResourceEnqueue(t *testing.T) { scheduler.WithPodInitialBackoffSeconds(0), scheduler.WithPodMaxBackoffSeconds(0), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) - defer testutils.CleanupTest(t, testCtx) 
defer testCtx.Scheduler.SchedulingQueue.Close() cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx @@ -371,7 +369,7 @@ func TestCustomResourceEnqueue(t *testing.T) { scheduler.WithPodInitialBackoffSeconds(0), scheduler.WithPodMaxBackoffSeconds(0), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) defer testutils.CleanupTest(t, testCtx) diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 2443857671d..620c9d99e18 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -51,7 +51,6 @@ type nodeStateManager struct { func TestUnschedulableNodes(t *testing.T) { testCtx := initTest(t, "unschedulable-nodes") - defer testutils.CleanupTest(t, testCtx) nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister() // NOTE: This test cannot run in parallel, because it is creating and deleting @@ -191,7 +190,6 @@ func TestMultipleSchedulers(t *testing.T) { // 1. create and start default-scheduler testCtx := initTest(t, "multi-scheduler") - defer testutils.CleanupTest(t, testCtx) // 2. create a node node := &v1.Node{ @@ -263,7 +261,7 @@ func TestMultipleSchedulers(t *testing.T) { }, }) testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithProfiles(cfg.Profiles...)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) // 6. **check point-2**: @@ -285,7 +283,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) { }) testCtx := initTest(t, "multi-scheduler", scheduler.WithProfiles(cfg.Profiles...)) - defer testutils.CleanupTest(t, testCtx) node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"}, @@ -349,7 +346,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) { // This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not. 
func TestAllocatable(t *testing.T) { testCtx := initTest(t, "allocatable") - defer testutils.CleanupTest(t, testCtx) // 2. create a node without allocatable awareness nodeRes := map[v1.ResourceName]string{ @@ -423,7 +419,6 @@ func TestAllocatable(t *testing.T) { func TestSchedulerInformers(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "scheduler-informer") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ @@ -526,7 +521,6 @@ func TestNodeEvents(t *testing.T) { // 4. Remove the taint from node2; pod2 should now schedule on node2 testCtx := initTest(t, "node-events") - defer testutils.CleanupTest(t, testCtx) defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) // 1.1 create pod1 diff --git a/test/integration/scheduler/scoring/priorities_test.go b/test/integration/scheduler/scoring/priorities_test.go index 52e52ebbfb2..5c62801db1d 100644 --- a/test/integration/scheduler/scoring/priorities_test.go +++ b/test/integration/scheduler/scoring/priorities_test.go @@ -95,7 +95,7 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *tes 0, scheduler.WithProfiles(cfg.Profiles...), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) return testCtx } @@ -131,7 +131,7 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext 0, scheduler.WithProfiles(cfg.Profiles...), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) return testCtx } @@ -140,7 +140,6 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext // works correctly. func TestNodeResourcesScoring(t *testing.T) { testCtx := initTestSchedulerForNodeResourcesTest(t) - defer testutils.CleanupTest(t, testCtx) // Add a few nodes. 
_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity( map[v1.ResourceName]string{ @@ -204,7 +203,6 @@ func TestNodeResourcesScoring(t *testing.T) { // works correctly. func TestNodeAffinityScoring(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name) - defer testutils.CleanupTest(t, testCtx) // Add a few nodes. _, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4) if err != nil { @@ -324,7 +322,6 @@ func TestPodAffinityScoring(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name) - defer testutils.CleanupTest(t, testCtx) // Add a few nodes. nodesInTopology, err := createAndWaitForNodesInCache(testCtx, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5) if err != nil { @@ -369,7 +366,6 @@ func TestPodAffinityScoring(t *testing.T) { // works correctly, i.e., the pod gets scheduled to the node where its container images are ready. func TestImageLocalityScoring(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name) - defer testutils.CleanupTest(t, testCtx) // Create a node with the large image. // We use a fake large image as the test image used by the pod, which has @@ -602,7 +598,6 @@ func TestPodTopologySpreadScoring(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)() testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet ns := testCtx.NS.Name @@ -653,9 +648,6 @@ func TestPodTopologySpreadScoring(t *testing.T) { // The setup has 300 nodes over 3 zones. 
func TestDefaultPodTopologySpreadScoring(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name) - t.Cleanup(func() { - testutils.CleanupTest(t, testCtx) - }) cs := testCtx.ClientSet ns := testCtx.NS.Name diff --git a/test/integration/scheduler/taint/taint_test.go b/test/integration/scheduler/taint/taint_test.go index a9b2ef6ea46..7323c46e927 100644 --- a/test/integration/scheduler/taint/taint_test.go +++ b/test/integration/scheduler/taint/taint_test.go @@ -79,7 +79,6 @@ func TestTaintNodeByCondition(t *testing.T) { admission.SetExternalKubeInformerFactory(externalInformers) testCtx = testutils.InitTestScheduler(t, testCtx) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet nsName := testCtx.NS.Name @@ -108,7 +107,7 @@ func TestTaintNodeByCondition(t *testing.T) { // Waiting for all controllers to sync externalInformers.Start(testCtx.Ctx.Done()) externalInformers.WaitForCacheSync(testCtx.Ctx.Done()) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) // Run all controllers go nc.Run(testCtx.Ctx) diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 68442ab89e0..9df62213ab3 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -143,15 +143,24 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface) { // TestContext store necessary context info type TestContext struct { - CloseFn framework.TearDownFunc NS *v1.Namespace ClientSet clientset.Interface KubeConfig *restclient.Config InformerFactory informers.SharedInformerFactory DynInformerFactory dynamicinformer.DynamicSharedInformerFactory Scheduler *scheduler.Scheduler - Ctx context.Context - CancelFn context.CancelFunc + // This is the top context when initializing the test environment. + Ctx context.Context + // CancelFn will cancel the context above. 
+ CancelFn context.CancelFunc + // CloseFn will stop the apiserver and clean up the resources + // after itself, including shutting down its storage layer. + CloseFn framework.TearDownFunc + // This is the context when initializing scheduler. + SchedulerCtx context.Context + // SchedulerCloseFn will tear down the resources in creating scheduler, + // including the scheduler itself. + SchedulerCloseFn framework.TearDownFunc } // CleanupNodes cleans all nodes which were created during integration test @@ -176,25 +185,39 @@ func PodDeleted(c clientset.Interface, podNamespace, podName string) wait.Condit } } -// SyncInformerFactory starts informer and waits for caches to be synced -func SyncInformerFactory(testCtx *TestContext) { - testCtx.InformerFactory.Start(testCtx.Ctx.Done()) - if testCtx.DynInformerFactory != nil { - testCtx.DynInformerFactory.Start(testCtx.Ctx.Done()) +// PodsCleanedUp returns true if all pods are deleted in the specific namespace. +func PodsCleanedUp(ctx context.Context, c clientset.Interface, namespace string) wait.ConditionFunc { + return func() (bool, error) { + list, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + return len(list.Items) == 0, nil } - testCtx.InformerFactory.WaitForCacheSync(testCtx.Ctx.Done()) +} + +// SyncSchedulerInformerFactory starts informer and waits for caches to be synced +func SyncSchedulerInformerFactory(testCtx *TestContext) { + testCtx.InformerFactory.Start(testCtx.SchedulerCtx.Done()) if testCtx.DynInformerFactory != nil { - testCtx.DynInformerFactory.WaitForCacheSync(testCtx.Ctx.Done()) + testCtx.DynInformerFactory.Start(testCtx.SchedulerCtx.Done()) + } + testCtx.InformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done()) + if testCtx.DynInformerFactory != nil { + testCtx.DynInformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done()) } } // CleanupTest cleans related resources which were created during integration test func CleanupTest(t 
*testing.T, testCtx *TestContext) { - // Kill the scheduler. + // Cancel the context of the whole test environment, it will terminate the scheduler as well. testCtx.CancelFn() - // Cleanup nodes. - testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + + // Cleanup nodes and namespaces. + testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}) framework.DeleteNamespaceOrDie(testCtx.ClientSet, testCtx.NS, t) + + // Terminate the apiserver. testCtx.CloseFn() } @@ -330,11 +353,13 @@ func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error { // InitTestAPIServer initializes a test environment and creates an API server with default // configuration. +// It registers cleanup functions to t.Cleanup(), they will be called when the test completes, +// no need to do this again. func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext { - ctx, cancelFunc := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) testCtx := TestContext{ Ctx: ctx, - CancelFn: cancelFunc, + CancelFn: cancel, } testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(t, framework.TestServerSetup{ @@ -354,6 +379,10 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf testCtx.NS = framework.CreateNamespaceOrDie(testCtx.ClientSet, "default", t) } + t.Cleanup(func() { + CleanupTest(t, &testCtx) + }) + return &testCtx } @@ -388,6 +417,9 @@ func InitTestSchedulerWithOptions( resyncPeriod time.Duration, opts ...scheduler.Option, ) *TestContext { + ctx, cancel := context.WithCancel(testCtx.Ctx) + testCtx.SchedulerCtx = ctx + // 1. 
Create scheduler testCtx.InformerFactory = scheduler.NewInformerFactory(testCtx.ClientSet, resyncPeriod) if testCtx.KubeConfig != nil { @@ -406,7 +438,7 @@ func InitTestSchedulerWithOptions( testCtx.InformerFactory, testCtx.DynInformerFactory, profile.NewRecorderFactory(eventBroadcaster), - testCtx.Ctx.Done(), + ctx.Done(), opts..., ) @@ -414,13 +446,19 @@ func InitTestSchedulerWithOptions( t.Fatalf("Couldn't create scheduler: %v", err) } - eventBroadcaster.StartRecordingToSink(testCtx.Ctx.Done()) + eventBroadcaster.StartRecordingToSink(ctx.Done()) oldCloseFn := testCtx.CloseFn testCtx.CloseFn = func() { oldCloseFn() eventBroadcaster.Shutdown() } + + testCtx.SchedulerCloseFn = func() { + cancel() + eventBroadcaster.Shutdown() + } + return testCtx } @@ -488,8 +526,8 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.Di // configuration. func InitTestSchedulerWithNS(t *testing.T, nsPrefix string, opts ...scheduler.Option) *TestContext { testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, nsPrefix, nil), 0, opts...) 
- SyncInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.Ctx) + SyncSchedulerInformerFactory(testCtx) + go testCtx.Scheduler.Run(testCtx.SchedulerCtx) return testCtx } @@ -512,8 +550,8 @@ func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext { t, InitTestAPIServer(t, nsPrefix, nil), 0, scheduler.WithProfiles(cfg.Profiles...)) - SyncInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.Ctx) + SyncSchedulerInformerFactory(testCtx) + go testCtx.Scheduler.Run(testCtx.SchedulerCtx) return testCtx } diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go index efb2f512081..0807aaef9ca 100644 --- a/test/integration/volumescheduling/volume_binding_test.go +++ b/test/integration/volumescheduling/volume_binding_test.go @@ -1000,8 +1000,6 @@ func TestRescheduleProvisioning(t *testing.T) { defer func() { testCtx.CancelFn() deleteTestObjects(clientset, ns, metav1.DeleteOptions{}) - testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) - testCtx.CloseFn() }() ctrl, informerFactory, err := initPVController(t, testCtx, 0) @@ -1049,7 +1047,7 @@ func TestRescheduleProvisioning(t *testing.T) { func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig { testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod) - testutil.SyncInformerFactory(testCtx) + testutil.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) clientset := testCtx.ClientSet @@ -1087,7 +1085,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t teardown: func() { klog.Infof("test cluster %q start to tear down", ns) deleteTestObjects(clientset, ns, metav1.DeleteOptions{}) - testutil.CleanupTest(t, testCtx) }, } } diff --git 
a/test/integration/volumescheduling/volume_capacity_priority_test.go b/test/integration/volumescheduling/volume_capacity_priority_test.go index 9122b18d203..aeeac39f43e 100644 --- a/test/integration/volumescheduling/volume_capacity_priority_test.go +++ b/test/integration/volumescheduling/volume_capacity_priority_test.go @@ -48,7 +48,7 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node { func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig { testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod) - testutil.SyncInformerFactory(testCtx) + testutil.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) clientset := testCtx.ClientSet @@ -71,7 +71,6 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPe teardown: func() { klog.Infof("test cluster %q start to tear down", ns) deleteTestObjects(clientset, ns, metav1.DeleteOptions{}) - testutil.CleanupTest(t, testCtx) }, } }