k3s-io/kubernetes (mirror of https://github.com/k3s-io/kubernetes.git)
commit 2d866ec2fc (parent d86b74b017)

Teardown only scheduler in integration tests

Signed-off-by: Kante Yin <kerthcet@gmail.com>
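What the commit does, in brief: environment teardown moves into the test helpers themselves. InitTestAPIServer now registers CleanupTest with t.Cleanup, so the explicit "defer testutils.CleanupTest(t, testCtx)" lines scattered across the integration tests below are deleted; SyncInformerFactory is renamed to SyncSchedulerInformerFactory and rebased onto a new scheduler-scoped context; and TestContext gains SchedulerCtx/SchedulerCloseFn so a test can tear down only the scheduler while the apiserver keeps running. A minimal before/after sketch of a test body under these helpers (the test name is hypothetical; the helpers are the real ones changed below):

    // Before this commit: every test tore the environment down by hand.
    func TestExample(t *testing.T) { // hypothetical test
        testCtx := testutils.InitTestAPIServer(t, "example", nil)
        defer testutils.CleanupTest(t, testCtx) // easy to forget or misplace
        // ... test logic ...
    }

    // After this commit: InitTestAPIServer registers cleanup itself.
    func TestExample(t *testing.T) { // hypothetical test
        testCtx := testutils.InitTestAPIServer(t, "example", nil)
        // ... test logic; CleanupTest runs automatically via t.Cleanup ...
        _ = testCtx
    }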
@@ -104,9 +104,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
 			testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil)
 
-			// Build clientset and informers for controllers.
-			defer testutils.CleanupTest(t, testCtx)
-
 			cs := testCtx.ClientSet
 
 			// Build clientset and informers for controllers.

@@ -263,7 +260,6 @@ func TestTaintBasedEvictions(t *testing.T) {
 			podTolerations.SetExternalKubeClientSet(externalClientset)
 			podTolerations.SetExternalKubeInformerFactory(externalInformers)
 
-			defer testutils.CleanupTest(t, testCtx)
 			cs := testCtx.ClientSet
 
 			// Start NodeLifecycleController for taint.

@@ -77,7 +77,6 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
 			testCtx := setup(t, "podgc-orphaned")
-			defer testutils.CleanupTest(t, testCtx)
 			cs := testCtx.ClientSet
 
 			node := &v1.Node{

@@ -180,7 +179,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)()
 			testCtx := setup(t, "podgc-out-of-service")
-			defer testutils.CleanupTest(t, testCtx)
 			cs := testCtx.ClientSet
 
 			node := &v1.Node{
@@ -28,9 +28,7 @@ import (
 // TestDefaultBinder tests the binding process in the scheduler.
 func TestDefaultBinder(t *testing.T) {
 	testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, "", nil), 0)
-	testutil.SyncInformerFactory(testCtx)
-	// Do not start scheduler routine.
-	defer testutil.CleanupTest(t, testCtx)
+	testutil.SyncSchedulerInformerFactory(testCtx)
 
 	// Add a node.
 	node, err := testutil.CreateNode(testCtx.ClientSet, st.MakeNode().Name("testnode").Obj())
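The rename above is the first appearance of a pattern repeated through the rest of the diff: informer start and cache sync are now driven by the scheduler-scoped context (see SyncSchedulerInformerFactory in test/integration/util further down), so closing the scheduler also stops the informers it started. A sketch of the resulting init sequence with the helpers these tests use; note TestDefaultBinder deliberately never starts the scheduling loop, and while the util helpers run the scheduler with testCtx.SchedulerCtx, several tests in this commit still pass testCtx.Ctx, which works because SchedulerCtx is derived from Ctx:

    testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, "", nil), 0)
    testutil.SyncSchedulerInformerFactory(testCtx) // caches sync on the scheduler's context
    go testCtx.Scheduler.Run(testCtx.SchedulerCtx) // skipped when a test drives binding manually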
@@ -354,9 +354,8 @@ func TestSchedulerExtender(t *testing.T) {
 	}
 
 	testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithExtenders(extenders...))
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
-	defer testutils.CleanupTest(t, testCtx)
 
 	DoTestPodScheduling(testCtx.NS, t, clientSet)
 }

@@ -66,7 +66,6 @@ var (
 // anti-affinity predicate functions works correctly.
 func TestInterPodAffinity(t *testing.T) {
 	testCtx := initTest(t, "")
-	defer testutils.CleanupTest(t, testCtx)
 
 	// Add a few nodes with labels
 	nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2)

@@ -990,7 +989,6 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			testCtx := initTest(t, "")
-			defer testutils.CleanupTest(t, testCtx)
 
 			// Add a few nodes with labels
 			nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2)

@@ -1492,7 +1490,6 @@ func TestPodTopologySpreadFilter(t *testing.T) {
 			testCtx := initTest(t, "pts-predicate")
 			cs := testCtx.ClientSet
 			ns := testCtx.NS.Name
-			defer testutils.CleanupTest(t, testCtx)
 
 			for i := range tt.nodes {
 				if _, err := createNode(cs, tt.nodes[i]); err != nil {

@@ -1761,7 +1758,6 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tt.enableReadWriteOncePod)()
 
 			testCtx := initTest(t, "scheduler-informer")
-			defer testutils.CleanupTest(t, testCtx)
 
 			if tt.init != nil {
 				if err := tt.init(testCtx.ClientSet, testCtx.NS.Name); err != nil {
[One file's diff is suppressed because it is too large.]
@@ -178,10 +178,9 @@ func TestPreemption(t *testing.T) {
 		0,
 		scheduler.WithProfiles(cfg.Profiles...),
 		scheduler.WithFrameworkOutOfTreeRegistry(registry))
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{

@@ -501,7 +500,6 @@ func TestNonPreemption(t *testing.T) {
 	var preemptNever = v1.PreemptNever
 	// Initialize scheduler.
 	testCtx := initTest(t, "non-preemption")
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 	tests := []struct {
 		name string

@@ -579,7 +577,6 @@ func TestNonPreemption(t *testing.T) {
 func TestDisablePreemption(t *testing.T) {
 	// Initialize scheduler, and disable preemption.
 	testCtx := initTestDisablePreemption(t, "disable-preemption")
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	tests := []struct {

@@ -659,7 +656,6 @@ func TestDisablePreemption(t *testing.T) {
 func TestPodPriorityResolution(t *testing.T) {
 	admission := priority.NewPlugin()
 	testCtx := testutils.InitTestScheduler(t, testutils.InitTestAPIServer(t, "preemption", admission))
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	// Build clientset and informers for controllers.

@@ -671,7 +667,7 @@ func TestPodPriorityResolution(t *testing.T) {
 	admission.SetExternalKubeInformerFactory(externalInformers)
 
 	// Waiting for all controllers to sync
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	externalInformers.Start(testCtx.Ctx.Done())
 	externalInformers.WaitForCacheSync(testCtx.Ctx.Done())
 

@@ -780,7 +776,6 @@ func mkPriorityPodWithGrace(tc *testutils.TestContext, name string, priority int32, grace int64) *v1.Pod {
 func TestPreemptionStarvation(t *testing.T) {
 	// Initialize scheduler.
 	testCtx := initTest(t, "preemption")
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	tests := []struct {

@@ -879,7 +874,6 @@ func TestPreemptionStarvation(t *testing.T) {
 func TestPreemptionRaces(t *testing.T) {
 	// Initialize scheduler.
 	testCtx := initTest(t, "preemption-race")
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	tests := []struct {

@@ -1136,9 +1130,6 @@ func TestNominatedNodeCleanUp(t *testing.T) {
 				scheduler.WithProfiles(cfg.Profiles...),
 				scheduler.WithFrameworkOutOfTreeRegistry(tt.outOfTreeRegistry),
 			)
-			t.Cleanup(func() {
-				testutils.CleanupTest(t, testCtx)
-			})
 
 			cs, ns := testCtx.ClientSet, testCtx.NS.Name
 			// Create a node with the specified capacity.

@@ -1227,7 +1218,6 @@ func addPodConditionReady(pod *v1.Pod) {
 func TestPDBInPreemption(t *testing.T) {
 	// Initialize scheduler.
 	testCtx := initTest(t, "preemption-pdb")
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	initDisruptionController(t, testCtx)

@@ -1480,7 +1470,7 @@ func TestPDBInPreemption(t *testing.T) {
 
 func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testutils.TestContext {
 	testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	// wraps the NextPod() method to make it appear the preemption has been done already and the nominated node has been set.
 	f := testCtx.Scheduler.NextPod
 	testCtx.Scheduler.NextPod = func() (podInfo *framework.QueuedPodInfo) {

@@ -1561,9 +1551,6 @@ func TestPreferNominatedNode(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			testCtx := initTestPreferNominatedNode(t, "perfer-nominated-node")
-			t.Cleanup(func() {
-				testutils.CleanupTest(t, testCtx)
-			})
 			cs := testCtx.ClientSet
 			nsName := testCtx.NS.Name
 			var err error

@@ -1637,10 +1624,9 @@ func TestReadWriteOncePodPreemption(t *testing.T) {
 		testutils.InitTestAPIServer(t, "preemption", nil),
 		0,
 		scheduler.WithProfiles(cfg.Profiles...))
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	storage := v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}
@@ -124,8 +124,7 @@ func TestSchedulingGates(t *testing.T) {
 				scheduler.WithPodInitialBackoffSeconds(0),
 				scheduler.WithPodMaxBackoffSeconds(0),
 			)
-			testutils.SyncInformerFactory(testCtx)
-			defer testutils.CleanupTest(t, testCtx)
+			testutils.SyncSchedulerInformerFactory(testCtx)
 
 			cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
 			for _, p := range tt.pods {

@@ -186,9 +185,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
 		scheduler.WithPodInitialBackoffSeconds(0),
 		scheduler.WithPodMaxBackoffSeconds(0),
 	)
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 
-	defer testutils.CleanupTest(t, testCtx)
 	defer testCtx.Scheduler.SchedulingQueue.Close()
 
 	cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx

@@ -371,7 +369,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
 		scheduler.WithPodInitialBackoffSeconds(0),
 		scheduler.WithPodMaxBackoffSeconds(0),
 	)
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 
 	defer testutils.CleanupTest(t, testCtx)
 
@@ -51,7 +51,6 @@ type nodeStateManager struct {
 
 func TestUnschedulableNodes(t *testing.T) {
 	testCtx := initTest(t, "unschedulable-nodes")
-	defer testutils.CleanupTest(t, testCtx)
 
 	nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister()
 	// NOTE: This test cannot run in parallel, because it is creating and deleting

@@ -191,7 +190,6 @@ func TestMultipleSchedulers(t *testing.T) {
 
 	// 1. create and start default-scheduler
 	testCtx := initTest(t, "multi-scheduler")
-	defer testutils.CleanupTest(t, testCtx)
 
 	// 2. create a node
 	node := &v1.Node{

@@ -263,7 +261,7 @@ func TestMultipleSchedulers(t *testing.T) {
 		},
 	})
 	testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithProfiles(cfg.Profiles...))
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
 	// 6. **check point-2**:

@@ -285,7 +283,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 	})
 
 	testCtx := initTest(t, "multi-scheduler", scheduler.WithProfiles(cfg.Profiles...))
-	defer testutils.CleanupTest(t, testCtx)
 
 	node := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"},

@@ -349,7 +346,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 // This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
 func TestAllocatable(t *testing.T) {
 	testCtx := initTest(t, "allocatable")
-	defer testutils.CleanupTest(t, testCtx)
 
 	// 2. create a node without allocatable awareness
 	nodeRes := map[v1.ResourceName]string{

@@ -423,7 +419,6 @@ func TestAllocatable(t *testing.T) {
 func TestSchedulerInformers(t *testing.T) {
 	// Initialize scheduler.
 	testCtx := initTest(t, "scheduler-informer")
-	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
 
 	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{

@@ -526,7 +521,6 @@ func TestNodeEvents(t *testing.T) {
 	// 4. Remove the taint from node2; pod2 should now schedule on node2
 
 	testCtx := initTest(t, "node-events")
-	defer testutils.CleanupTest(t, testCtx)
 	defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
 	// 1.1 create pod1
@@ -95,7 +95,7 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *testutils.TestContext {
 		0,
 		scheduler.WithProfiles(cfg.Profiles...),
 	)
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 	return testCtx
 }

@@ -131,7 +131,7 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
 		0,
 		scheduler.WithProfiles(cfg.Profiles...),
 	)
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 	return testCtx
 }

@@ -140,7 +140,6 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
 // works correctly.
 func TestNodeResourcesScoring(t *testing.T) {
 	testCtx := initTestSchedulerForNodeResourcesTest(t)
-	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
 	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
 		map[v1.ResourceName]string{

@@ -204,7 +203,6 @@ func TestNodeResourcesScoring(t *testing.T) {
 // works correctly.
 func TestNodeAffinityScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name)
-	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
 	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4)
 	if err != nil {

@@ -324,7 +322,6 @@ func TestPodAffinityScoring(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name)
-			defer testutils.CleanupTest(t, testCtx)
 			// Add a few nodes.
 			nodesInTopology, err := createAndWaitForNodesInCache(testCtx, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5)
 			if err != nil {

@@ -369,7 +366,6 @@ func TestPodAffinityScoring(t *testing.T) {
 // works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
 func TestImageLocalityScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name)
-	defer testutils.CleanupTest(t, testCtx)
 
 	// Create a node with the large image.
 	// We use a fake large image as the test image used by the pod, which has

@@ -602,7 +598,6 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)()
 
 			testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
-			defer testutils.CleanupTest(t, testCtx)
 			cs := testCtx.ClientSet
 			ns := testCtx.NS.Name
 

@@ -653,9 +648,6 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 // The setup has 300 nodes over 3 zones.
 func TestDefaultPodTopologySpreadScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
-	t.Cleanup(func() {
-		testutils.CleanupTest(t, testCtx)
-	})
 	cs := testCtx.ClientSet
 	ns := testCtx.NS.Name
 
@@ -79,7 +79,6 @@ func TestTaintNodeByCondition(t *testing.T) {
 	admission.SetExternalKubeInformerFactory(externalInformers)
 
 	testCtx = testutils.InitTestScheduler(t, testCtx)
-	defer testutils.CleanupTest(t, testCtx)
 
 	cs := testCtx.ClientSet
 	nsName := testCtx.NS.Name

@@ -108,7 +107,7 @@ func TestTaintNodeByCondition(t *testing.T) {
 	// Waiting for all controllers to sync
 	externalInformers.Start(testCtx.Ctx.Done())
 	externalInformers.WaitForCacheSync(testCtx.Ctx.Done())
-	testutils.SyncInformerFactory(testCtx)
+	testutils.SyncSchedulerInformerFactory(testCtx)
 
 	// Run all controllers
 	go nc.Run(testCtx.Ctx)
@@ -143,15 +143,24 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface) {
 
 // TestContext store necessary context info
 type TestContext struct {
-	CloseFn            framework.TearDownFunc
 	NS                 *v1.Namespace
 	ClientSet          clientset.Interface
 	KubeConfig         *restclient.Config
 	InformerFactory    informers.SharedInformerFactory
 	DynInformerFactory dynamicinformer.DynamicSharedInformerFactory
 	Scheduler          *scheduler.Scheduler
-	Ctx                context.Context
-	CancelFn           context.CancelFunc
+	// This is the top context when initializing the test environment.
+	Ctx context.Context
+	// CancelFn will cancel the context above.
+	CancelFn context.CancelFunc
+	// CloseFn will stop the apiserver and clean up the resources
+	// after itself, including shutting down its storage layer.
+	CloseFn framework.TearDownFunc
+	// This is the context when initializing scheduler.
+	SchedulerCtx context.Context
+	// SchedulerCloseFn will tear down the resources in creating scheduler,
+	// including the scheduler itself.
+	SchedulerCloseFn framework.TearDownFunc
 }
 
 // CleanupNodes cleans all nodes which were created during integration test
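The struct now separates two lifetimes: Ctx/CancelFn/CloseFn cover the whole test environment including the apiserver, while SchedulerCtx/SchedulerCloseFn cover only the scheduler. SchedulerCtx is derived from Ctx in InitTestSchedulerWithOptions (further down), so cancelling the environment still stops the scheduler. A sketch of the two teardown scopes this enables, assuming a populated TestContext:

    testCtx.SchedulerCloseFn() // stop only the scheduler; the apiserver stays up
    // ... e.g. build a new scheduler against the same apiserver ...
    testCtx.CancelFn()         // cancel Ctx (and the derived SchedulerCtx)
    testCtx.CloseFn()          // stop the apiserver and shut down its storage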
@@ -176,25 +185,39 @@ func PodDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 	}
 }
 
-// SyncInformerFactory starts informer and waits for caches to be synced
-func SyncInformerFactory(testCtx *TestContext) {
-	testCtx.InformerFactory.Start(testCtx.Ctx.Done())
-	if testCtx.DynInformerFactory != nil {
-		testCtx.DynInformerFactory.Start(testCtx.Ctx.Done())
+// PodsCleanedUp returns true if all pods are deleted in the specific namespace.
+func PodsCleanedUp(ctx context.Context, c clientset.Interface, namespace string) wait.ConditionFunc {
+	return func() (bool, error) {
+		list, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
+		if err != nil {
+			return false, err
+		}
+		return len(list.Items) == 0, nil
 	}
-	testCtx.InformerFactory.WaitForCacheSync(testCtx.Ctx.Done())
+}
+
+// SyncSchedulerInformerFactory starts informer and waits for caches to be synced
+func SyncSchedulerInformerFactory(testCtx *TestContext) {
+	testCtx.InformerFactory.Start(testCtx.SchedulerCtx.Done())
 	if testCtx.DynInformerFactory != nil {
-		testCtx.DynInformerFactory.WaitForCacheSync(testCtx.Ctx.Done())
+		testCtx.DynInformerFactory.Start(testCtx.SchedulerCtx.Done())
+	}
+	testCtx.InformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done())
+	if testCtx.DynInformerFactory != nil {
+		testCtx.DynInformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done())
 	}
 }
 
 // CleanupTest cleans related resources which were created during integration test
 func CleanupTest(t *testing.T, testCtx *TestContext) {
-	// Kill the scheduler.
+	// Cancel the context of the whole test environment, it will terminate the scheduler as well.
 	testCtx.CancelFn()
-	// Cleanup nodes.
-	testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+
+	// Cleanup nodes and namespaces.
+	testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, *metav1.NewDeleteOptions(0), metav1.ListOptions{})
 	framework.DeleteNamespaceOrDie(testCtx.ClientSet, testCtx.NS, t)
+
+	// Terminate the apiserver.
 	testCtx.CloseFn()
 }
 
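PodsCleanedUp returns a wait.ConditionFunc, so it plugs straight into the polling helpers from k8s.io/apimachinery/pkg/util/wait. A hypothetical caller, where ctx, cs, and ns come from a typical test context and the interval and timeout values are illustrative only:

    if err := wait.Poll(100*time.Millisecond, 30*time.Second, testutils.PodsCleanedUp(ctx, cs, ns)); err != nil {
        t.Fatalf("pods in namespace %s were not cleaned up: %v", ns, err)
    }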
@@ -330,11 +353,13 @@ func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error {
 
 // InitTestAPIServer initializes a test environment and creates an API server with default
 // configuration.
+// It registers cleanup functions to t.Cleanup(), they will be called when the test completes,
+// no need to do this again.
 func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
-	ctx, cancelFunc := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(context.Background())
 	testCtx := TestContext{
 		Ctx:      ctx,
-		CancelFn: cancelFunc,
+		CancelFn: cancel,
 	}
 
 	testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(t, framework.TestServerSetup{
@@ -354,6 +379,10 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
 		testCtx.NS = framework.CreateNamespaceOrDie(testCtx.ClientSet, "default", t)
 	}
 
+	t.Cleanup(func() {
+		CleanupTest(t, &testCtx)
+	})
+
 	return &testCtx
 }
 
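Registering CleanupTest through t.Cleanup rather than a caller-side defer also gets the ordering right for free: Go's testing package runs cleanup functions last-in-first-out, after the test and all of its subtests have completed, so teardown registered here cannot fire before a subtest finishes. A standalone illustration of that ordering (not from the diff):

    func TestCleanupOrder(t *testing.T) {
        t.Cleanup(func() { t.Log("runs second: registered first") })
        t.Cleanup(func() { t.Log("runs first: registered last") })
        // both run only after the test body (and any subtests) return
    }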
@@ -388,6 +417,9 @@ func InitTestSchedulerWithOptions(
 	resyncPeriod time.Duration,
 	opts ...scheduler.Option,
 ) *TestContext {
+	ctx, cancel := context.WithCancel(testCtx.Ctx)
+	testCtx.SchedulerCtx = ctx
+
 	// 1. Create scheduler
 	testCtx.InformerFactory = scheduler.NewInformerFactory(testCtx.ClientSet, resyncPeriod)
 	if testCtx.KubeConfig != nil {

@@ -406,7 +438,7 @@ func InitTestSchedulerWithOptions(
 		testCtx.InformerFactory,
 		testCtx.DynInformerFactory,
 		profile.NewRecorderFactory(eventBroadcaster),
-		testCtx.Ctx.Done(),
+		ctx.Done(),
 		opts...,
 	)
 
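Because SchedulerCtx is created with context.WithCancel(testCtx.Ctx), cancellation composes in one direction only: cancelling the scheduler context stops just the scheduler, while cancelling the parent test context stops both. A small self-contained illustration of that propagation:

    parent, cancelParent := context.WithCancel(context.Background())
    child, cancelChild := context.WithCancel(parent)
    cancelChild()   // child is done; parent is unaffected
    cancelParent()  // would also have cancelled child, had it still been alive
    <-parent.Done() // both contexts are now closed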
@@ -414,13 +446,19 @@ func InitTestSchedulerWithOptions(
 		t.Fatalf("Couldn't create scheduler: %v", err)
 	}
 
-	eventBroadcaster.StartRecordingToSink(testCtx.Ctx.Done())
+	eventBroadcaster.StartRecordingToSink(ctx.Done())
 
 	oldCloseFn := testCtx.CloseFn
 	testCtx.CloseFn = func() {
 		oldCloseFn()
 		eventBroadcaster.Shutdown()
 	}
 
+	testCtx.SchedulerCloseFn = func() {
+		cancel()
+		eventBroadcaster.Shutdown()
+	}
+
 	return testCtx
 }
 
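This SchedulerCloseFn is the hook that gives the commit its title: it cancels only the scheduler's context and shuts down its event broadcaster, leaving the apiserver untouched. A hypothetical sketch of tearing down and replacing just the scheduler mid-test:

    testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, "swap", nil), 0)
    testutils.SyncSchedulerInformerFactory(testCtx)
    go testCtx.Scheduler.Run(testCtx.SchedulerCtx)

    testCtx.SchedulerCloseFn() // scheduler is gone; apiserver and namespace survive

    // Re-initialize a scheduler with different options against the same apiserver.
    testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0 /* resyncPeriod */)
    testutils.SyncSchedulerInformerFactory(testCtx)
    go testCtx.Scheduler.Run(testCtx.SchedulerCtx)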
@@ -488,8 +526,8 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.DisruptionController {
 // configuration.
 func InitTestSchedulerWithNS(t *testing.T, nsPrefix string, opts ...scheduler.Option) *TestContext {
 	testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
-	SyncInformerFactory(testCtx)
-	go testCtx.Scheduler.Run(testCtx.Ctx)
+	SyncSchedulerInformerFactory(testCtx)
+	go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
 	return testCtx
 }

@@ -512,8 +550,8 @@ func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
 		t, InitTestAPIServer(t, nsPrefix, nil),
 		0,
 		scheduler.WithProfiles(cfg.Profiles...))
-	SyncInformerFactory(testCtx)
-	go testCtx.Scheduler.Run(testCtx.Ctx)
+	SyncSchedulerInformerFactory(testCtx)
+	go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
 	return testCtx
 }
 
@@ -1000,8 +1000,6 @@ func TestRescheduleProvisioning(t *testing.T) {
 	defer func() {
 		testCtx.CancelFn()
 		deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-		testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
-		testCtx.CloseFn()
 	}()
 
 	ctrl, informerFactory, err := initPVController(t, testCtx, 0)

@@ -1049,7 +1047,7 @@ func TestRescheduleProvisioning(t *testing.T) {
 
 func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
 	testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
-	testutil.SyncInformerFactory(testCtx)
+	testutil.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
 	clientset := testCtx.ClientSet

@@ -1087,7 +1085,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
 		teardown: func() {
 			klog.Infof("test cluster %q start to tear down", ns)
 			deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-			testutil.CleanupTest(t, testCtx)
 		},
 	}
 }

@@ -48,7 +48,7 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
 
 func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
 	testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
-	testutil.SyncInformerFactory(testCtx)
+	testutil.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
 	clientset := testCtx.ClientSet

@@ -71,7 +71,6 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
 		teardown: func() {
 			klog.Infof("test cluster %q start to tear down", ns)
 			deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-			testutil.CleanupTest(t, testCtx)
 		},
 	}
 }