Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 09:22:44 +00:00)

Teardown only scheduler in integration tests

Signed-off-by: Kante Yin <kerthcet@gmail.com>

This commit is contained in:
parent d86b74b017
commit 2d866ec2fc
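Most hunks below make the same two mechanical changes: testutils.SyncInformerFactory is replaced by the new testutils.SyncSchedulerInformerFactory, and the explicit defer testutils.CleanupTest(t, testCtx) calls are dropped because InitTestAPIServer now registers that cleanup via t.Cleanup. A minimal sketch of the resulting test skeleton, assuming the testutils alias points at k8s.io/kubernetes/test/integration/util as in the touched files; the body itself is illustrative, not code from this commit:

    func TestSchedulerSkeleton(t *testing.T) {
        // Cleanup is registered through t.Cleanup inside InitTestAPIServer,
        // so there is no explicit defer testutils.CleanupTest(t, testCtx).
        testCtx := testutils.InitTestSchedulerWithOptions(
            t, testutils.InitTestAPIServer(t, "example", nil), 0)

        // Start the informers against the scheduler's own context and run it.
        testutils.SyncSchedulerInformerFactory(testCtx)
        go testCtx.Scheduler.Run(testCtx.SchedulerCtx)

        // ... create nodes and pods via testCtx.ClientSet and assert on scheduling ...
    }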
@@ -104,9 +104,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {

defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil)

// Build clientset and informers for controllers.
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

// Build clientset and informers for controllers.
@@ -263,7 +260,6 @@ func TestTaintBasedEvictions(t *testing.T) {
podTolerations.SetExternalKubeClientSet(externalClientset)
podTolerations.SetExternalKubeInformerFactory(externalInformers)

defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

// Start NodeLifecycleController for taint.

@@ -77,7 +77,6 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
t.Run(name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
testCtx := setup(t, "podgc-orphaned")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

node := &v1.Node{
@@ -180,7 +179,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)()
testCtx := setup(t, "podgc-out-of-service")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

node := &v1.Node{

@@ -28,9 +28,7 @@ import (
// TestDefaultBinder tests the binding process in the scheduler.
func TestDefaultBinder(t *testing.T) {
testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, "", nil), 0)
testutil.SyncInformerFactory(testCtx)
// Do not start scheduler routine.
defer testutil.CleanupTest(t, testCtx)
testutil.SyncSchedulerInformerFactory(testCtx)

// Add a node.
node, err := testutil.CreateNode(testCtx.ClientSet, st.MakeNode().Name("testnode").Obj())

@@ -354,9 +354,8 @@ func TestSchedulerExtender(t *testing.T) {
}

testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithExtenders(extenders...))
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
defer testutils.CleanupTest(t, testCtx)

DoTestPodScheduling(testCtx.NS, t, clientSet)
}

@@ -66,7 +66,6 @@ var (
// anti-affinity predicate functions works correctly.
func TestInterPodAffinity(t *testing.T) {
testCtx := initTest(t, "")
defer testutils.CleanupTest(t, testCtx)

// Add a few nodes with labels
nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2)
@@ -990,7 +989,6 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testCtx := initTest(t, "")
defer testutils.CleanupTest(t, testCtx)

// Add a few nodes with labels
nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2)
@@ -1492,7 +1490,6 @@ func TestPodTopologySpreadFilter(t *testing.T) {
testCtx := initTest(t, "pts-predicate")
cs := testCtx.ClientSet
ns := testCtx.NS.Name
defer testutils.CleanupTest(t, testCtx)

for i := range tt.nodes {
if _, err := createNode(cs, tt.nodes[i]); err != nil {
@@ -1761,7 +1758,6 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tt.enableReadWriteOncePod)()

testCtx := initTest(t, "scheduler-informer")
defer testutils.CleanupTest(t, testCtx)

if tt.init != nil {
if err := tt.init(testCtx.ClientSet, testCtx.NS.Name); err != nil {

(File diff suppressed because it is too large.)
@@ -178,10 +178,9 @@ func TestPreemption(t *testing.T) {
0,
scheduler.WithProfiles(cfg.Profiles...),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
@@ -501,7 +500,6 @@ func TestNonPreemption(t *testing.T) {
var preemptNever = v1.PreemptNever
// Initialize scheduler.
testCtx := initTest(t, "non-preemption")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet
tests := []struct {
name string
@@ -579,7 +577,6 @@ func TestNonPreemption(t *testing.T) {
func TestDisablePreemption(t *testing.T) {
// Initialize scheduler, and disable preemption.
testCtx := initTestDisablePreemption(t, "disable-preemption")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

tests := []struct {
@@ -659,7 +656,6 @@ func TestDisablePreemption(t *testing.T) {
func TestPodPriorityResolution(t *testing.T) {
admission := priority.NewPlugin()
testCtx := testutils.InitTestScheduler(t, testutils.InitTestAPIServer(t, "preemption", admission))
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

// Build clientset and informers for controllers.
@@ -671,7 +667,7 @@ func TestPodPriorityResolution(t *testing.T) {
admission.SetExternalKubeInformerFactory(externalInformers)

// Waiting for all controllers to sync
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
externalInformers.Start(testCtx.Ctx.Done())
externalInformers.WaitForCacheSync(testCtx.Ctx.Done())

@@ -780,7 +776,6 @@ func mkPriorityPodWithGrace(tc *testutils.TestContext, name string, priority int
func TestPreemptionStarvation(t *testing.T) {
// Initialize scheduler.
testCtx := initTest(t, "preemption")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

tests := []struct {
@@ -879,7 +874,6 @@ func TestPreemptionStarvation(t *testing.T) {
func TestPreemptionRaces(t *testing.T) {
// Initialize scheduler.
testCtx := initTest(t, "preemption-race")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

tests := []struct {
@@ -1136,9 +1130,6 @@ func TestNominatedNodeCleanUp(t *testing.T) {
scheduler.WithProfiles(cfg.Profiles...),
scheduler.WithFrameworkOutOfTreeRegistry(tt.outOfTreeRegistry),
)
t.Cleanup(func() {
testutils.CleanupTest(t, testCtx)
})

cs, ns := testCtx.ClientSet, testCtx.NS.Name
// Create a node with the specified capacity.
@@ -1227,7 +1218,6 @@ func addPodConditionReady(pod *v1.Pod) {
func TestPDBInPreemption(t *testing.T) {
// Initialize scheduler.
testCtx := initTest(t, "preemption-pdb")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

initDisruptionController(t, testCtx)
@@ -1480,7 +1470,7 @@ func TestPDBInPreemption(t *testing.T) {

func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testutils.TestContext {
testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
// wraps the NextPod() method to make it appear the preemption has been done already and the nominated node has been set.
f := testCtx.Scheduler.NextPod
testCtx.Scheduler.NextPod = func() (podInfo *framework.QueuedPodInfo) {
@@ -1561,9 +1551,6 @@ func TestPreferNominatedNode(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testCtx := initTestPreferNominatedNode(t, "perfer-nominated-node")
t.Cleanup(func() {
testutils.CleanupTest(t, testCtx)
})
cs := testCtx.ClientSet
nsName := testCtx.NS.Name
var err error
@@ -1637,10 +1624,9 @@ func TestReadWriteOncePodPreemption(t *testing.T) {
testutils.InitTestAPIServer(t, "preemption", nil),
0,
scheduler.WithProfiles(cfg.Profiles...))
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

storage := v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}
@@ -124,8 +124,7 @@ func TestSchedulingGates(t *testing.T) {
scheduler.WithPodInitialBackoffSeconds(0),
scheduler.WithPodMaxBackoffSeconds(0),
)
testutils.SyncInformerFactory(testCtx)
defer testutils.CleanupTest(t, testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)

cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
for _, p := range tt.pods {
@@ -186,9 +185,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
scheduler.WithPodInitialBackoffSeconds(0),
scheduler.WithPodMaxBackoffSeconds(0),
)
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)

defer testutils.CleanupTest(t, testCtx)
defer testCtx.Scheduler.SchedulingQueue.Close()

cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
@@ -371,7 +369,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
scheduler.WithPodInitialBackoffSeconds(0),
scheduler.WithPodMaxBackoffSeconds(0),
)
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)

defer testutils.CleanupTest(t, testCtx)

@@ -51,7 +51,6 @@ type nodeStateManager struct {

func TestUnschedulableNodes(t *testing.T) {
testCtx := initTest(t, "unschedulable-nodes")
defer testutils.CleanupTest(t, testCtx)

nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister()
// NOTE: This test cannot run in parallel, because it is creating and deleting
@@ -191,7 +190,6 @@ func TestMultipleSchedulers(t *testing.T) {

// 1. create and start default-scheduler
testCtx := initTest(t, "multi-scheduler")
defer testutils.CleanupTest(t, testCtx)

// 2. create a node
node := &v1.Node{
@@ -263,7 +261,7 @@ func TestMultipleSchedulers(t *testing.T) {
},
})
testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithProfiles(cfg.Profiles...))
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

// 6. **check point-2**:
@@ -285,7 +283,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
})

testCtx := initTest(t, "multi-scheduler", scheduler.WithProfiles(cfg.Profiles...))
defer testutils.CleanupTest(t, testCtx)

node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
@@ -349,7 +346,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
// This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
func TestAllocatable(t *testing.T) {
testCtx := initTest(t, "allocatable")
defer testutils.CleanupTest(t, testCtx)

// 2. create a node without allocatable awareness
nodeRes := map[v1.ResourceName]string{
@@ -423,7 +419,6 @@ func TestAllocatable(t *testing.T) {
func TestSchedulerInformers(t *testing.T) {
// Initialize scheduler.
testCtx := initTest(t, "scheduler-informer")
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
@@ -526,7 +521,6 @@ func TestNodeEvents(t *testing.T) {
// 4. Remove the taint from node2; pod2 should now schedule on node2

testCtx := initTest(t, "node-events")
defer testutils.CleanupTest(t, testCtx)
defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

// 1.1 create pod1
@@ -95,7 +95,7 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *tes
0,
scheduler.WithProfiles(cfg.Profiles...),
)
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
return testCtx
}
@@ -131,7 +131,7 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext
0,
scheduler.WithProfiles(cfg.Profiles...),
)
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
return testCtx
}
@@ -140,7 +140,6 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext
// works correctly.
func TestNodeResourcesScoring(t *testing.T) {
testCtx := initTestSchedulerForNodeResourcesTest(t)
defer testutils.CleanupTest(t, testCtx)
// Add a few nodes.
_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
map[v1.ResourceName]string{
@@ -204,7 +203,6 @@ func TestNodeResourcesScoring(t *testing.T) {
// works correctly.
func TestNodeAffinityScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name)
defer testutils.CleanupTest(t, testCtx)
// Add a few nodes.
_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4)
if err != nil {
@@ -324,7 +322,6 @@ func TestPodAffinityScoring(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name)
defer testutils.CleanupTest(t, testCtx)
// Add a few nodes.
nodesInTopology, err := createAndWaitForNodesInCache(testCtx, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5)
if err != nil {
@@ -369,7 +366,6 @@ func TestPodAffinityScoring(t *testing.T) {
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocalityScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name)
defer testutils.CleanupTest(t, testCtx)

// Create a node with the large image.
// We use a fake large image as the test image used by the pod, which has
@@ -602,7 +598,6 @@ func TestPodTopologySpreadScoring(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)()

testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet
ns := testCtx.NS.Name

@@ -653,9 +648,6 @@ func TestPodTopologySpreadScoring(t *testing.T) {
// The setup has 300 nodes over 3 zones.
func TestDefaultPodTopologySpreadScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
t.Cleanup(func() {
testutils.CleanupTest(t, testCtx)
})
cs := testCtx.ClientSet
ns := testCtx.NS.Name

@@ -79,7 +79,6 @@ func TestTaintNodeByCondition(t *testing.T) {
admission.SetExternalKubeInformerFactory(externalInformers)

testCtx = testutils.InitTestScheduler(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

cs := testCtx.ClientSet
nsName := testCtx.NS.Name
@@ -108,7 +107,7 @@ func TestTaintNodeByCondition(t *testing.T) {
// Waiting for all controllers to sync
externalInformers.Start(testCtx.Ctx.Done())
externalInformers.WaitForCacheSync(testCtx.Ctx.Done())
testutils.SyncInformerFactory(testCtx)
testutils.SyncSchedulerInformerFactory(testCtx)

// Run all controllers
go nc.Run(testCtx.Ctx)
@@ -143,15 +143,24 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface) {

// TestContext store necessary context info
type TestContext struct {
CloseFn framework.TearDownFunc
NS *v1.Namespace
ClientSet clientset.Interface
KubeConfig *restclient.Config
InformerFactory informers.SharedInformerFactory
DynInformerFactory dynamicinformer.DynamicSharedInformerFactory
Scheduler *scheduler.Scheduler
Ctx context.Context
CancelFn context.CancelFunc
// This is the top context when initializing the test environment.
Ctx context.Context
// CancelFn will cancel the context above.
CancelFn context.CancelFunc
// CloseFn will stop the apiserver and clean up the resources
// after itself, including shutting down its storage layer.
CloseFn framework.TearDownFunc
// This is the context when initializing scheduler.
SchedulerCtx context.Context
// SchedulerCloseFn will tear down the resources in creating scheduler,
// including the scheduler itself.
SchedulerCloseFn framework.TearDownFunc
}

// CleanupNodes cleans all nodes which were created during integration test
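The reworked TestContext separates the lifetime of the whole test environment (Ctx, CancelFn, CloseFn) from the lifetime of the scheduler (SchedulerCtx, SchedulerCloseFn). The real wiring happens in InitTestAPIServer and InitTestSchedulerWithOptions further down; the sketch below only illustrates the intended relationship between the two contexts, using the field names above:

    // Illustrative only: the scheduler context is derived from the environment
    // context, so cancelling the environment also stops the scheduler, while
    // SchedulerCloseFn stops only the scheduler.
    func newTestContextSketch() *TestContext {
        envCtx, envCancel := context.WithCancel(context.Background())
        tc := &TestContext{Ctx: envCtx, CancelFn: envCancel}

        schedCtx, schedCancel := context.WithCancel(tc.Ctx)
        tc.SchedulerCtx = schedCtx
        tc.SchedulerCloseFn = func() { schedCancel() }
        return tc
    }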
@@ -176,25 +185,39 @@ func PodDeleted(c clientset.Interface, podNamespace, podName string) wait.Condit
}
}

// SyncInformerFactory starts informer and waits for caches to be synced
func SyncInformerFactory(testCtx *TestContext) {
testCtx.InformerFactory.Start(testCtx.Ctx.Done())
if testCtx.DynInformerFactory != nil {
testCtx.DynInformerFactory.Start(testCtx.Ctx.Done())
// PodsCleanedUp returns true if all pods are deleted in the specific namespace.
func PodsCleanedUp(ctx context.Context, c clientset.Interface, namespace string) wait.ConditionFunc {
return func() (bool, error) {
list, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
return len(list.Items) == 0, nil
}
testCtx.InformerFactory.WaitForCacheSync(testCtx.Ctx.Done())
}

// SyncSchedulerInformerFactory starts informer and waits for caches to be synced
func SyncSchedulerInformerFactory(testCtx *TestContext) {
testCtx.InformerFactory.Start(testCtx.SchedulerCtx.Done())
if testCtx.DynInformerFactory != nil {
testCtx.DynInformerFactory.WaitForCacheSync(testCtx.Ctx.Done())
testCtx.DynInformerFactory.Start(testCtx.SchedulerCtx.Done())
}
testCtx.InformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done())
if testCtx.DynInformerFactory != nil {
testCtx.DynInformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done())
}
}

// CleanupTest cleans related resources which were created during integration test
func CleanupTest(t *testing.T, testCtx *TestContext) {
// Kill the scheduler.
// Cancel the context of the whole test environment, it will terminate the scheduler as well.
testCtx.CancelFn()
// Cleanup nodes.
testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

// Cleanup nodes and namespaces.
testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, *metav1.NewDeleteOptions(0), metav1.ListOptions{})
framework.DeleteNamespaceOrDie(testCtx.ClientSet, testCtx.NS, t)

// Terminate the apiserver.
testCtx.CloseFn()
}
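The new PodsCleanedUp helper returns a wait.ConditionFunc, so it can be handed straight to the polling helpers in k8s.io/apimachinery/pkg/util/wait. A hypothetical caller is sketched below, written as if it lived next to the helpers above; the helper name, poll interval, and timeout are illustrative choices, not code from this commit:

    // waitForNamespaceDrain blocks until all pods in the test namespace are gone.
    func waitForNamespaceDrain(t *testing.T, testCtx *TestContext) {
        if err := wait.Poll(100*time.Millisecond, 30*time.Second,
            PodsCleanedUp(testCtx.Ctx, testCtx.ClientSet, testCtx.NS.Name)); err != nil {
            t.Fatalf("pods in namespace %s were not cleaned up: %v", testCtx.NS.Name, err)
        }
    }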
@@ -330,11 +353,13 @@ func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error {

// InitTestAPIServer initializes a test environment and creates an API server with default
// configuration.
// It registers cleanup functions to t.Cleanup(), they will be called when the test completes,
// no need to do this again.
func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
ctx, cancelFunc := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(context.Background())
testCtx := TestContext{
Ctx: ctx,
CancelFn: cancelFunc,
CancelFn: cancel,
}

testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(t, framework.TestServerSetup{
@@ -354,6 +379,10 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
testCtx.NS = framework.CreateNamespaceOrDie(testCtx.ClientSet, "default", t)
}

t.Cleanup(func() {
CleanupTest(t, &testCtx)
})

return &testCtx
}
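Because InitTestAPIServer now registers CleanupTest through t.Cleanup, call sites can drop their own defer testutils.CleanupTest(t, testCtx), which is exactly what most hunks above delete. t.Cleanup runs registered functions in last-in, first-out order, so any teardown registered later in a test (for example, a scheduler-only teardown) runs before the apiserver cleanup registered here. A generic illustration of that ordering, independent of the scheduler code:

    func TestCleanupOrder(t *testing.T) {
        t.Cleanup(func() { t.Log("registered first, runs last (think: stop the apiserver)") })
        t.Cleanup(func() { t.Log("registered second, runs first (think: stop the scheduler)") })
    }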
@@ -388,6 +417,9 @@ func InitTestSchedulerWithOptions(
resyncPeriod time.Duration,
opts ...scheduler.Option,
) *TestContext {
ctx, cancel := context.WithCancel(testCtx.Ctx)
testCtx.SchedulerCtx = ctx

// 1. Create scheduler
testCtx.InformerFactory = scheduler.NewInformerFactory(testCtx.ClientSet, resyncPeriod)
if testCtx.KubeConfig != nil {
@@ -406,7 +438,7 @@ func InitTestSchedulerWithOptions(
testCtx.InformerFactory,
testCtx.DynInformerFactory,
profile.NewRecorderFactory(eventBroadcaster),
testCtx.Ctx.Done(),
ctx.Done(),
opts...,
)

@@ -414,13 +446,19 @@ func InitTestSchedulerWithOptions(
t.Fatalf("Couldn't create scheduler: %v", err)
}

eventBroadcaster.StartRecordingToSink(testCtx.Ctx.Done())
eventBroadcaster.StartRecordingToSink(ctx.Done())

oldCloseFn := testCtx.CloseFn
testCtx.CloseFn = func() {
oldCloseFn()
eventBroadcaster.Shutdown()
}

testCtx.SchedulerCloseFn = func() {
cancel()
eventBroadcaster.Shutdown()
}

return testCtx
}
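With SchedulerCloseFn available, a test could in principle stop just the scheduler (its derived context plus the event broadcaster) and start a fresh one against the same apiserver, which keeps running until the cleanup registered by InitTestAPIServer fires. A hypothetical sketch of that pattern, written with unqualified names as if it sat next to the helpers above; it is not a call sequence taken from this commit:

    func TestRestartSchedulerSketch(t *testing.T) {
        testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, "restart", nil), 0)
        SyncSchedulerInformerFactory(testCtx)
        go testCtx.Scheduler.Run(testCtx.SchedulerCtx)

        // ... exercise the first scheduler configuration ...

        // Stop only the scheduler; the namespace and apiserver stay up.
        testCtx.SchedulerCloseFn()

        // Bring up a second scheduler on the same test context, e.g. with other options.
        testCtx = InitTestSchedulerWithOptions(t, testCtx, 0)
        SyncSchedulerInformerFactory(testCtx)
        go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
    }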
@@ -488,8 +526,8 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.Di
// configuration.
func InitTestSchedulerWithNS(t *testing.T, nsPrefix string, opts ...scheduler.Option) *TestContext {
testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
return testCtx
}

@@ -512,8 +550,8 @@ func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
t, InitTestAPIServer(t, nsPrefix, nil),
0,
scheduler.WithProfiles(cfg.Profiles...))
SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
return testCtx
}
@@ -1000,8 +1000,6 @@ func TestRescheduleProvisioning(t *testing.T) {
defer func() {
testCtx.CancelFn()
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
testCtx.CloseFn()
}()

ctrl, informerFactory, err := initPVController(t, testCtx, 0)
@@ -1049,7 +1047,7 @@ func TestRescheduleProvisioning(t *testing.T) {

func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
testutil.SyncInformerFactory(testCtx)
testutil.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

clientset := testCtx.ClientSet
@@ -1087,7 +1085,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
teardown: func() {
klog.Infof("test cluster %q start to tear down", ns)
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
testutil.CleanupTest(t, testCtx)
},
}
}

@@ -48,7 +48,7 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {

func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
testutil.SyncInformerFactory(testCtx)
testutil.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

clientset := testCtx.ClientSet
@@ -71,7 +71,6 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPe
teardown: func() {
klog.Infof("test cluster %q start to tear down", ns)
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
testutil.CleanupTest(t, testCtx)
},
}
}