added CI for unit testing (#69)

commit cd9c28e055 (parent 45355a62f7)
Author: Frank Lee
Date:   2021-12-16 10:32:08 +08:00
Committed by: GitHub

68 changed files with 1089 additions and 766 deletions

Changed file: 2D parallel initialization test

@@ -1,15 +1,15 @@
 #!/usr/bin/env python
 # -*- encoding: utf-8 -*-

+from functools import partial
+from pathlib import Path
+
 import pytest
 import torch
 import torch.multiprocessing as mp

 from colossalai import launch
 from colossalai.context.parallel_mode import ParallelMode
 from colossalai.core import global_context as gpc
-from functools import partial
-from pathlib import Path
-

 CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2d_init.py').absolute()
@@ -75,6 +75,7 @@ def init_2d(rank, world_size, backend, port, host):
     check_2d_parallel_rank(rank)
     check_pipeline_parallel_rank(rank)
     gpc.destroy()
+    torch.cuda.empty_cache()


 @pytest.mark.cpu
@@ -86,7 +87,7 @@ def test_2d_init():
     test_fn = partial(init_2d,
                       world_size=world_size,
                       backend='gloo',
-                      port='29500',
+                      port='29900',
                       host='localhost'
                       )
     mp.spawn(test_fn, nprocs=world_size)
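Pieced together from the hunks above, the updated test follows the shape below. This is a sketch, not the full file: the rank-check bodies are elided, and the `world_size = 8` line is an assumption, since the diff does not show it.

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
from pathlib import Path

import pytest
import torch
import torch.multiprocessing as mp

from colossalai import launch
from colossalai.core import global_context as gpc

CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2d_init.py').absolute()


def init_2d(rank, world_size, backend, port, host):
    # Each spawned worker boots the distributed environment, runs its
    # rank checks, then tears everything down.
    launch(config=CONFIG_PATH, rank=rank, world_size=world_size,
           backend=backend, port=port, host=host)
    # ... rank checks shown in the hunks above ...
    gpc.destroy()
    # Return cached CUDA blocks to the driver so the next test in the
    # same CI job starts from a clean slate.
    torch.cuda.empty_cache()


@pytest.mark.cpu
def test_2d_init():
    world_size = 8  # assumed; the diff does not show this line
    test_fn = partial(init_2d,
                      world_size=world_size,
                      backend='gloo',
                      port='29900',
                      host='localhost'
                      )
    # mp.spawn passes the process index as the first positional argument,
    # with the remaining parameters bound by the partial above.
    mp.spawn(test_fn, nprocs=world_size)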

Changed file: 2.5D parallel initialization test

@@ -5,6 +5,7 @@ from functools import partial
 from pathlib import Path
+
 import pytest
 import torch
 import torch.multiprocessing as mp

 from colossalai.context.parallel_mode import ParallelMode
@@ -98,6 +99,7 @@ def init_2halfd(rank, world_size, backend, port, host):
     check_tensor_parallel_rank(rank)
     check_2p5d_parallel_rank(rank)
     gpc.destroy()
+    torch.cuda.empty_cache()


 @pytest.mark.cpu
@@ -109,7 +111,7 @@ def test_2halfd_init():
     test_fn = partial(init_2halfd,
                       world_size=world_size,
                       backend='gloo',
-                      port='29501',
+                      port='29901',
                       host='localhost'
                       )
    mp.spawn(test_fn, nprocs=world_size)
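The rendezvous ports move from the 2950x range to the 2990x range, and each test file keeps its own distinct port so back-to-back tests in one CI job cannot collide on the same TCP address. An alternative, not used in this commit, is to let the OS pick a free port at runtime:

import socket


def find_free_port() -> str:
    # Binding to port 0 asks the kernel for any currently unused TCP port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('localhost', 0))
        return str(s.getsockname()[1])

This avoids hard-coded port bookkeeping across test files, at the cost of a small race window between closing the probe socket and the launcher re-binding the port.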

Changed file: 3D parallel initialization test

@@ -5,8 +5,10 @@ from functools import partial
 from pathlib import Path
+
 import pytest
 import torch
 import torch.multiprocessing as mp
+
 from colossalai.context.parallel_mode import ParallelMode
 from colossalai.core import global_context as gpc
 from colossalai.initialize import launch

@@ -90,6 +92,7 @@ def init_3d(rank, world_size, backend, port, host):
     check_data_parallel_rank(rank)
     check_pipeline_parallel_rank(rank)
     gpc.destroy()
+    torch.cuda.empty_cache()


 @pytest.mark.cpu
@@ -101,7 +104,7 @@ def test_3d_init():
     test_fn = partial(init_3d,
                       world_size=world_size,
                       backend='gloo',
-                      port='29502',
+                      port='29902',
                       host='localhost'
                       )
    mp.spawn(test_fn, nprocs=world_size)
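All three files gain the same two-line teardown: gpc.destroy() followed by torch.cuda.empty_cache(). If this pattern keeps spreading, a shared helper would keep the tests in sync; a hypothetical sketch, not part of this commit:

import torch

from colossalai.core import global_context as gpc


def free_parallel_env():
    # Hypothetical helper: destroy the process groups created by launch(),
    # then release cached CUDA memory so the next spawned test starts clean.
    gpc.destroy()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

Each init_* function would then end with a single free_parallel_env() call instead of repeating the two lines.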