mirror of https://github.com/hpcaitech/ColossalAI.git
synced 2025-09-13 13:11:05 +00:00
added CI for unit testing (#69)
tests/test_layers/test_1d/checks_1d/__init__.py (new file, 0 lines changed)
tests/test_layers/test_1d/checks_1d/check_layer_1d.py (modified; filename inferred from the imports below)

@@ -1,4 +1,3 @@
-from tests.test_layers.test_3d.common import IMG_SIZE
 import torch
 import torch.distributed as dist
 from torch.nn import Parameter
@@ -7,7 +6,7 @@ from colossalai.context.parallel_mode import ParallelMode
 from colossalai.core import global_context as gpc
 from colossalai.nn import Linear1D_Col, Linear1D_Row, TransformerMLP1D, TransformerSelfAttention1D, ViTMLP1D, ViTSelfAttention1D, ViTPatchEmbedding1D, ViTHead1D, ViTTokenFuser1D
 from colossalai.utils import get_current_device, print_rank_0
-from common import HIDDEN_SIZE, DEPTH, BATCH_SIZE, SEQ_LENGTH, NUM_CLASSES, check_equal, IMG_SIZE
+from .common import HIDDEN_SIZE, DEPTH, BATCH_SIZE, SEQ_LENGTH, NUM_CLASSES, check_equal, IMG_SIZE


 def check_linear_col():
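
The switch to a relative import above goes hand in hand with the new checks_1d/__init__.py: once checks_1d is a package, check_layer_1d.py is loaded through the package rather than executed as a loose script. Below is a minimal sketch of the distinction; the location of common.py is inferred from the relative import and is not shown in this diff.

# Assumed layout after this commit:
#
#   tests/test_layers/test_1d/
#       test_1d.py
#       checks_1d/
#           __init__.py        <- new empty file: makes checks_1d a package
#           check_layer_1d.py
#           common.py          <- shared constants (HIDDEN_SIZE, etc.)

# Script-style absolute import: resolves only when checks_1d/ itself is on
# sys.path, e.g. when the file is executed directly from that directory.
# from common import HIDDEN_SIZE

# Package-relative import: resolves whenever checks_1d is imported as a
# package, which is how "from checks_1d.check_layer_1d import *" loads it.
from .common import HIDDEN_SIZE, check_equal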

tests/test_layers/test_1d/test_1d.py (modified; filename inferred from the test function and imports below)

@@ -2,10 +2,13 @@
 # -*- encoding: utf-8 -*-

+import pytest
 import torch
+import torch.multiprocessing as mp

 from colossalai.core import global_context as gpc
 from colossalai.initialize import launch, get_default_parser
-from test_layer import *
+from functools import partial
+from checks_1d.check_layer_1d import *

 CONFIG = dict(
     parallel=dict(
@@ -18,8 +21,14 @@ CONFIG = dict(
 )


-def check_layer():
-    # print_rank_0('start check_linear_col')
+def check_layer(rank, world_size):
+    launch(config=CONFIG,
+           rank=rank,
+           world_size=world_size,
+           host='localhost',
+           port=29920,
+           backend='nccl')
+
     check_linear_col()
     check_linear_row()
     check_attention()
@@ -28,21 +37,15 @@ def check_layer():
     check_embed()
     check_head()

+    gpc.destroy()
+    torch.cuda.empty_cache()
+

 @pytest.mark.dist
-@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
 def test_1d():
-    parser = get_default_parser()
-    args = parser.parse_args()
-    launch(config=CONFIG,
-           rank=args.rank,
-           world_size=args.world_size,
-           host=args.host,
-           port=args.port,
-           backend=args.backend)
-
-    check_layer()
-    gpc.destroy()
+    world_size = 2
+    run_func = partial(check_layer, world_size=world_size)
+    mp.spawn(run_func, nprocs=world_size)


 if __name__ == '__main__':
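
With the skip marker removed, the test no longer depends on a test.sh wrapper to supply rank and world size: pytest invokes test_1d in a single process, and mp.spawn forks the two workers itself, passing each one its rank. A standalone sketch of that spawn pattern follows (hypothetical worker; the real test runs the 1D layer checks on 2 GPUs):

from functools import partial

import torch.multiprocessing as mp


def worker(rank, world_size):
    # mp.spawn passes each process's index (rank) as the first positional
    # argument; partial() pre-binds the remaining parameters.
    print(f'worker {rank} of {world_size} started')


if __name__ == '__main__':
    world_size = 2
    run_func = partial(worker, world_size=world_size)
    mp.spawn(run_func, nprocs=world_size)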