Mirror of https://github.com/hpcaitech/ColossalAI.git
Hotfix/Colossalai layers (#92)
* Optimized the 1D layer APIs; reorganized the nn.layer modules; fixed the affected tests
* Fixed a 2.5D runtime issue
* Reworked batch splitting, which is now called in trainer.schedule.load_batch

Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
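The batch-splitting rework itself is not part of the diff hunks shown below; the idea is that the batch is now sharded when the trainer's schedule loads it, rather than inside the layers. As a rough, hypothetical illustration only (the real load_batch in trainer.schedule is not shown here, and its signature is assumed), sharding a global batch across parallel ranks might look like:

    import torch

    def load_batch(batch: torch.Tensor, rank: int, world_size: int) -> torch.Tensor:
        # Hypothetical sketch: slice the global batch along dim 0 so each
        # parallel rank consumes only its own shard.
        return batch.chunk(world_size, dim=0)[rank]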
Diff of the sequence-parallel layer test:

@@ -4,10 +4,11 @@
 import pytest
 import torch
 import torch.multiprocessing as mp
-from colossalai.initialize import launch, get_default_parser
+from colossalai.initialize import launch
 from colossalai.logging import get_dist_logger
 from checks_seq.check_layer_seq import *
+from functools import partial
+from colossalai.utils import free_port


 CONFIG = dict(
@@ -22,13 +23,13 @@ def check_layer():
     check_selfattention()


-def run_check_sequence(rank, world_size):
+def run_check_sequence(rank, world_size, port):
     # init dist
     launch(config=CONFIG,
            rank=rank,
            world_size=world_size,
            host='localhost',
-           port=29924,
+           port=port,
            backend='nccl')
     logger = get_dist_logger()
     logger.info('Distributed environment is initialzied.', ranks=[0])
@@ -41,7 +42,7 @@ def run_check_sequence(rank, world_size):
 @pytest.mark.dist
 def test_sequence():
     world_size = 4
-    run_func = partial(run_check_sequence, world_size=world_size)
+    run_func = partial(run_check_sequence, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)
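The functional change is twofold: the hardcoded rendezvous port 29924 is replaced by one obtained from colossalai.utils.free_port(), so concurrent test runs no longer collide on the same port, and functools.partial pre-binds world_size and port while mp.spawn supplies each process's rank as the first positional argument. Note that free_port() is evaluated once in the parent process, so every spawned rank receives the same port.

The diff does not show how free_port is implemented; a minimal sketch of the usual approach (bind a throwaway socket to port 0 and let the OS pick an unused port; worker below is a hypothetical stand-in for run_check_sequence) is:

    import socket
    from functools import partial
    import torch.multiprocessing as mp

    def free_port() -> int:
        # Bind to port 0 so the OS assigns an unused port, then release it.
        # The port is only *likely* to remain free between this call and the
        # actual bind inside launch(); fine for tests, racy in general.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(('localhost', 0))
            return s.getsockname()[1]

    def worker(rank, world_size, port):
        # mp.spawn calls this as worker(rank); world_size and port arrive
        # pre-bound via functools.partial.
        print(f'rank {rank}/{world_size} rendezvous on port {port}')

    if __name__ == '__main__':
        world_size = 4
        # free_port() runs once in the parent, so all ranks share one port.
        run_func = partial(worker, world_size=world_size, port=free_port())
        mp.spawn(run_func, nprocs=world_size)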