#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
from pathlib import Path

import pytest
import torch.multiprocessing as mp

from colossalai import launch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc

CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2d_init.py').absolute()
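
# The config file itself is not reproduced here. Given the 16-rank layout
# asserted below (2-way pipeline, 4-way '2d' tensor parallelism, leaving a
# factor of 2 for data parallelism), `configs/parallel_2d_init.py`
# presumably contains something along these lines -- a sketch under those
# assumptions, not a quote of the real file:
#
#     parallel = dict(
#         pipeline=dict(size=2),
#         tensor=dict(size=4, mode='2d'),
#     )
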

def check_data_parallel_rank(rank):
    # ranks 0-7 form the first data-parallel group, ranks 8-15 the second
    if rank in [0, 1, 2, 3, 4, 5, 6, 7]:
        assert gpc.get_local_rank(ParallelMode.DATA) == 0
    elif rank in [8, 9, 10, 11, 12, 13, 14, 15]:
        assert gpc.get_local_rank(ParallelMode.DATA) == 1


def check_pipeline_parallel_rank(rank):
    # within each data-parallel group of 8, the first 4 ranks belong to
    # pipeline stage 0 and the next 4 to pipeline stage 1
    if rank in [0, 1, 2, 3]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 0
    elif rank in [4, 5, 6, 7]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 1
    elif rank in [8, 9, 10, 11]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 0
    elif rank in [12, 13, 14, 15]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 1


def check_tensor_parallel_rank(rank):
    # tensor parallelism is the innermost dimension: the tensor-parallel
    # local rank cycles through 0-3 as the global rank increases
    if rank in [0, 4, 8, 12]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 0
    elif rank in [1, 5, 9, 13]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 1
    elif rank in [2, 6, 10, 14]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 2
    elif rank in [3, 7, 11, 15]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 3


def check_2d_parallel_rank(rank):
    # the 4 tensor-parallel ranks are arranged as a 2x2 process grid for
    # 2D tensor parallelism
    if rank in [0, 4, 8, 12]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 0
    elif rank in [1, 5, 9, 13]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 1
    elif rank in [2, 6, 10, 14]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 1
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 0
    elif rank in [3, 7, 11, 15]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 1
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 1
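
# Taken together, the four checks above pin down the rank layout that
# launch() is expected to build for world_size 16: tensor parallelism is
# innermost, then pipeline, then data, i.e.
#
#     rank = data_rank * 8 + pipeline_rank * 4 + tensor_rank
#
# and the 2D process grid folds the tensor-parallel rank as
#
#     col = tensor_rank // 2,    row = tensor_rank % 2
#
# (both formulas derived directly from the assertions above, not quoted
# from ColossalAI documentation).
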

def init_2d(rank, world_size, backend, port, host):
    # build the global parallel context from the config, verify every
    # local rank, then tear the context down
    dist_args = dict(
        config=CONFIG_PATH,
        rank=rank,
        world_size=world_size,
        backend=backend,
        port=port,
        host=host,
        verbose=True
    )
    launch(**dist_args)

    check_tensor_parallel_rank(rank)
    check_data_parallel_rank(rank)
    check_2d_parallel_rank(rank)
    check_pipeline_parallel_rank(rank)
    gpc.destroy()
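
# init_2d is handed to mp.spawn below, which calls fn(i, *args) with the
# process index i injected as the first positional argument; partial()
# therefore binds every argument except `rank`.
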

@pytest.mark.cpu
def test_2d_init():
    """
    As no computation or communication is done, we can run this test on CPU.
    """
    world_size = 16
    test_fn = partial(init_2d,
                      world_size=world_size,
                      backend='gloo',
                      port='29500',
                      host='localhost')
    mp.spawn(test_fn, nprocs=world_size)
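
# The port above is hard-coded to 29500, which can collide with a
# concurrent run on the same machine. A minimal standard-library sketch of
# picking a free port instead (illustrative only; not used by the original
# test):
import socket


def _get_free_port() -> str:
    # bind to port 0 so the OS assigns an unused port, then report it
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('localhost', 0))
        return str(s.getsockname()[1])
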

if __name__ == '__main__':
    test_2d_init()