Migrated project

zbian
2021-10-28 18:21:23 +02:00
parent 2ebaefc542
commit 404ecbdcc6
409 changed files with 35853 additions and 0 deletions


@@ -0,0 +1,10 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=4,
        mode='2d'
    )
)
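
Note: with this config each model replica spans pipeline size 2 x tensor size 4 = 8 processes, so the 16-process test further below ends up with a data-parallel size of 2. A minimal sketch of that arithmetic (variable names here are illustrative, not from the file):

# Illustrative only: how a 16-process run decomposes under pipeline=2, tensor=4.
pipeline_size, tensor_size, world_size = 2, 4, 16
data_size = world_size // (pipeline_size * tensor_size)
assert data_size == 2   # matches the DATA-rank asserts in the 2D test below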


@@ -0,0 +1,11 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=8,
        depth=2,
        mode='2.5d'
    )
)
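
Note: in '2.5d' mode a tensor group of size 8 with depth 2 amounts to 2 copies of a 2 x 2 grid; with pipeline size 2 a replica spans 16 processes, so the 32-process test further below gets a data-parallel size of 2. A sketch of that factorization (illustrative names, not from the file):

# Illustrative only: the 2.5D factorization implied by size=8, depth=2.
tensor_size, depth, pipeline_size, world_size = 8, 2, 2, 32
dim = int((tensor_size // depth) ** 0.5)   # each depth slice is a dim x dim grid
assert dim == 2 and dim * dim * depth == tensor_size
data_size = world_size // (pipeline_size * tensor_size)
assert data_size == 2   # matches the DATA-rank asserts in the 2.5D test below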


@@ -0,0 +1,10 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=8,
        mode='3d'
    )
)
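
Note: in '3d' mode a tensor group of size 8 forms a 2 x 2 x 2 process cube; with pipeline size 2 a replica spans 16 processes, so the 32-process test further below again gets a data-parallel size of 2. A sketch (illustrative names, not from the file):

# Illustrative only: the 3D factorization implied by size=8.
tensor_size, pipeline_size, world_size = 8, 2, 32
cube_dim = round(tensor_size ** (1 / 3))   # processes form a cube_dim^3 cube
assert cube_dim == 2 and cube_dim ** 3 == tensor_size
data_size = world_size // (pipeline_size * tensor_size)
assert data_size == 2   # matches the DATA-rank asserts in the 3D test below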


@@ -0,0 +1,96 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
from pathlib import Path

import pytest
import torch.multiprocessing as mp

from colossalai import init_dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc

CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2d_init.py').absolute()


def check_data_parallel_rank(rank):
    if rank in [0, 1, 2, 3, 4, 5, 6, 7]:
        assert gpc.get_local_rank(ParallelMode.DATA) == 0
    elif rank in [8, 9, 10, 11, 12, 13, 14, 15]:
        assert gpc.get_local_rank(ParallelMode.DATA) == 1


def check_pipeline_parallel_rank(rank):
    if rank in [0, 1, 2, 3]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 0
    elif rank in [4, 5, 6, 7]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 1
    elif rank in [8, 9, 10, 11]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 0
    elif rank in [12, 13, 14, 15]:
        assert gpc.get_local_rank(ParallelMode.PIPELINE) == 1


def check_tensor_parallel_rank(rank):
    if rank in [0, 4, 8, 12]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 0
    elif rank in [1, 5, 9, 13]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 1
    elif rank in [2, 6, 10, 14]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 2
    elif rank in [3, 7, 11, 15]:
        assert gpc.get_local_rank(ParallelMode.TENSOR) == 3


def check_2d_parallel_rank(rank):
    if rank in [0, 4, 8, 12]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 0
    elif rank in [1, 5, 9, 13]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 1
    elif rank in [2, 6, 10, 14]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 1
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 0
    elif rank in [3, 7, 11, 15]:
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 1
        assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 1


def init_2d(local_rank, world_size, backend, port, host):
    dist_args = dict(
        config=CONFIG_PATH,
        local_rank=local_rank,
        world_size=world_size,
        backend=backend,
        port=port,
        host=host
    )
    init_dist(**dist_args)

    check_tensor_parallel_rank(local_rank)
    check_data_parallel_rank(local_rank)
    check_2d_parallel_rank(local_rank)
    check_pipeline_parallel_rank(local_rank)
    gpc.destroy()


@pytest.mark.cpu
def test_2d_init():
    """
    As no computation or communication is done, we can run this test on CPU.
    """
    world_size = 16
    test_fn = partial(init_2d,
                      world_size=world_size,
                      backend='gloo',
                      port='29500',
                      host='localhost'
                      )
    mp.spawn(test_fn, nprocs=world_size)


if __name__ == '__main__':
    test_2d_init()
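
Note: the expected 2D coordinates hard-coded in check_2d_parallel_rank follow a simple pattern from the tensor-parallel local rank. A sketch derived from the asserts above (the helper name is illustrative, not part of ColossalAI):

# Derived from the asserts above: on a 2 x 2 grid, a global rank's 2D coordinates
# come from its tensor-parallel local rank t = rank % 4.
def expected_2d_coords(rank):
    t = rank % 4
    return t // 2, t % 2   # (col, row)

assert expected_2d_coords(6) == (1, 0)    # rank 6: 2D_COL 1, 2D_ROW 0, as asserted above
assert expected_2d_coords(13) == (0, 1)   # rank 13: 2D_COL 0, 2D_ROW 1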


@@ -0,0 +1,118 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
from pathlib import Path

import pytest
import torch.multiprocessing as mp

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import init_dist

CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2p5d_init.py').absolute()


def check_data_parallel_rank(rank):
    dp_rank = gpc.get_local_rank(ParallelMode.DATA)

    if rank in list(range(16)):
        assert dp_rank == 0
    elif rank in list(range(16, 32)):
        assert dp_rank == 1


def check_pipeline_parallel_rank(rank):
    ppr = gpc.get_local_rank(ParallelMode.PIPELINE)

    if rank in list(range(8)):
        assert ppr == 0
    elif rank in list(range(8, 16)):
        assert ppr == 1
    elif rank in list(range(16, 24)):
        assert ppr == 0
    elif rank in list(range(24, 32)):
        assert ppr == 1


def check_tensor_parallel_rank(rank):
    tp_rank = gpc.get_local_rank(ParallelMode.TENSOR)

    for i in range(8):
        ranks = list(range(i, 32, 8))
        if rank in ranks:
            assert tp_rank == i, f'{rank}:{tp_rank}'


def check_2p5d_parallel_rank(rank):
    rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
    xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ)

    # check for row parallel group
    for i in range(2):
        ranks = list(range(i, 32, 2))
        if rank in ranks:
            assert rp_rank == i

    # check for col parallel group
    for i in range(2):
        ranks = list(range(i * 2, 32, 4))
        ranks_plus_ones = [val + 1 for val in ranks]
        ranks.extend(ranks_plus_ones)
        if rank in ranks:
            assert cp_rank == i

    # check for depth parallel group
    for i in range(2):
        ranks = []
        for j in range(i * 4, 32, 8):
            ranks.extend([j + k for k in range(4)])
        if rank in ranks:
            assert dp_rank == i

    # check for xz parallel group
    for i in range(2):
        ranks = list(range(i * 2, 32, 8))
        ranks_plus_one = [val + 1 for val in ranks]
        ranks.extend(ranks_plus_one)
        if rank in ranks:
            assert xp_rank == i


def init_2halfd(local_rank, world_size, backend, port, host):
    dist_args = dict(
        config=CONFIG_PATH,
        local_rank=local_rank,
        world_size=world_size,
        backend=backend,
        port=port,
        host=host
    )
    init_dist(**dist_args)

    check_data_parallel_rank(local_rank)
    check_pipeline_parallel_rank(local_rank)
    check_tensor_parallel_rank(local_rank)
    check_2p5d_parallel_rank(local_rank)
    gpc.destroy()


@pytest.mark.cpu
def test_2halfd_init():
    """
    As no computation or communication is done, we can run this test on CPU.
    """
    world_size = 32
    test_fn = partial(init_2halfd,
                      world_size=world_size,
                      backend='gloo',
                      port='29501',
                      host='localhost'
                      )
    mp.spawn(test_fn, nprocs=world_size)


if __name__ == '__main__':
    test_2halfd_init()
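
Note: the row, column, and depth coordinates checked in check_2p5d_parallel_rank likewise reduce to arithmetic on the tensor-parallel local rank. A sketch derived from the asserts above (the helper name is illustrative, not part of ColossalAI):

# Derived from the asserts above: with tensor size 8 (2 x 2 grid, depth 2), the 2.5D
# coordinates of a global rank come from its tensor-parallel local rank t = rank % 8.
def expected_2p5d_coords(rank):
    t = rank % 8
    return t % 2, (t // 2) % 2, t // 4   # (row, col, dep)

assert expected_2p5d_coords(5) == (1, 0, 1)    # rank 5: ROW 1, COL 0, DEP 1, as asserted above
assert expected_2p5d_coords(26) == (0, 1, 0)   # rank 26: ROW 0, COL 1, DEP 0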


@@ -0,0 +1,111 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
from pathlib import Path

import pytest
import torch.multiprocessing as mp

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import init_dist

CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_3d_init.py').absolute()


def check_data_parallel_rank(rank):
    dp_rank = gpc.get_local_rank(ParallelMode.DATA)

    if rank in list(range(16)):
        assert dp_rank == 0
    elif rank in list(range(16, 32)):
        assert dp_rank == 1


def check_pipeline_parallel_rank(rank):
    ppr = gpc.get_local_rank(ParallelMode.PIPELINE)

    if rank in list(range(8)):
        assert ppr == 0
    elif rank in list(range(8, 16)):
        assert ppr == 1
    elif rank in list(range(16, 24)):
        assert ppr == 0
    elif rank in list(range(24, 32)):
        assert ppr == 1


def check_tensor_parallel_rank(rank):
    tp_rank = gpc.get_local_rank(ParallelMode.TENSOR)

    for i in range(8):
        ranks = list(range(i, 32, 8))
        if rank in ranks:
            assert tp_rank == i


def check_3d_parallel_rank(rank):
    ip_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT)
    wp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT)
    op_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT)

    # check for input parallel group
    for i in range(2):
        _ranks = list(range(i * 2, 32, 4))
        _ranks_plus_one = [val + 1 for val in _ranks]
        input_ranks = _ranks + _ranks_plus_one
        if rank in input_ranks:
            assert ip_rank == i

    # check for weight parallel group
    for i in range(2):
        ranks = list(range(i, 32, 2))
        if rank in ranks:
            assert wp_rank == i

    # check for output parallel group
    for i in range(2):
        ranks = []
        for j in range(i * 4, 32, 8):
            ranks.extend([j + k for k in range(4)])
        if rank in ranks:
            assert op_rank == i


def init_3d(local_rank, world_size, backend, port, host):
    dist_args = dict(
        config=CONFIG_PATH,
        local_rank=local_rank,
        world_size=world_size,
        backend=backend,
        port=port,
        host=host
    )
    init_dist(**dist_args)

    check_tensor_parallel_rank(local_rank)
    check_3d_parallel_rank(local_rank)
    check_data_parallel_rank(local_rank)
    check_pipeline_parallel_rank(local_rank)
    print('pass')
    gpc.destroy()


@pytest.mark.cpu
def test_3d_init():
    """
    As no computation or communication is done, we can run this test on CPU.
    """
    world_size = 32
    test_fn = partial(init_3d,
                      world_size=world_size,
                      backend='gloo',
                      port='29502',
                      host='localhost'
                      )
    mp.spawn(test_fn, nprocs=world_size)


if __name__ == '__main__':
    test_3d_init()
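
Note: the weight, input, and output coordinates checked in check_3d_parallel_rank also reduce to arithmetic on the tensor-parallel local rank. A sketch derived from the asserts above (the helper name is illustrative, not part of ColossalAI):

# Derived from the asserts above: with tensor size 8 the ranks form a 2 x 2 x 2 cube,
# and a global rank's 3D coordinates come from its tensor-parallel local rank t = rank % 8.
def expected_3d_coords(rank):
    t = rank % 8
    return t % 2, (t // 2) % 2, t // 4   # (weight, input, output)

assert expected_3d_coords(6) == (0, 1, 1)     # rank 6: WEIGHT 0, INPUT 1, OUTPUT 1, as asserted above
assert expected_3d_coords(25) == (1, 0, 0)    # rank 25: WEIGHT 1, INPUT 0, OUTPUT 0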