[hotfix] ZeroDDP use new process group (#1333)

* process group supports getting ranks in group

* chunk mgr receives a process group

* update unit test

* fix unit tests
Author: ver217
Date: 2022-07-18 14:14:52 +08:00
Committed by: GitHub
Parent: 11d1436a67
Commit: 0c51ff2c13

9 changed files with 49 additions and 43 deletions
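
The thread running through every diff below: ChunkManager now receives the ProcessGroup as its second positional argument, and ZeroDDP no longer takes a process group at all. A minimal sketch of the new wiring, mirroring the updated tests; the GeminiManager and ZeroDDP import paths are assumptions based on the ColossalAI tree at the time of this commit, and the model and chunk-search arguments (64, 2) are placeholders copied from the test defaults:

    import torch
    from colossalai.gemini import ChunkManager, GeminiManager   # GeminiManager path assumed
    from colossalai.nn.parallel import ZeroDDP                  # assumed import path
    from colossalai.tensor import ProcessGroup

    module = torch.nn.Linear(8, 8).cuda()             # placeholder model
    pg = ProcessGroup()                               # default group over all launched ranks
    chunk_size = ChunkManager.search_chunk_size(module, 64, 2)
    chunk_manager = ChunkManager(chunk_size, pg)      # pg now lives in the chunk manager
    gemini_manager = GeminiManager('cuda', chunk_manager)
    model = ZeroDDP(module, gemini_manager)           # no process_group argument anymore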

View File

@@ -33,11 +33,11 @@ def init_ddp(module: torch.nn.Module) -> ColoDDP:
 def init_ddpv2(module: torch.nn.Module, use_chunk: bool = False) -> ZeroDDP:
-    chunk_size = ChunkManager.search_chunk_size(module, 64, 2) if use_chunk else None
-    chunk_manager = ChunkManager(chunk_size)
-    gemini_manager = GeminiManager('cuda', chunk_manager)
     pg = ProcessGroup()
-    return ZeroDDP(module, gemini_manager, pg)
+    chunk_size = ChunkManager.search_chunk_size(module, 64, 2) if use_chunk else None
+    chunk_manager = ChunkManager(chunk_size, pg)
+    gemini_manager = GeminiManager('cuda', chunk_manager)
+    return ZeroDDP(module, gemini_manager)
 class Net(torch.nn.Module):

View File

@@ -28,11 +28,11 @@ def init_ddp(module: torch.nn.Module) -> ColoDDP:
 def init_ddpv2(module: torch.nn.Module, use_chunk: bool = False, use_zero: bool = False) -> ZeroDDP:
-    chunk_size = ChunkManager.search_chunk_size(module, 64, 4) if use_chunk else None
-    chunk_manager = ChunkManager(chunk_size, enable_distributed_storage=use_zero)
-    gemini_manager = GeminiManager('cuda', chunk_manager)
     pg = ProcessGroup()
-    return ZeroDDP(module, gemini_manager, process_group=pg)
+    chunk_size = ChunkManager.search_chunk_size(module, 64, 4) if use_chunk else None
+    chunk_manager = ChunkManager(chunk_size, pg, enable_distributed_storage=use_zero)
+    gemini_manager = GeminiManager('cuda', chunk_manager)
+    return ZeroDDP(module, gemini_manager)
 def run_state_dict(ddp_init_func: Callable[[torch.nn.Module], ColoDDP]):

View File

@@ -7,8 +7,7 @@ from functools import partial
 from colossalai.gemini import ChunkManager
 from colossalai.testing import rerun_if_address_is_in_use, parameterize
 from colossalai.utils import free_port
-from colossalai.core import global_context as gpc
-from colossalai.context import ParallelMode
+from colossalai.tensor import ProcessGroup as ColoProcessGroup
 def check_has_params(params: List[torch.Tensor], has_tensors: List[bool]):
@@ -38,12 +37,13 @@ TOTAL_MEM = {True: {True: [512, 512], False: [1024, 1024]}, False: {True: [512,
 @parameterize('use_chunk', [False, True])
 @parameterize('use_zero', [False, True])
 def run_chunk_zero(use_chunk, use_zero):
-    rank = gpc.get_local_rank(ParallelMode.DATA)
+    pg = ColoProcessGroup()
+    rank = pg.rank()
     if rank == 0:
         print(f'use_chunk={use_chunk}, use_zero={use_zero}')
     params = [torch.rand(8, 8) for _ in range(3)]
     chunk_size = 128 if use_chunk else None
-    chunk_manager = ChunkManager(chunk_size, enable_distributed_storage=use_zero)
+    chunk_manager = ChunkManager(chunk_size, pg, enable_distributed_storage=use_zero)
     chunk_manager.create_group('param')
     assert chunk_manager.total_mem['cpu'] == 0
     assert chunk_manager.total_mem['cuda'] == 0
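
This hunk is where the first commit-message bullet lands: instead of reading the data-parallel rank from the global context (gpc), the test asks the ColoTensor ProcessGroup directly. A two-line sketch of the replacement, assuming a ProcessGroup() built with no arguments spans all launched ranks:

    from colossalai.tensor import ProcessGroup as ColoProcessGroup

    pg = ColoProcessGroup()   # default group covering every launched rank
    rank = pg.rank()          # replaces gpc.get_local_rank(ParallelMode.DATA)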

View File

@@ -31,8 +31,6 @@ def check_param_equal(model, torch_model, pg: ProcessGroup):
 def check_grad_equal(model, torch_model, pg: ProcessGroup):
     for (n, p), (tn, tp) in zip(model.named_parameters(), torch_model.named_parameters()):
         if p.grad is not None:
-            torch.distributed.barrier()
-            print(torch.distributed.get_rank(), p.grad)
             assert tensor_shard_equal(tp.grad.to(dtype=p.grad.dtype, device=p.grad.device), p.grad,
                                       pg.tp_local_rank(), pg.tp_world_size()), \
                 f'{tp.grad} vs {p.grad}\n{n}:\n\t{tp.grad.shape} vs {p.grad.shape} in {pg.rank()}'
@@ -63,9 +61,9 @@ def init_1d_col_spec(model, pg: ProcessGroup):
             p.set_tensor_spec(*spec)
-@parameterize('use_chunk', [False])
-@parameterize('use_zero', [False])
-@parameterize('placement_policy', ['cuda'])
+@parameterize('use_chunk', [False, True])
+@parameterize('use_zero', [False, True])
+@parameterize('placement_policy', ['cuda', 'cpu'])
 def run_gpt(use_chunk, use_zero, placement_policy, tp_init_spec_func=None):
     set_seed(42)
     get_components_func = non_distributed_component_funcs.get_callable('gpt2')
@@ -92,10 +90,11 @@ def run_gpt(use_chunk, use_zero, placement_policy, tp_init_spec_func=None):
     chunk_size = ChunkManager.search_chunk_size(model, 8192, 8) if use_chunk else None
     chunk_manager = ChunkManager(chunk_size,
+                                 pg,
                                  enable_distributed_storage=use_zero,
                                  init_device=GeminiManager.get_default_device(placement_policy))
     gemini_manager = GeminiManager(placement_policy, chunk_manager)
-    model = ZeroDDP(model, gemini_manager, pg)
+    model = ZeroDDP(model, gemini_manager)
     optim = HybridAdam(model.parameters(), lr=1e-3)
     optim = ZeroOptimizer(optim, model, initial_scale=1)
@@ -104,7 +103,7 @@ def run_gpt(use_chunk, use_zero, placement_policy, tp_init_spec_func=None):
     torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
     torch_model = DDP(torch_model, device_ids=[pg.rank()], process_group=pg.dp_process_group())
-    # print(chunk_manager)
+    print(chunk_manager)
     check_param_equal(model, torch_model, pg)
     model.eval()
@@ -129,13 +128,12 @@ def run_dist(rank, world_size, port):
     colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
     if world_size == 4:
         run_gpt(tp_init_spec_func=init_1d_col_spec)
-        # run_gpt(tp_init_spec_func=init_1d_row_spec)
+        run_gpt(tp_init_spec_func=init_1d_row_spec)
     else:
         run_gpt(tp_init_spec_func=init_1d_col_spec)
 @pytest.mark.dist
-@pytest.mark.skip("buggy test")
 @pytest.mark.parametrize('world_size', [1, 4])
 @rerun_if_address_is_in_use()
 def test_gpt(world_size):
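
Beyond the process-group plumbing, this file re-enables coverage: the @pytest.mark.skip("buggy test") marker is dropped, the row-parallel spec run is uncommented, and the parameterization is restored to the full use_chunk x use_zero x placement_policy matrix. The test leans on the ProcessGroup accessors this commit exposes; a sketch of that surface, where the tp_degree/dp_degree constructor keywords and the 4-rank layout are assumptions for illustration:

    from colossalai.tensor import ProcessGroup

    # Hypothetical 2x2 layout on 4 ranks (tensor parallel x data parallel).
    pg = ProcessGroup(tp_degree=2, dp_degree=2)
    pg.rank()                # this process's rank in the group
    pg.tp_local_rank()       # rank inside its tensor-parallel slice
    pg.tp_world_size()       # size of the tensor-parallel slice
    pg.dp_process_group()    # torch.distributed group for the data-parallel slice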

View File

@@ -20,13 +20,14 @@ from colossalai.tensor import ProcessGroup
 def init_zero(model, use_chunk, use_zero, placement_policy):
+    pg = ProcessGroup()
     chunk_size = ChunkManager.search_chunk_size(model, 8192, 8) if use_chunk else None
     chunk_manager = ChunkManager(chunk_size,
+                                 pg,
                                  enable_distributed_storage=use_zero,
                                  init_device=GeminiManager.get_default_device(placement_policy))
     gemini_manager = GeminiManager(placement_policy, chunk_manager)
-    pg = ProcessGroup()
-    return ZeroDDP(model, gemini_manager, pg)
+    return ZeroDDP(model, gemini_manager)
 def run_step(model, optim, criterion, data, label):