mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-07-05 03:26:48 +00:00)
67 lines
2.4 KiB
Python
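"""Tests for Gemini's chunk-size search and ChunkManager construction.

Both checks build a GPT-2 model from the local test model zoo and expect the
searched chunk size to be 31616.
"""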
import pytest
import torch

import colossalai
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.utils import get_current_device
from colossalai.zero.gemini.chunk import init_chunk_manager, search_chunk_configuration
from tests.components_to_test.registry import non_distributed_component_funcs

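
# NOTE: 'gpt2' names the GPT-2 entry in the local test component registry;
# only its model builder is used by the checks below.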
def exam_search_chunk_size():
    world_size = torch.distributed.get_world_size()

    get_components_func = non_distributed_component_funcs.get_callable('gpt2')
    model_builder, *_ = get_components_func()

    # build a fresh model whose parameters drive the chunk-size search
    model = model_builder()
    config_dict, *_ = search_chunk_configuration(model,
                                                 search_range_m=1,
                                                 search_interval=16,
                                                 min_chunk_size_m=0,
                                                 filter_exlarge_params=True)

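    # each entry of config_dict holds the searched settings for one
    # data-parallel group, including the chosen 'chunk_size'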
    for key in config_dict:
        chunk_size = config_dict[key]['chunk_size']
        assert chunk_size == 31616


def exam_chunk_manager():
    world_size = torch.distributed.get_world_size()

    get_components_func = non_distributed_component_funcs.get_callable('gpt2')
    model_builder, *_ = get_components_func()

    sharded_ddp_model = model_builder()
    chunk_manager = init_chunk_manager(sharded_ddp_model,
                                       get_current_device(),
                                       hidden_dim=16,
                                       search_range_m=1,
                                       min_chunk_size_m=0,
                                       filter_exlarge_params=True,
                                       strict_ddp_flag=True)
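    # with strict_ddp_flag=True all parameters share one data-parallel group,
    # so the manager records exactly one entry, keyed by the full world size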
    config_dict = chunk_manager.dp_degree_chunk_size_dict
    assert len(config_dict) == 1
    assert config_dict[world_size] == 31616


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    exam_search_chunk_size()
    exam_chunk_manager()

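
# each parametrized run spawns `world_size` worker processes;
# @rerun_if_address_is_in_use() retries the test if the rendezvous port is taken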
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_search(world_size):
    spawn(run_dist, world_size)


if __name__ == '__main__':
    test_search(4)