Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-05 19:13:01 +00:00)
[gemini] improve compatibility and add static placement policy (#4479)
* [gemini] remove distributed-related part from colotensor (#4379)
* [gemini] remove process group dependency
* [gemini] remove tp part from colo tensor
* [gemini] patch inplace op
* [gemini] fix param op hook and update tests
* [test] remove useless tests
* [test] remove useless tests
* [misc] fix requirements
* [test] fix model zoo
* [test] fix model zoo
* [test] fix model zoo
* [test] fix model zoo
* [test] fix model zoo
* [misc] update requirements
* [gemini] refactor gemini optimizer and gemini ddp (#4398)
* [gemini] update optimizer interface
* [gemini] renaming gemini optimizer
* [gemini] refactor gemini ddp class
* [example] update gemini related example
* [example] update gemini related example
* [plugin] fix gemini plugin args
* [test] update gemini ckpt tests
* [gemini] fix checkpoint io
* [example] fix opt example requirements
* [example] fix opt example
* [example] fix opt example
* [example] fix opt example
* [gemini] add static placement policy (#4443)
* [gemini] add static placement policy
* [gemini] fix param offload
* [test] update gemini tests
* [plugin] update gemini plugin
* [plugin] update gemini plugin docstr
* [misc] fix flash attn requirement
* [test] fix gemini checkpoint io test
* [example] update resnet example result (#4457)
* [example] update bert example result (#4458)
* [doc] update gemini doc (#4468)
* [example] update gemini related examples (#4473)
* [example] update gpt example
* [example] update dreambooth example
* [example] update vit
* [example] update opt
* [example] update palm
* [example] update vit and opt benchmark
* [hotfix] fix bert in model zoo (#4480)
* [hotfix] fix bert in model zoo
* [test] remove chatglm gemini test
* [test] remove sam gemini test
* [test] remove vit gemini test
* [hotfix] fix opt tutorial example (#4497)
* [hotfix] fix opt tutorial example
* [hotfix] fix opt tutorial example
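The headline change of this PR is the static placement policy for Gemini. Below is a minimal usage sketch of selecting it through the Gemini booster plugin; the fraction arguments follow the plugin docstring updates listed above, but the exact names, defaults, and the toy model are assumptions rather than code taken from this commit. The script is meant to be launched with torchrun.

# Usage sketch (assumptions, not code from this commit): select the new
# "static" placement policy through the booster plugin.
import torch
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin

colossalai.launch_from_torch(config={})

plugin = GeminiPlugin(
    placement_policy="static",    # fixed placement instead of the dynamic "auto" policy
    shard_param_frac=1.0,         # assumed arg: fraction of parameters sharded across ranks
    offload_optim_frac=0.0,       # assumed arg: fraction of optimizer states offloaded to CPU
    offload_param_frac=0.0,       # assumed arg: fraction of parameters offloaded to CPU
)
booster = Booster(plugin=plugin)

model = torch.nn.Sequential(torch.nn.Linear(1024, 1024), torch.nn.GELU(), torch.nn.Linear(1024, 1024))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = torch.nn.MSELoss()
model, optimizer, criterion, *_ = booster.boost(model, optimizer, criterion=criterion)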
@@ -2,33 +2,20 @@ import pytest
 import torch

 import colossalai
-from colossalai.tensor import ComputePattern, ComputeSpec, ProcessGroup, ShardSpec
 from colossalai.testing import rerun_if_address_is_in_use, spawn
 from colossalai.utils import get_current_device
-from colossalai.zero import ColoInitContext
 from colossalai.zero.gemini.chunk import init_chunk_manager, search_chunk_configuration
 from tests.components_to_test.registry import non_distributed_component_funcs


-def init_1d_row_spec(model, pg: ProcessGroup):
-    tensor_spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
-    for n, p in model.named_parameters():
-        if 'weight' in n and 'ln' not in n:
-            p.set_process_group(pg)
-            p.set_tensor_spec(*tensor_spec)
-
-
 def exam_search_chunk_size():
     world_size = torch.distributed.get_world_size()
-    pg_tp = ProcessGroup(tp_degree=world_size)

     get_components_func = non_distributed_component_funcs.get_callable('gpt2')
     model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

     # make sure torch_model and model has the same parameter values
-    with ColoInitContext(device=get_current_device()):
-        model = model_builder()
-    init_1d_row_spec(model, pg_tp)
+    model = model_builder()
     config_dict, *_ = search_chunk_configuration(model,
                                                  search_range_m=1,
                                                  search_interval=16,
@@ -37,57 +24,19 @@ def exam_search_chunk_size():

     for key in config_dict:
         chunk_size = config_dict[key]['chunk_size']
-        if world_size == 1:
+        if world_size == 1 or True:
             assert chunk_size == 31616
         else:
             assert chunk_size == 1024


-def exam_search_strict_ddp():
-    world_size = torch.distributed.get_world_size()
-    default_shard_pg = ProcessGroup(tp_degree=world_size)
-    default_shard_spec = ShardSpec([-1], [world_size])
-
-    get_components_func = non_distributed_component_funcs.get_callable('gpt2')
-    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
-    # get the chunk configuration over replicated models
-    with ColoInitContext(device=get_current_device()):
-        ddp_model = model_builder()
-    re_dict, re_total, re_wasted = search_chunk_configuration(ddp_model,
-                                                              search_range_m=1,
-                                                              search_interval=16,
-                                                              min_chunk_size_m=0,
-                                                              filter_exlarge_params=True,
-                                                              strict_ddp_flag=False)
-    # get the chunk configuration over sharded ddp models
-    with ColoInitContext(device=get_current_device(), default_pg=default_shard_pg,
-                         default_dist_spec=default_shard_spec):
-        sharded_ddp_model = model_builder()
-    sh_dict, sh_total, sh_wasted = search_chunk_configuration(sharded_ddp_model,
-                                                              search_range_m=1,
-                                                              search_interval=16,
-                                                              min_chunk_size_m=0,
-                                                              filter_exlarge_params=True,
-                                                              strict_ddp_flag=True)
-    assert re_dict == sh_dict
-    for key in re_dict:
-        assert re_dict[key] == sh_dict[key]
-
-    assert re_total == sh_total
-    assert re_wasted == sh_wasted
-
-
 def exam_chunk_manager():
     world_size = torch.distributed.get_world_size()
-    default_shard_pg = ProcessGroup(tp_degree=world_size)
-    default_shard_spec = ShardSpec([-1], [world_size])

     get_components_func = non_distributed_component_funcs.get_callable('gpt2')
     model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

-    with ColoInitContext(device=get_current_device(), default_pg=default_shard_pg,
-                         default_dist_spec=default_shard_spec):
-        sharded_ddp_model = model_builder()
+    sharded_ddp_model = model_builder()
     chunk_manager = init_chunk_manager(sharded_ddp_model,
                                        get_current_device(),
                                        hidden_dim=16,
@@ -103,7 +52,6 @@ def exam_chunk_manager():
 def run_dist(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
     exam_search_chunk_size()
-    exam_search_strict_ddp()
     exam_chunk_manager()
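The diff is truncated before the pytest entry point that drives run_dist. For context, tests in this suite are usually launched through colossalai.testing.spawn, roughly as in the sketch below; the decorator pattern is inferred from the rerun_if_address_is_in_use and spawn imports kept in the diff, and the test name and world size are assumptions.

# Hypothetical entry point, not part of the diff above: spawn() forks the
# worker processes and calls run_dist(rank, world_size, port) in each one;
# rerun_if_address_is_in_use() retries the test if the rendezvous port is busy.
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_search_chunk_configuration():
    spawn(run_dist, 4)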