[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Author:    Hongxin Liu
Date:      2023-09-19 14:20:26 +08:00
Committed: GitHub
Parent:    3c6b831c26
Commit:    079bf3cb26
1268 changed files with 50037 additions and 38444 deletions
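
Most of the churn in the diff below is mechanical: the updated Black hook normalizes single-quoted strings to double quotes and reflows long call sites. As a rough sketch only (not the actual file from this PR), a .pre-commit-config.yaml covering the commit-message items might look like the following; the pinned revisions and the exclude pattern are assumptions:

    repos:
      # Black normalizes string quotes to double quotes, which accounts
      # for most of the reformatting visible in the diff below.
      - repo: https://github.com/psf/black
        rev: 23.9.1  # illustrative pin, not necessarily the rev used in this PR
        hooks:
          - id: black
      # clang-format handles C/C++ sources; CUDA files are skipped,
      # matching the "[misc] ignore cuda for clang-format" item above.
      - repo: https://github.com/pre-commit/mirrors-clang-format
        rev: v16.0.6  # illustrative pin
        hooks:
          - id: clang-format
            exclude: \.cu$

With the hooks updated, running pre-commit run --all-files applies them to every tracked file, which is why a hook bump touches 1268 files in a single commit.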

@@ -14,10 +14,10 @@ from tests.components_to_test.registry import non_distributed_component_funcs
 # run gemini use the runtime memory tracer
-@parameterize('placement_policy', ['auto'])
-@parameterize('keep_gather', [False])
-@parameterize('model_name', ['repeated_computed_layers', 'bert', 'albert', 'gpt2'])
-@parameterize('use_grad_checkpoint', [False, True])
+@parameterize("placement_policy", ["auto"])
+@parameterize("keep_gather", [False])
+@parameterize("model_name", ["repeated_computed_layers", "bert", "albert", "gpt2"])
+@parameterize("use_grad_checkpoint", [False, True])
 def run_gemini_use_rmt(placement_policy, keep_gather, model_name: str, use_grad_checkpoint: bool = False):
     set_seed(42)
     get_components_func = non_distributed_component_funcs.get_callable(model_name)
@@ -25,7 +25,7 @@ def run_gemini_use_rmt(placement_policy, keep_gather, model_name: str, use_grad_
     model = model_builder(use_grad_checkpoint).cuda()
-    print(f'model_name {model_name}')
+    print(f"model_name {model_name}")
     runtime_mem_tracer = RuntimeMemTracer(model)
     for i, (input_ids, label) in enumerate(train_dataloader):
         if i > 0:
@@ -37,17 +37,17 @@ def run_gemini_use_rmt(placement_policy, keep_gather, model_name: str, use_grad_
         run_fwd_bwd(runtime_mem_tracer, input_ids, label, criterion, runtime_mem_tracer)
     memstats = runtime_mem_tracer.memstats()
     runtime_tracer_non_model_data = runtime_mem_tracer._memstats._non_model_data_cuda_list
-    print('runtime tracer non model data points: ', len(runtime_tracer_non_model_data))
-    print('runtime tracer: ', runtime_tracer_non_model_data)
+    print("runtime tracer non model data points: ", len(runtime_tracer_non_model_data))
+    print("runtime tracer: ", runtime_tracer_non_model_data)
     print([memstats.param_used_step(p) for p in model.parameters()])
-    if model_name == 'repeated_computed_layers':
+    if model_name == "repeated_computed_layers":
         for idx, p in enumerate(model.parameters()):
             step_list = memstats.param_used_step(p)
             if idx < 4:
                 assert len(step_list) == 4
-    if model_name == 'repeated_computed_layers':
+    if model_name == "repeated_computed_layers":
         for idx, p in enumerate(model.parameters()):
             step_list = memstats.param_used_step(p)
             if idx < 4:
@@ -55,13 +55,11 @@ def run_gemini_use_rmt(placement_policy, keep_gather, model_name: str, use_grad_
     world_size = torch.distributed.get_world_size()
     config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
-    config_dict[world_size]['chunk_size'] = 5000
-    config_dict[world_size]['keep_gathered'] = keep_gather
-    model = GeminiDDP(model,
-                      chunk_config_dict=config_dict,
-                      placement_policy=placement_policy,
-                      pin_memory=True,
-                      memstats=memstats)
+    config_dict[world_size]["chunk_size"] = 5000
+    config_dict[world_size]["keep_gathered"] = keep_gather
+    model = GeminiDDP(
+        model, chunk_config_dict=config_dict, placement_policy=placement_policy, pin_memory=True, memstats=memstats
+    )
     set_seed(dist.get_rank())
     for i, (input_ids, label) in enumerate(train_dataloader):
@@ -73,29 +71,30 @@ def run_gemini_use_rmt(placement_policy, keep_gather, model_name: str, use_grad_
             input_ids, label = input_ids.cuda(), label.cuda()
         set_seed(42)
-        loss = run_fwd_bwd(model, input_ids, label, criterion, model)
+        run_fwd_bwd(model, input_ids, label, criterion, model)
-    gemini_non_model_data = model.gemini_manager._mem_stats_collector._memstats.non_model_data_list('cuda')
+    gemini_non_model_data = model.gemini_manager._mem_stats_collector._memstats.non_model_data_list("cuda")
     # print('gemini non model data:', gemini_non_model_data)
-    assert len(gemini_non_model_data) == len(runtime_tracer_non_model_data), \
-        f'model_name {model_name} {len(gemini_non_model_data)} vs {len(runtime_tracer_non_model_data)}'
+    assert len(gemini_non_model_data) == len(
+        runtime_tracer_non_model_data
+    ), f"model_name {model_name} {len(gemini_non_model_data)} vs {len(runtime_tracer_non_model_data)}"

 def run_dist(rank, world_size, port):
     config = {}
-    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
+    colossalai.launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_gemini_use_rmt()

 @pytest.mark.skip("this is not used")
 @pytest.mark.dist
-@pytest.mark.parametrize('world_size', [1, 4])
+@pytest.mark.parametrize("world_size", [1, 4])
 @rerun_if_address_is_in_use()
 def test_gemini_use_rmt(world_size):
     spawn(run_dist, world_size)

-if __name__ == '__main__':
+if __name__ == "__main__":
     test_gemini_use_rmt(1)