Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-12 12:47:21 +00:00
[Gemini] rename ParamTracerWrapper -> RuntimeMemTracer (#2073)
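For user code, the rename only changes the import path and the wrapper's name; the constructor call stays `RuntimeMemTracer(model)`. A minimal before/after sketch, assuming only what the diff below shows (the toy `torch.nn.Linear` is a hypothetical stand-in for the registry test models):

import torch

from colossalai.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTracer
from colossalai.utils.model.colo_init_context import ColoInitContext

# Build the model on CPU under ColoInitContext, as the test below does.
with ColoInitContext(device=torch.device('cpu')):
    model = torch.nn.Linear(8, 8)    # hypothetical stand-in for the registry models

# Before this commit:
#     from colossalai.gemini.memory_tracer.param_tracer_wrapper import ParamTracerWrapper
#     model = ParamTracerWrapper(model)
# After:
runtime_mem_tracer = RuntimeMemTracer(model)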
@@ -1,11 +1,15 @@
 from copy import deepcopy
 
+import numpy as np
 import torch
 
-from colossalai.gemini.memory_tracer.param_tracer_wrapper import ParamTracerWrapper
+from colossalai.gemini.memory_tracer.model_data_memtracer import GLOBAL_CUDA_MEM_INFO
+from colossalai.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTracer
 from colossalai.utils.model.colo_init_context import ColoInitContext
+from tests.components_to_test import run_fwd_bwd
 from tests.components_to_test.registry import non_distributed_component_funcs
 
 
 def run_fwd_bwd(model, data, label, criterion, enable_autocast=False, dtype=torch.half):
     with torch.cuda.amp.autocast(enabled=enable_autocast):
         if criterion:
@@ -16,9 +20,9 @@ def run_fwd_bwd(model, data, label, criterion, enable_autocast=False, dtype=torc
         loss = loss.to(dtype)
     model.backward(loss)
 
 
 def run_param_wrapper_testing():
     test_models = ['simple_net', 'repeated_computed_layers', 'nested_model']
 
     for model_name in test_models:
         get_components_func = non_distributed_component_funcs.get_callable(model_name)
         model_builder, train_dataloader, _, _, criterion = get_components_func()
@@ -26,7 +30,8 @@ def run_param_wrapper_testing():
         with ColoInitContext(device=torch.device('cpu')):
             model = model_builder(checkpoint=False)
 
-        model = ParamTracerWrapper(model)
+        model_bk = deepcopy(model)
+        runtime_mem_tracer = RuntimeMemTracer(model)
 
         for i, (data, label) in enumerate(train_dataloader):
             if i > 1:
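The `deepcopy` taken before wrapping keeps a pristine copy of the parameters; the hunk below compares it against the traced model after a forward/backward pass. Note that the comparison calls `torch.allclose` but discards the returned boolean, so a mismatch would pass silently. A hypothetical assertive version of the same check (not part of this commit; `reference` is the `model_bk` copy, `traced` the wrapped model):

import torch


def assert_params_unchanged(reference, traced):
    # Half-cast each pristine parameter before comparing, since the check in the
    # diff suggests the tracer keeps wrapped parameters in fp16.
    for p1, p2 in zip(reference.parameters(), traced.parameters()):
        assert torch.allclose(p1.to(torch.half), p2), "params diverged after traced fwd/bwd"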
@@ -34,15 +39,17 @@ def run_param_wrapper_testing():
             data = data.cuda()
             label = label.cuda()
 
-            run_fwd_bwd(model, data, label, criterion, False)
+            run_fwd_bwd(runtime_mem_tracer, data, label, criterion, False)
 
-        cuda_non_model_data_list = np.array(GLOBAL_CUDA_MEM_INFO.non_model_data_list) / 1024 ** 2
+        for p1, p2 in zip(model_bk.parameters(), model.parameters()):
+            torch.allclose(p1.to(torch.half), p2)
+
+        cuda_non_model_data_list = np.array(GLOBAL_CUDA_MEM_INFO.non_model_data_list) / 1024**2
         print("cuda_non_model_data_list", len(cuda_non_model_data_list))
         # print(GLOBAL_CUDA_MEM_INFO.non_model_data_list)
 
         del model
 
 
 if __name__ == '__main__':
     run_param_wrapper_testing()
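For reference, `GLOBAL_CUDA_MEM_INFO.non_model_data_list` appears to hold one value per traced step, presumably in bytes, so dividing the array by `1024**2` converts it to MiB. A small self-contained example (the byte counts are made up):

import numpy as np

non_model_data_bytes = [8_388_608, 12_582_912]    # two traced steps, in bytes (made up)
print(np.array(non_model_data_bytes) / 1024**2)   # -> [ 8. 12.]  (MiB)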