Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-05 02:51:59 +00:00
[test] merge old components to test to model zoo (#4945)
* [test] add custom models in model zoo
* [test] update legacy test
* [test] update model zoo
* [test] update gemini test
* [test] remove components to test
@@ -4,10 +4,9 @@ import numpy as np
 import pytest
 import torch
 
-from colossalai.testing import clear_cache_before_run
+from colossalai.testing import DummyDataloader, clear_cache_before_run
 from colossalai.zero.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTracer
-from tests.components_to_test import run_fwd_bwd
-from tests.components_to_test.registry import non_distributed_component_funcs
+from tests.kit.model_zoo import model_zoo, run_fwd_bwd
 
 
 @pytest.mark.skip("this is not used")
@@ -16,21 +15,22 @@ def test_runtime_mem_tracer():
     test_models = ["gpt2", "bert", "simple_net", "repeated_computed_layers", "nested_model", "albert"]
 
     for model_name in test_models:
-        get_components_func = non_distributed_component_funcs.get_callable(model_name)
-        model_builder, train_dataloader, _, _, criterion = get_components_func()
+        model_builder, data_gen_fn, output_transform_fn, *_ = next(
+            iter(model_zoo.get_sub_registry(model_name).values())
+        )
 
-        model = model_builder(checkpoint=False).cuda()
+        model = model_builder().cuda()
 
         model_bk = deepcopy(model)
         runtime_mem_tracer = RuntimeMemTracer(model)
 
-        for i, (data, label) in enumerate(train_dataloader):
+        train_dataloader = DummyDataloader(data_gen_fn)
+        for i, data in enumerate(train_dataloader):
             if i > 1:
                 break
-            data = data.cuda()
-            label = label.cuda()
+            data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}
 
-            run_fwd_bwd(runtime_mem_tracer, data, label, criterion, optimizer=runtime_mem_tracer)
+            run_fwd_bwd(runtime_mem_tracer, data, output_transform_fn, optimizer=runtime_mem_tracer)
 
         for p1, p2 in zip(model_bk.parameters(), model.parameters()):
            torch.allclose(p1.to(torch.half), p2)
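For reference, the new-side lines of the diff assemble into the self-contained sketch below. It uses only what the updated test imports (DummyDataloader, RuntimeMemTracer, model_zoo, run_fwd_bwd); the assumptions, taken from the diff rather than from documented APIs, are that each model_zoo sub-registry entry unpacks as (model_builder, data_gen_fn, output_transform_fn, ...) and that DummyDataloader keeps yielding batches produced by data_gen_fn.

from copy import deepcopy

import torch

from colossalai.testing import DummyDataloader
from colossalai.zero.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTracer
from tests.kit.model_zoo import model_zoo, run_fwd_bwd


def trace_one_model(model_name: str = "simple_net") -> None:
    # Take the first registered entry for this model family; the diff relies on
    # the tuple layout (model_builder, data_gen_fn, output_transform_fn, ...).
    model_builder, data_gen_fn, output_transform_fn, *_ = next(
        iter(model_zoo.get_sub_registry(model_name).values())
    )

    model = model_builder().cuda()
    model_bk = deepcopy(model)  # untouched copy, compared against after tracing
    runtime_mem_tracer = RuntimeMemTracer(model)

    # DummyDataloader keeps producing batches from data_gen_fn, so the loop
    # bounds itself to two steps, exactly as the updated test does.
    for i, data in enumerate(DummyDataloader(data_gen_fn)):
        if i > 1:
            break
        # Batches are dicts; move only the tensor values onto the GPU.
        data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}
        run_fwd_bwd(runtime_mem_tracer, data, output_transform_fn, optimizer=runtime_mem_tracer)

    # The test compares parameters against a half-precision view of the copy;
    # note it calls torch.allclose without asserting, and that is kept as-is here.
    for p1, p2 in zip(model_bk.parameters(), model.parameters()):
        torch.allclose(p1.to(torch.half), p2)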