mirror of https://github.com/hpcaitech/ColossalAI.git
synced 2025-09-11 13:59:08 +00:00
[test] merge old components to test to model zoo (#4945)
* [test] add custom models in model zoo
* [test] update legacy test
* [test] update model zoo
* [test] update gemini test
* [test] remove components to test
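The change swaps the old tests.components_to_test registry for the model-zoo kit. A minimal sketch of the new lookup pattern, using the names that appear in the diff below (the tuple layout is taken from the diff; the registry internals are assumed here for illustration, not the actual model-zoo implementation):

from tests.kit.model_zoo import model_zoo

# Each sub-registry entry bundles a model builder with its data and loss helpers.
entry = next(iter(model_zoo.get_sub_registry("transformers_gpt_lm").values()))
model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = entry

model = model_builder()  # fresh model instance
batch = data_gen_fn()    # dict of tensors keyed by forward-argument name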
@@ -7,12 +7,11 @@ from torch.testing import assert_close
 import colossalai
 from colossalai.legacy.amp import convert_to_apex_amp
 from colossalai.nn.optimizer import HybridAdam
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
+from colossalai.testing import DummyDataloader, parameterize, rerun_if_address_is_in_use, spawn
 from colossalai.utils import set_seed
 from colossalai.zero import GeminiDDP, GeminiOptimizer
 from colossalai.zero.gemini.chunk import search_chunk_configuration
-from tests.components_to_test import run_fwd_bwd
-from tests.components_to_test.registry import non_distributed_component_funcs
+from tests.kit.model_zoo import model_zoo, run_fwd_bwd

 PLACEMENT_CONFIGS = [
     {
@@ -51,12 +50,13 @@ def check_param(model: GeminiDDP, torch_model: torch.nn.Module):


 @parameterize("placement_config", PLACEMENT_CONFIGS)
-@parameterize("model_name", ["gpt2"])
+@parameterize("model_name", ["transformers_gpt_lm"])
 @parameterize("master_weights", [True, False])
 def exam_grad_clipping(placement_config, model_name: str, master_weights: bool):
     set_seed(1912)
-    get_components_func = non_distributed_component_funcs.get_callable(model_name)
-    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
+    model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
+        iter(model_zoo.get_sub_registry(model_name).values())
+    )

     torch_model = model_builder().cuda()
     amp_config = dict(opt_level="O2", keep_batchnorm_fp32=False, loss_scale=32)
@@ -94,21 +94,17 @@ def exam_grad_clipping(placement_config, model_name: str, master_weights: bool):
     torch_model.train()

     set_seed(dist.get_rank() * 3 + 128)
-    for i, (data, label) in enumerate(train_dataloader):
+    train_dataloader = DummyDataloader(data_gen_fn)
+    for i, data in enumerate(train_dataloader):
         if i > 2:
             break
-        data = data.cuda()
-        label = label.cuda()
+        data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}

         zero_optim.zero_grad()
         torch_optim.zero_grad()

-        torch_loss = run_fwd_bwd(torch_model, data, label, criterion, torch_optim)
-        loss = run_fwd_bwd(model, data, label, criterion, zero_optim)
-
-        # as no master weights leads to error accumulation, we don't check the loss
-        if master_weights:
-            assert_close(torch_loss, loss)
+        run_fwd_bwd(torch_model, data, output_transform_fn, loss_fn, optimizer=torch_optim)
+        run_fwd_bwd(model, data, output_transform_fn, loss_fn, optimizer=zero_optim)

         import apex.amp as apex_amp
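For reference, the two helpers the rewritten loop relies on behave roughly as sketched below. This is inferred from the call sites in the diff above, not copied from the actual implementations in colossalai.testing and tests/kit/model_zoo:

class DummyDataloader:
    # Sketch: yields a stream of fresh batches produced by data_gen_fn,
    # so the test no longer needs a real dataset or dataloader.
    def __init__(self, data_gen_fn, length=10):
        self.data_gen_fn = data_gen_fn
        self.length = length

    def __iter__(self):
        for _ in range(self.length):
            yield self.data_gen_fn()

def run_fwd_bwd(model, data, output_transform_fn, loss_fn, optimizer=None):
    # Sketch: forward on a dict batch, transform the raw outputs, compute the
    # loss, then backward through the optimizer when it owns the backward pass
    # (GeminiOptimizer / apex amp style), else through plain autograd.
    outputs = output_transform_fn(model(**data))
    loss = loss_fn(outputs)
    if optimizer is not None and hasattr(optimizer, "backward"):
        optimizer.backward(loss)
    else:
        loss.backward()
    return loss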