[Gemini] patch for supporting torch.add_ function for ColoTensor (#2003)

Author: Jiarui Fang (committed by GitHub)
Date: 2022-11-25 20:06:35 +08:00
parent 632753abbc
commit 8daf1b4db1
7 changed files with 60 additions and 95 deletions
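
Gemini needs inline (in-place) ops such as torch.Tensor.add_ to pass through ColoTensor's dispatch without losing the tensor type. As a rough illustration only, not ColossalAI's actual implementation, here is how a torch.Tensor subclass survives an in-place op via __torch_function__; DemoColoTensor is a hypothetical stand-in for colossalai.tensor.ColoTensor:

import torch

class DemoColoTensor(torch.Tensor):
    # Hypothetical stand-in for ColoTensor, which additionally carries a
    # distributed tensor spec; only the dispatch path is sketched here.
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # Delegate to the default handler. In-place ops like Tensor.add_
        # mutate args[0] and return it, so the subclass type is preserved.
        return super().__torch_function__(func, types, args, kwargs)

x = torch.randn(2, 2).as_subclass(DemoColoTensor)
x.add_(10)    # the inline op dispatches through __torch_function__
assert isinstance(x, DemoColoTensor)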


@@ -16,14 +16,14 @@ class InlineOpModule(CheckpointModule):
     def __init__(self, checkpoint=False) -> None:
         super().__init__(checkpoint=checkpoint)
         self.proj1 = nn.Linear(4, 8)
+        self.weight = nn.Parameter(torch.randn(8, 8))
-        self.proj2 = nn.Linear(8, 4)
+        self.proj2 = nn.Linear(8, 8)

     def forward(self, x):
         x = self.proj1(x)
         # inline add_
         x.add_(10)
+        x = F.linear(x, self.weight)
-        x = self.proj2(x)
         # inline relu_
         x = torch.relu_(x)
+        x = self.proj2(x)
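
Reassembled for readability, the updated module reads as follows. This is a sketch: nn.Module stands in for the repo's CheckpointModule, and the trailing return is assumed, since it falls outside the hunk:

import torch
import torch.nn as nn
import torch.nn.functional as F

class InlineOpModule(nn.Module):    # CheckpointModule in the real test suite
    def __init__(self, checkpoint=False) -> None:
        super().__init__()
        self.proj1 = nn.Linear(4, 8)
        self.weight = nn.Parameter(torch.randn(8, 8))
        self.proj2 = nn.Linear(8, 8)

    def forward(self, x):
        x = self.proj1(x)
        # inline add_ on an activation
        x.add_(10)
        # matmul against a bare nn.Parameter, added in this commit
        x = F.linear(x, self.weight)
        # inline relu_
        x = torch.relu_(x)
        return self.proj2(x)    # return assumed; not shown in the hunk

out = InlineOpModule()(torch.randn(2, 4))    # feature sizes: 4 -> 8 -> 8 -> 8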


@@ -15,7 +15,7 @@ from tests.components_to_test.registry import non_distributed_component_funcs
 def run_gemini_fwd_bwd(rank, world_size, port, model_name: str, iter_num=2):
-    PLACEMENT_POLICY = 'cuda'
+    PLACEMENT_POLICY = 'auto'
     disable_existing_loggers()
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
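
The PLACEMENT_POLICY switch above moves the test from pinning every parameter chunk on GPU ('cuda') to Gemini's memory-stats-driven placement ('auto'). A hedged usage sketch; build_model is a hypothetical helper and the exact GeminiDDP constructor signature varies across ColossalAI releases:

import torch
from colossalai.nn.parallel import GeminiDDP

# 'cuda' keeps all chunks on the GPU; 'auto' lets Gemini shuttle chunks
# between GPU and CPU based on runtime memory statistics.
model = build_model()    # hypothetical helper returning an nn.Module
model = GeminiDDP(model, device=torch.device('cuda'), placement_policy='auto')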
@@ -52,9 +52,9 @@ def run_gemini_fwd_bwd(rank, world_size, port, model_name: str, iter_num=2):
     print(f'pass test {model_name}')

-@pytest.mark.parametrize("model_name", ['bert'])
+@pytest.mark.parametrize("model_name", ["inline_op_model", "bert", "simple_net", "gpt2", "resnet18"])
 @rerun_if_address_is_in_use()
-def test_gemini_train(model_name, iter_num=2):
+def test_gemini_train(model_name, iter_num=4):
     run_func = partial(run_gemini_fwd_bwd, world_size=1, port=free_port(), model_name=model_name, iter_num=iter_num)
     mp.spawn(run_func, nprocs=1)
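
Each parametrized case runs in its own spawned process. A stripped-down sketch of the pattern; the stand-in body and the fixed port replace the real test's colossalai launch and free_port():

from functools import partial
import torch.multiprocessing as mp

def run_gemini_fwd_bwd(rank, world_size, port, model_name, iter_num=2):
    # Stand-in body; the real test launches colossalai here and runs
    # iter_num forward/backward passes over the named model.
    print(f"rank {rank}: {model_name} for {iter_num} iters on port {port}")

if __name__ == '__main__':
    # mp.spawn prepends the process rank, so the partial binds the rest;
    # the real test picks the port with free_port().
    run_func = partial(run_gemini_fwd_bwd, world_size=1, port=29500, model_name='bert', iter_num=4)
    mp.spawn(run_func, nprocs=1)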
@@ -63,5 +63,5 @@ if __name__ == '__main__':
     # for model_name in ["bert", "resnet18", "inline_op_model"]:
     # bert, gpt, inline_op_model, nested_model, no_leaf_module,
     # repeated_computed_layer, resnet, simple_net
-    for model_name in ["nested_model", "no_leaf_module"]:
+    for model_name in ["resnet18"]:
         test_gemini_train(model_name=model_name, iter_num=4)