Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-03 10:06:44 +00:00)
[fx] tested the complete workflow for auto-parallel (#1336)
* [fx] tested the complete workflow for auto-parallel
* polish code
* polish code
* polish code
tests/test_fx/test_complete_workflow.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import colossalai
import torch
import torch.nn as nn
import pytest
import torch.multiprocessing as mp
import torch.distributed as dist
from colossalai.testing import rerun_if_address_is_in_use
from functools import partial
from colossalai.fx import ColoTracer
from colossalai.utils.model.lazy_init_context import LazyInitContext
from colossalai.fx.passes.shard_1d_pass import transformer_mlp_pass
from colossalai.utils import free_port
from colossalai.tensor import ProcessGroup


class MLP(torch.nn.Module):

    def __init__(self, dim: int):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)
        self.dropout = torch.nn.Dropout(0)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        x = self.linear1(x)
        x = self.dropout(x)
        x = self.relu(x)
        x = self.linear2(x)
        return x


def run_workflow(world_size):
    # initialization: build the model lazily so that parameters are not materialized yet
    with LazyInitContext() as ctx:
        model = MLP(16)

    # tracing
    tracer = ColoTracer()
    graph = tracer.trace(model)
    gm = torch.fx.GraphModule(model, graph, model.__class__.__name__)

    # annotate the MLP linear layers for 1D tensor parallelism
    annotated_gm = transformer_mlp_pass(gm, process_group=ProcessGroup())
    annotated_gm.recompile()

    # materialization and sharding
    ctx.lazy_init_parameters(annotated_gm)

    # check sharding
    assert list(model.linear1.weight.shape) == [16 // world_size, 16]
    assert list(model.linear1.bias.shape) == [16 // world_size]
    assert list(model.linear2.weight.shape) == [16, 16 // world_size]

    # run a forward pass to make sure that the IR transform produces the same
    # results as ColoTensor would normally
    data = torch.rand(4, 16)
    non_fx_out = model(data)
    fx_out = annotated_gm(data)
    assert torch.equal(non_fx_out, fx_out)


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_workflow(world_size)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_complete_workflow(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_complete_workflow(2)
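For reference, the shape checks above correspond to the usual column-then-row 1D sharding of a two-layer MLP: linear1's weight is split along its output dimension and linear2's weight along its input dimension, so the per-rank partial results sum to the unsharded output. Below is a minimal plain-PyTorch sketch of that arithmetic (no ColossalAI APIs are used; the per-rank loop stands in for the all-reduce a real run would perform):

import torch

dim, world_size = 16, 2
x = torch.rand(4, dim)
w1, b1 = torch.rand(dim, dim), torch.rand(dim)    # linear1: y = x @ w1.T + b1
w2, b2 = torch.rand(dim, dim), torch.rand(dim)    # linear2

# unsharded reference: linear1 -> relu -> linear2
ref = torch.relu(x @ w1.t() + b1) @ w2.t() + b2

# column-parallel linear1 (shard the output dim) + row-parallel linear2 (shard the input dim)
out = torch.zeros_like(ref)
for rank in range(world_size):
    rows = slice(rank * dim // world_size, (rank + 1) * dim // world_size)
    h = torch.relu(x @ w1[rows].t() + b1[rows])    # local activation of shape [4, dim // world_size]
    out += h @ w2[:, rows].t()                     # summed across ranks (all-reduce in a real run)
out += b2

assert torch.allclose(ref, out, atol=1e-5)

This is the layout the assertions encode: linear1.weight ends up as [16 // world_size, 16] and linear2.weight as [16, 16 // world_size].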
@@ -1,59 +0,0 @@
import torch
import torch.nn as nn
import pytest
import colossalai
from colossalai.fx import ColoTracer
from colossalai.fx.passes.shard_1d_pass import transform_mlp_pass

CONFIG = dict(parallel=dict(tensor=dict(size=2, mode='1d')))


class MLP(torch.nn.Module):

    def __init__(self, dim: int):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)
        self.linear3 = torch.nn.Linear(dim, dim)
        self.linear4 = torch.nn.Linear(dim, dim)
        self.dropout = torch.nn.Dropout()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        x = self.relu(self.linear1(x))
        x = self.dropout(self.relu(self.linear2(x)))
        x = self.linear3(x)
        x = torch.nn.functional.relu(self.linear4(x))
        return x


def test_out_acc():
    model = MLP(16).cuda()
    model.eval()
    input_tensor = torch.rand(2, 16).cuda()
    output = model(input_tensor)
    tracer = ColoTracer()
    graph = tracer.trace(model, meta_args={'x': torch.randn((2, 16), device="meta")})
    gm = torch.fx.GraphModule(model, graph, model.__class__.__name__)
    splitted_gm = transform_mlp_pass(gm)
    new_output = splitted_gm(input_tensor)
    assert output.equal(new_output)


def test_linear_acc():
    input_tensor = torch.rand(2, 16).cuda()
    model = MLP(16).cuda()
    tracer = ColoTracer()
    graph = tracer.trace(model, meta_args={'x': torch.randn((2, 16), device="meta")})
    gm = torch.fx.GraphModule(model, graph, model.__class__.__name__)
    splitted_gm = transform_mlp_pass(gm)
    col_shard = True
    for node in splitted_gm.graph.nodes:
        if node.op == "call_module" and isinstance(node.graph.owning_module.get_submodule(node.target), torch.nn.Linear):
            target_module = node.graph.owning_module.get_submodule(node.target)
            dim = 0 if col_shard else -1
            assert target_module.weight.fx_attr == (dim, "SHARD", "TP", "col_needs_many_outputs")
            col_shard = not col_shard


if __name__ == "__main__":
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    # colossalai.launch_from_torch(config=CONFIG)
    test_out_acc()
    test_linear_acc()
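Both the new and the removed test end with the same sanity check: tracing and recompiling the module must not change its numerical output. A minimal sketch of that check with stock torch.fx (using symbolic_trace in place of ColoTracer, which is an assumption for illustration only):

import torch

class TinyMLP(torch.nn.Module):
    def __init__(self, dim: int = 16):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)

    def forward(self, x):
        return self.linear2(torch.relu(self.linear1(x)))

model = TinyMLP().eval()
gm = torch.fx.symbolic_trace(model)    # stock tracer stands in for ColoTracer here
data = torch.rand(2, 16)
assert torch.equal(model(data), gm(data))    # the traced GraphModule reuses the same parameters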