Mirror of https://github.com/hpcaitech/ColossalAI.git
[rpc] split with dag (#2028)

* add DAG to split_module
* add comment
* add test case for DAG
* remove print
* add DAG middleware in scheduler
* add test case for scheduler
* remove break
* recover old lifecycle

Co-authored-by: Ziyue Jiang <ziyue.jiang@gmail.com>
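The first bullet, adding a DAG to split_module, refers to recording the data dependencies between the partitions the split pass produces, so the pipeline scheduler knows which stage feeds which. As an illustration only, here is a minimal sketch in plain torch.fx (not ColossalAI's actual middleware) of deriving such a dependency DAG over split partitions; the Toy model and the mod_partition callback are made up for the example:

import torch
from torch import nn
from torch.fx import symbolic_trace
from torch.fx.passes.split_module import split_module

class Toy(nn.Module):
    # Two linears so the split produces two partitions.
    def __init__(self):
        super().__init__()
        self.a = nn.Linear(4, 4)
        self.b = nn.Linear(4, 4)

    def forward(self, x):
        return self.b(self.a(x))

root = Toy()
traced = symbolic_trace(root)

# Toy policy: put each traced node into its own partition.
part_idx = {}
def mod_partition(node):
    return part_idx.setdefault(node.name, len(part_idx))

split = split_module(traced, root, mod_partition)

# The DAG: an edge u -> v means partition v consumes an output of u.
dag = {
    n.target: [u.target for u in n.users if u.op == 'call_module']
    for n in split.graph.nodes if n.op == 'call_module'
}
print(dag)  # {'submod_0': ['submod_1'], 'submod_1': []}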
The first hunk adds a simple stacked-Linear MLP helper to the shared test utilities (imported below as `from rpc_test_utils import MLP`):

@@ -20,6 +20,18 @@ def color_debug(text, prefix=' ', color='blue'):
    color = color.upper()
    print(getattr(Back, color), prefix, Style.RESET_ALL, text)


class MLP(nn.Module):

    def __init__(self, dim: int, layers: int):
        super().__init__()
        self.layers = torch.nn.ModuleList()

        for _ in range(layers):
            self.layers.append(nn.Linear(dim, dim, bias=False))

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x


class RpcTestModel(nn.Module):
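Assuming the repo's test utilities are importable, the helper can be sanity-checked on its own (this snippet is ours, not part of the commit):

import torch
from rpc_test_utils import MLP  # the helper added by this commit

m = MLP(dim=10, layers=3)     # three bias-free 10x10 linear layers
out = m(torch.randn(16, 10))
assert out.shape == (16, 10)  # square layers preserve the feature shape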
tests/test_pipeline/test_middleware_1f1b.py (new file, 60 lines)

The new test traces the MLP with meta tensors, splits it into one balanced stage per pipeline rank, and runs forward-only epochs through the 1F1B engine:
@@ -0,0 +1,60 @@
import torch
from torch import nn

from colossalai.pipeline.rpc._pipeline_schedule import OneFOneBPipelineEngine
from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass
from colossalai.fx import ColoTracer
from rpc_test_utils import rpc_run, parse_args, MLP
from functools import partial

# global variables for the toy model
batch_size = 16
dim = 10


def create_partition_module(pp_rank: int, stage_num: int, model, data_kwargs):
    # Trace with meta tensors (no real memory), annotate a balanced split
    # into `stage_num` stages, materialize the split, and return the
    # submodule that belongs to this pipeline rank.
    model.eval()
    tracer = ColoTracer()
    meta_args = {k: v.to('meta') for k, v in data_kwargs.items()}
    graph = tracer.trace(root=model, meta_args=meta_args)
    gm = torch.fx.GraphModule(model, graph, model.__class__.__name__)
    annotated_model = balanced_split_pass(gm, stage_num)
    split_model, _ = split_with_split_nodes_pass(annotated_model, merge_output=True)
    return list(split_model.children())[pp_rank]


def partition(data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int):
    # Fixed seed so every rank builds identical weights before slicing
    # out its own stage.
    torch.manual_seed(1024)
    model = MLP(dim, stage_num * 3)
    partition = create_partition_module(pp_rank, stage_num, model, data_kwargs)
    return partition


def run_master(args):
    torch.manual_seed(100)

    epoch = args.epoch
    device = args.device
    stage_num = args.world_size
    chunk = args.chunk
    num_microbatches = args.num_microbatches
    use_checkpoint = args.use_checkpoint

    input_sample = torch.randn((batch_size, dim), device=device)

    def data_gen():
        x = torch.zeros((batch_size, dim))
        kwargs = dict(x=x)
        return kwargs

    data_kwargs = data_gen()
    engine = OneFOneBPipelineEngine(partition_fn=partial(partition, data_kwargs),
                                    stage_num=stage_num,
                                    num_microbatches=num_microbatches,
                                    device=device,
                                    chunk=chunk,
                                    checkpoint=use_checkpoint)

    for _ in range(epoch):
        logits = engine.forward_backward({'x': input_sample}, forward_only=True)


if __name__ == "__main__":
    args = parse_args()
    rpc_run(args, run_master)
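For reference, the split logic can be exercised without any RPC workers. This snippet (ours, not part of the commit, written as if appended to the test module) reuses the `partition` helper above to build both stages in one process and chain them, assuming each stage submodule maps a tensor to a tensor:

# Hypothetical single-process check: 2 stages, 6-layer MLP -> two submodules.
stages, chunk = 2, 1
sample = dict(x=torch.zeros(batch_size, dim))
parts = [partition(sample, rank, chunk, stages) for rank in range(stages)]
out = parts[1](parts[0](sample['x']))
print(out.shape)  # expected: torch.Size([16, 10])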
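On the engine itself: OneFOneBPipelineEngine is named after the classic 1F1B schedule, in which each stage runs a few warmup forwards and then alternates one forward with one backward, capping the number of in-flight activations. A generic illustration of that ordering (our sketch, unrelated to the engine's actual internals):

# A generic 1F1B schedule generator (illustration only).
def one_f_one_b(stage: int, num_stages: int, num_microbatches: int):
    # Warmup: earlier stages queue more forwards before the first backward.
    warmup = min(num_stages - stage - 1, num_microbatches)
    steps = [('F', i) for i in range(warmup)]
    # Steady state: alternate one forward with one backward.
    for i in range(num_microbatches - warmup):
        steps.append(('F', warmup + i))
        steps.append(('B', i))
    # Cooldown: drain the remaining backwards.
    for i in range(num_microbatches - warmup, num_microbatches):
        steps.append(('B', i))
    return steps

print(one_f_one_b(stage=0, num_stages=2, num_microbatches=4))
# [('F', 0), ('F', 1), ('B', 0), ('F', 2), ('B', 1), ('F', 3), ('B', 2), ('B', 3)]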