[shardformer] refactored embedding and dropout to parallel module (#4013)

* [shardformer] refactored embedding and dropout to parallel module

* polish code
Frank Lee
2023-06-16 15:00:26 +08:00
parent dfca9678fa
commit 3893fa1a8d
6 changed files with 198 additions and 423 deletions


@@ -0,0 +1,43 @@
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close

import colossalai
from colossalai.shardformer.layer.layers import Embedding1D
from colossalai.testing import rerun_if_address_is_in_use, spawn


def check_embedding_1d():
    embedding = nn.Embedding(32, 128).cuda()
    embedding_1d = Embedding1D.from_native_module(embedding, process_group=None)

    assert embedding_1d.weight.shape == torch.Size([32, 64])

    # check computation correctness
    x = torch.randint(low=0, high=32, size=(4, 32)).cuda()
    out = embedding(x)
    gather_out = embedding_1d(x)
    assert_close(out, gather_out)

    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()

    rank = dist.get_rank()
    target_grad = torch.chunk(embedding.weight.grad, 2, dim=1)[rank]
    assert_close(target_grad, embedding_1d.weight.grad)


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    check_embedding_1d()


@rerun_if_address_is_in_use()
def test_embedding_1d():
    spawn(run_dist, nprocs=2)


if __name__ == '__main__':
    test_embedding_1d()
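
For reference, a minimal standalone sketch of the conversion pattern the test exercises, assuming a 2-rank NCCL group has already been initialised via colossalai.launch as in run_dist above; the sizes are illustrative, and the sharding/gathering behaviour described in the comments is inferred from the assertions in the test rather than stated by the commit itself:

import torch
import torch.nn as nn

from colossalai.shardformer.layer.layers import Embedding1D

# Wrap a native nn.Embedding into its 1D column-parallel counterpart.
# Judging by the test above, each rank keeps 128 // world_size columns of the
# weight, while the forward pass gathers the partial results so callers still
# see the full 128-dim output.
native = nn.Embedding(32, 128).cuda()
parallel = Embedding1D.from_native_module(native, process_group=None)

tokens = torch.randint(low=0, high=32, size=(4, 32)).cuda()
out = parallel(tokens)  # same shape and values as native(tokens)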