mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-21 17:40:33 +00:00)
[legacy] move communication and nn to legacy and refactor logger (#4671)
* [legacy] move communication to legacy (#4640)
* [legacy] refactor logger and clean up legacy codes (#4654)
* [legacy] make logger independent of gpc
* [legacy] make optim independent of registry
* [legacy] move test engine to legacy
* [legacy] move nn to legacy (#4656)
* [legacy] move nn to legacy
* [checkpointio] fix save hf config
* [test] remove useless rpc pp test
* [legacy] fix nn init
* [example] skip tutorial hybrid parallel example
* [devops] test doc check
* [devops] test doc check
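For downstream code, the visible effect of this commit is the import path: the communication and nn modules now live under colossalai.legacy. A minimal sketch of the migration, assuming the pre-commit modules sat directly under colossalai.nn (the old path is inferred, not shown in this diff):

    # before this commit (assumed pre-commit layout)
    # from colossalai.nn import TransformerSelfAttentionRing

    # after this commit, as used in the tests below
    from colossalai.legacy.nn import TransformerSelfAttentionRing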
@@ -0,0 +1,21 @@
import torch

from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.legacy.nn import TransformerSelfAttentionRing
from colossalai.utils import get_current_device


def check_selfattention():
    WORLD_SIZE = gpc.get_world_size(ParallelMode.SEQUENCE)
    SUB_SEQ_LENGTH = 8
    BATCH = 4
    HIDDEN_SIZE = 16

    layer = TransformerSelfAttentionRing(16, 8, 8, 0.1)
    layer = layer.to(get_current_device())

    hidden_states = torch.rand(SUB_SEQ_LENGTH, BATCH, HIDDEN_SIZE).to(get_current_device())
    attention_mask = torch.randint(low=0, high=2,
                                   size=(BATCH, 1, 1, 1, SUB_SEQ_LENGTH * WORLD_SIZE)).to(get_current_device())
    out = layer(hidden_states, attention_mask)
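check_selfattention assumes an already-initialized sequence-parallel context: gpc.get_world_size(ParallelMode.SEQUENCE) only works after launch. A hypothetical driver for this check, modeled on the run_test/spawn pattern in test_sequence.py below (the 4-way sequence-parallel config mirrors CONFIG there):

    import colossalai
    from colossalai.testing import spawn

    def run_check(rank, world_size, port):
        # assumption: same 4-way sequence-parallel config as CONFIG in test_sequence.py
        colossalai.launch(rank=rank,
                          world_size=world_size,
                          config=dict(parallel=dict(tensor=dict(size=4, mode='sequence'))),
                          host='localhost',
                          port=port)
        check_selfattention()

    if __name__ == '__main__':
        spawn(run_check, 4)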
tests/test_legacy/test_layers/test_sequence/test_sequence.py (new file, 139 lines)
@@ -0,0 +1,139 @@
import pytest
import torch
import torch.distributed as dist

import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.legacy.nn.layer.parallel_sequence import RingAV, RingQK
from colossalai.testing import rerun_if_address_is_in_use, spawn

CONFIG = dict(parallel=dict(tensor=dict(size=4, mode='sequence')))


def check_ring_qk(rank, world_size):
    # params
    batch_size = 4
    num_heads = 4
    seq_length = 32
    attention_head_size = 32
    sub_seq_length = seq_length // world_size

    # create master tensors, identical on all ranks
    q = torch.rand(batch_size * num_heads, seq_length, attention_head_size).cuda()
    k = torch.rand(batch_size * num_heads, seq_length, attention_head_size).cuda()
    dist.broadcast(q, src=0, group=gpc.get_group(ParallelMode.SEQUENCE))
    dist.broadcast(k, src=0, group=gpc.get_group(ParallelMode.SEQUENCE))

    # create distributed tensors holding this rank's sub-sequence
    sub_q = q.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()
    sub_k = k.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()

    # set autograd attributes
    q.requires_grad = True
    k.requires_grad = True
    q.retain_grad()
    k.retain_grad()
    sub_q.requires_grad = True
    sub_k.requires_grad = True
    sub_q.retain_grad()
    sub_k.retain_grad()

    # compute master attention scores
    a = torch.matmul(q, k.transpose(2, 1))

    # compute distributed attention scores
    ring_qk = RingQK.apply
    sub_a = ring_qk(sub_q, sub_k, batch_size, num_heads, sub_seq_length)

    # check master and distributed attention scores
    sub_master_a = a[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
    assert torch.allclose(sub_a, sub_master_a, rtol=1e-5, atol=1e-2), \
        'attention score does not match'

    # run master backward
    a.retain_grad()
    a.mean().backward()

    # run distributed backward
    partial_master_a_grad = a.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
    torch.autograd.backward(sub_a, partial_master_a_grad)

    # check master and distributed grads
    partial_master_q_grad = q.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
    assert torch.allclose(sub_q.grad, partial_master_q_grad, rtol=1e-5, atol=1e-2), \
        'query gradient does not match'


def check_ring_av(rank, world_size):
    # params
    batch_size = 4
    num_heads = 4
    seq_length = 16
    attention_head_size = 32
    sub_seq_length = seq_length // world_size

    # create master tensors, identical on all ranks
    a = torch.rand(batch_size * num_heads, seq_length, seq_length).cuda()
    v = torch.rand(batch_size * num_heads, seq_length, attention_head_size).cuda()
    dist.broadcast(a, src=0, group=gpc.get_group(ParallelMode.SEQUENCE))
    dist.broadcast(v, src=0, group=gpc.get_group(ParallelMode.SEQUENCE))

    # create distributed tensors holding this rank's sub-sequence
    sub_a = a.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()
    sub_v = v.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()

    # set autograd attributes
    a.requires_grad = True
    v.requires_grad = True
    a.retain_grad()
    v.retain_grad()
    sub_a.requires_grad = True
    sub_v.requires_grad = True
    sub_a.retain_grad()
    sub_v.retain_grad()

    # compute master attention output
    out = torch.matmul(a, v)

    # compute distributed attention output
    ring_av = RingAV.apply
    sub_out = ring_av(sub_a, sub_v, batch_size, num_heads, attention_head_size, sub_seq_length)

    # check master and distributed output
    sub_master_out = out[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
    assert torch.allclose(sub_out, sub_master_out, rtol=1e-5, atol=1e-2), \
        'attention output does not match'

    # run master backward
    out.retain_grad()
    out.mean().backward()

    # run distributed backward
    partial_master_out_grad = out.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
    torch.autograd.backward(sub_out, partial_master_out_grad)

    # check master and distributed grads
    partial_master_a_grad = a.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
    assert torch.allclose(sub_a.grad, partial_master_a_grad, rtol=1e-5, atol=1e-2), \
        'attention score gradient does not match'


def run_test(rank, world_size, port):
    colossalai.launch(rank=rank, world_size=world_size, config=CONFIG, host='localhost', port=port)

    check_ring_qk(rank, world_size)
    check_ring_av(rank, world_size)

    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_sequence():
    spawn(run_test, 4)


if __name__ == '__main__':
    test_sequence()
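The forward checks above rest on a simple partition identity: each row of the master result depends only on the corresponding row of the left operand, so slicing rows of A = QK^T equals computing with the sliced Q. This is what RingQK assembles ring-style across ranks. A single-process sanity sketch of that identity (plain torch, no distributed setup; the shapes are illustrative):

    import torch

    q = torch.rand(2, 8, 4)    # (batch * heads, seq, head_dim)
    k = torch.rand(2, 8, 4)
    a = torch.matmul(q, k.transpose(2, 1))    # master attention scores

    world_size = 4
    sub = 8 // world_size    # sub-sequence length per rank
    for rank in range(world_size):
        # each rank's slice of the master result equals the product with its sub-sequence of q
        sub_q = q[:, rank * sub:(rank + 1) * sub]
        assert torch.allclose(a[:, rank * sub:(rank + 1) * sub],
                              torch.matmul(sub_q, k.transpose(2, 1)))

Running the test file directly (python test_sequence.py) spawns 4 processes, matching the tensor-parallel size of 4 in CONFIG; under pytest it runs via the dist marker, with an automatic rerun if the port is already in use.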