[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove unused configuration files

* [misc] ignore CUDA files for clang-format
Author: Hongxin Liu
Date:   2023-09-19 14:20:26 +08:00
Committed by: GitHub
Commit: 079bf3cb26 (parent: 3c6b831c26)
1268 changed files with 50037 additions and 38444 deletions
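Updating the hooks and re-running them over the whole tree is what produces a sweeping, formatting-only diff like the one below (the command behind the second bullet is `pre-commit run --all-files`). For the last bullet, CUDA sources are typically excluded from a clang-format hook via an `exclude` pattern in `.pre-commit-config.yaml`. A minimal sketch of the idea — the mirror repo, rev pin, and pattern here are illustrative, not the repository's actual configuration:

    repos:
      - repo: https://github.com/pre-commit/mirrors-clang-format
        rev: v16.0.6  # illustrative pin, not necessarily the rev used here
        hooks:
          - id: clang-format
            # assumption: skip CUDA sources rather than clang-format them
            exclude: \.cu$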


@@ -16,6 +16,7 @@ def check_selfattention():
     layer = layer.to(get_current_device())
 
     hidden_states = torch.rand(SUB_SEQ_LENGTH, BATCH, HIDDEN_SIZE).to(get_current_device())
-    attention_mask = torch.randint(low=0, high=2,
-                                   size=(BATCH, 1, 1, 1, SUB_SEQ_LENGTH * WORLD_SIZE)).to(get_current_device())
-    out = layer(hidden_states, attention_mask)
+    attention_mask = torch.randint(low=0, high=2, size=(BATCH, 1, 1, 1, SUB_SEQ_LENGTH * WORLD_SIZE)).to(
+        get_current_device()
+    )
+    layer(hidden_states, attention_mask)


@@ -8,7 +8,7 @@ from colossalai.legacy.core import global_context as gpc
 from colossalai.legacy.nn.layer.parallel_sequence import RingAV, RingQK
 from colossalai.testing import rerun_if_address_is_in_use, spawn
 
-CONFIG = dict(parallel=dict(tensor=dict(size=4, mode='sequence')))
+CONFIG = dict(parallel=dict(tensor=dict(size=4, mode="sequence")))
 
 
 def check_ring_qk(rank, world_size):
@@ -26,8 +26,8 @@ def check_ring_qk(rank, world_size):
     dist.broadcast(k, src=0, group=gpc.get_group(ParallelMode.SEQUENCE))
 
     # create distributed tensors
-    sub_q = q.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()
-    sub_k = k.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()
+    sub_q = q.clone()[:, rank * sub_seq_length : (rank + 1) * sub_seq_length].contiguous()
+    sub_k = k.clone()[:, rank * sub_seq_length : (rank + 1) * sub_seq_length].contiguous()
 
     # set autograd attributes
     q.requires_grad = True
@@ -47,7 +47,7 @@ def check_ring_qk(rank, world_size):
     sub_a = ring_qk(sub_q, sub_k, batch_size, num_heads, sub_seq_length)
 
     # check master and distributed attention scores
-    sub_master_a = a[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
+    sub_master_a = a[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
     assert torch.allclose(sub_a, sub_master_a, rtol=1e-5, atol=1e-2)
 
     # run master backward
@@ -55,13 +55,12 @@ def check_ring_qk(rank, world_size):
     a.mean().backward()
 
     # run distributed backward
-    partial_master_a_grad = a.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
+    partial_master_a_grad = a.grad[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
     torch.autograd.backward(sub_a, partial_master_a_grad)
 
     # check master and distributed grads
-    partial_master_q_grad = q.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
-    assert torch.allclose(sub_q.grad, partial_master_q_grad, rtol=1e-5, atol=1e-2), \
-        'attention score cannot match'
+    partial_master_q_grad = q.grad[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
+    assert torch.allclose(sub_q.grad, partial_master_q_grad, rtol=1e-5, atol=1e-2), "attention score cannot match"
 
 
 def check_ring_av(rank, world_size):
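What the check_ring_qk assertions verify: under sequence parallelism each rank owns one contiguous chunk of the sequence dimension, and RingQK must produce, for that chunk of Q, exactly the matching slice of the attention scores a single process would compute on the full tensors. A minimal single-process sketch of that invariant, with the ring exchange replaced by direct access to the full k (shapes and names here are illustrative, not the test's actual values):

    import torch

    batch, seq_length, dim, world_size = 4, 8, 16, 4
    sub_seq_length = seq_length // world_size

    q = torch.rand(batch, seq_length, dim)
    k = torch.rand(batch, seq_length, dim)

    # "master" attention scores computed on the full tensors
    a = torch.matmul(q, k.transpose(1, 2))

    for rank in range(world_size):
        # the Q chunk this rank would own
        sub_q = q[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
        # the local chunk against the full K must equal the matching
        # row-slice of the master scores -- the property asserted per rank
        sub_a = torch.matmul(sub_q, k.transpose(1, 2))
        sub_master_a = a[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
        assert torch.allclose(sub_a, sub_master_a, rtol=1e-5, atol=1e-2)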
@@ -79,8 +78,8 @@ def check_ring_av(rank, world_size):
     dist.broadcast(v, src=0, group=gpc.get_group(ParallelMode.SEQUENCE))
 
     # create distributed tensors
-    sub_a = a.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()
-    sub_v = v.clone()[:, rank * sub_seq_length:(rank + 1) * sub_seq_length].contiguous()
+    sub_a = a.clone()[:, rank * sub_seq_length : (rank + 1) * sub_seq_length].contiguous()
+    sub_v = v.clone()[:, rank * sub_seq_length : (rank + 1) * sub_seq_length].contiguous()
 
     # set autograd attributes
     a.requires_grad = True
@@ -102,7 +101,7 @@ def check_ring_av(rank, world_size):
     # print(f'master output shape: {out.shape}, partial output shape: {sub_out.shape}')
 
     # check master and distributed output
-    sub_master_out = out[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
+    sub_master_out = out[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
     assert torch.allclose(sub_out, sub_master_out, rtol=1e-5, atol=1e-2)
 
     # # run master backward
@@ -110,17 +109,16 @@ def check_ring_av(rank, world_size):
     out.mean().backward()
 
     # # run distributed backward
-    partial_master_out_grad = out.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
+    partial_master_out_grad = out.grad[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
     torch.autograd.backward(sub_out, partial_master_out_grad)
 
     # # check master and distributed grads
-    partial_master_a_grad = a.grad[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
-    assert torch.allclose(sub_a.grad, partial_master_a_grad, rtol=1e-5, atol=1e-2), \
-        'attention output cannot match'
+    partial_master_a_grad = a.grad[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
+    assert torch.allclose(sub_a.grad, partial_master_a_grad, rtol=1e-5, atol=1e-2), "attention output cannot match"
 
 
 def run_test(rank, world_size, port):
-    colossalai.legacy.launch(rank=rank, world_size=world_size, config=CONFIG, host='localhost', port=port)
+    colossalai.legacy.launch(rank=rank, world_size=world_size, config=CONFIG, host="localhost", port=port)
     # check_ring_qk(rank, world_size)
     check_ring_av(rank, world_size)
@@ -135,5 +133,5 @@ def test_sequence():
     spawn(run_test, 4)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     test_sequence()
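Both backward checks in this file follow the same recipe: run the master backward from a mean loss, slice the master output's gradient down to the rank's sequence chunk, feed that slice to torch.autograd.backward on the sharded output, and compare the resulting input gradients. A single-process sketch of why that slice is the right upstream gradient, using a trivial elementwise op as a stand-in for the real distributed kernel (all names and shapes illustrative):

    import torch

    seq_length, world_size, rank = 8, 4, 1  # pretend to be rank 1 of 4
    sub_seq_length = seq_length // world_size

    x = torch.rand(2, seq_length, 16, requires_grad=True)
    out = 3.0 * x  # stand-in for the master forward pass
    out.retain_grad()
    out.mean().backward()  # run master backward

    # this rank's view of the same computation
    sub_x = x.detach()[:, rank * sub_seq_length : (rank + 1) * sub_seq_length].clone().requires_grad_()
    sub_out = 3.0 * sub_x

    # the upstream grad for the shard is the matching slice of the master
    # output's grad -- mirroring what the test slices out of a.grad / out.grad
    partial_master_out_grad = out.grad[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
    torch.autograd.backward(sub_out, partial_master_out_grad)

    partial_master_x_grad = x.grad[:, rank * sub_seq_length : (rank + 1) * sub_seq_length]
    assert torch.allclose(sub_x.grad, partial_master_x_grad, rtol=1e-5, atol=1e-2)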