Merge pull request #6071 from wangbluo/ring_attention
[Ring Attention] fix the 2D ring attention when using multiple machines
@@ -5,6 +5,7 @@ from flash_attn import flash_attn_qkvpacked_func, flash_attn_varlen_qkvpacked_func
 from torch.testing import assert_close
 
 import colossalai
+from colossalai.cluster import ProcessGroupMesh
 from colossalai.shardformer.layer import AttnMaskType
 from colossalai.shardformer.layer.attn import AttnMaskType, RingAttention
 from colossalai.shardformer.layer.utils import split_batch_zigzag, split_varlen_zigzag
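The new ProcessGroupMesh import supports the mesh setup introduced in the next hunk. Below is a minimal sketch of that setup, assembled from this diff and assuming a distributed environment is already initialized; build_sp_mesh is a hypothetical helper name, and dp = pp = tp = 1 mirrors the test.

import torch.distributed as dist

from colossalai.cluster import ProcessGroupMesh


def build_sp_mesh():
    # Axis order is (dp, pp, sp, tp), matching the test's ProcessGroupMesh call,
    # so axis 2 names the sequence-parallel dimension.
    dp_size, pp_size, tp_size = 1, 1, 1
    sp_size = dist.get_world_size()  # all ranks form one sequence-parallel group
    sp_axis = 2
    pg_mesh = ProcessGroupMesh(dp_size, pp_size, sp_size, tp_size)
    return pg_mesh, sp_axis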
@@ -17,11 +18,14 @@ from colossalai.utils import get_current_device
 @parameterize("nheads", [5])
 @parameterize("d", [128])
 @parameterize("dtype", [torch.bfloat16, torch.float16])
-def check_ring_attn(seq_len, bs, nheads, d, dtype):
+def check_ring_attn(seq_len, bs, nheads, d, dtype, inner_ring_size):
     torch.cuda.manual_seed(2)
     device = get_current_device()
     sp_group = dist.group.WORLD
+    dp_size, pp_size, tp_size = 1, 1, 1
     sp_size = dist.get_world_size()
+    sp_axis = 2
+    pg_mesh = ProcessGroupMesh(dp_size, pp_size, sp_size, tp_size)
     # Some outliers may seem large, but our errors are still lower than
     # than Megatron-LM context parallel's
     # (https://github.com/NVIDIA/TransformerEngine/blob/33a3d02f81c56e6f7b542c09bfa86657078d57fb/tests/pytorch/fused_attn/run_fused_attn_with_cp.py#L215)
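On the new inner_ring_size argument: in a 2D (double) ring, the sequence-parallel ranks are split into inner rings of inner_ring_size ranks each, with an outer ring connecting those groups across machines. The snippet below is a purely illustrative sketch of that grouping arithmetic; the numbers are made up and the exact rank assignment inside ColossalAI may differ.

sp_size = 8          # illustrative sequence-parallel world size
inner_ring_size = 2  # the value launch_double_ring passes below
num_inner_rings = sp_size // inner_ring_size
inner_rings = [
    list(range(i * inner_ring_size, (i + 1) * inner_ring_size))
    for i in range(num_inner_rings)
]
print(inner_rings)  # [[0, 1], [2, 3], [4, 5], [6, 7]]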
@@ -40,11 +44,11 @@ def check_ring_attn(seq_len, bs, nheads, d, dtype):
         q,
         k,
         v,
-        sp_group,
+        sp_axis,
         AttnMaskType.CAUSAL,
         return_softmax=True,
-        inner_ring_size=max(2, sp_size // 2),
-        # inner_ring_size=4
+        inner_ring_size=inner_ring_size,
+        pg_mesh=pg_mesh,
     )
     ring_out = ring_out.transpose(1, 2)
     out, lse, _ = flash_attn_qkvpacked_func(
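Assembled from this hunk, the updated test-side call reads roughly as below. The assignment target and the surrounding context are assumptions (they sit outside the changed lines); the argument order and keywords are taken from the diff.

ring_out, ring_lse = RingAttention.attention(
    q,
    k,
    v,
    sp_axis,  # mesh axis of the sequence-parallel group, replacing the old sp_group argument
    AttnMaskType.CAUSAL,
    return_softmax=True,
    inner_ring_size=inner_ring_size,  # None in the single-ring test, 2 in the double-ring test
    pg_mesh=pg_mesh,  # the ProcessGroupMesh built earlier in the test
)
ring_out = ring_out.transpose(1, 2)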
@@ -83,6 +87,7 @@ def check_packed_seq(seqlen, bs, nheads, d, dtype):
     device = get_current_device()
     sp_group = dist.group.WORLD
     sp_size = dist.get_world_size()
+    sp_axis = 2
     atol = rtol = 7e-3
     torch.cuda.manual_seed(2)
     # Prepare varlen attention mask
@@ -123,10 +128,11 @@ def check_packed_seq(seqlen, bs, nheads, d, dtype):
         q_ring,
         k_ring,
         v_ring,
-        sp_group,
+        sp_axis,
         **mask_info,
         pad_output=False,
         return_softmax=True,
+        pg_mesh=ProcessGroupMesh(1, 1, sp_size, 1),
         # deterministic=True
     )
     ring_out = ring_out.transpose(1, 2).reshape(-1, nheads, d)
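The packed-sequence test builds its mesh inline rather than reusing a variable; under the same (dp, pp, sp, tp) axis convention this should be equivalent to the setup in check_ring_attn, as the sketch below spells out.

import torch.distributed as dist

from colossalai.cluster import ProcessGroupMesh

sp_size = dist.get_world_size()
pg_mesh = ProcessGroupMesh(1, 1, sp_size, 1)  # inline form used above; sp sits on axis 2
sp_axis = 2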
@@ -161,12 +167,12 @@ def check_packed_seq(seqlen, bs, nheads, d, dtype):
 def launch_single_ring(rank, world_size, port):
     colossalai.launch(rank, world_size, "localhost", port)
     check_packed_seq()
-    check_ring_attn()
+    check_ring_attn(inner_ring_size=None)
 
 
 def launch_double_ring(rank, world_size, port):
     colossalai.launch(rank, world_size, "localhost", port)
-    check_ring_attn()
+    check_ring_attn(inner_ring_size=2)
 
 
 @rerun_if_address_is_in_use()
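For completeness, here is a sketch of how launchers like these are typically driven in ColossalAI tests, assuming the usual colossalai.testing helpers; the test names and the world size of 4 are illustrative, not part of this diff.

from colossalai.testing import rerun_if_address_is_in_use, spawn


@rerun_if_address_is_in_use()
def test_single_ring(world_size=4):
    # spawn starts world_size processes; each one calls
    # launch_single_ring(rank, world_size, port)
    spawn(launch_single_ring, nprocs=world_size)


@rerun_if_address_is_in_use()
def test_double_ring(world_size=4):
    spawn(launch_double_ring, nprocs=world_size)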