diff --git a/tests/test_fp8/test_fp8_reduce_scatter.py b/tests/test_fp8/test_fp8_reduce_scatter.py
index c18446e39..e0b558a25 100644
--- a/tests/test_fp8/test_fp8_reduce_scatter.py
+++ b/tests/test_fp8/test_fp8_reduce_scatter.py
@@ -13,14 +13,20 @@ from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
 @parameterize("scatter_dim", [0, 1, 2])
 @parameterize("dtype", [torch.bfloat16, torch.float16])
 @parameterize("fp8_format", ["e4m3", "e5m2"])
-def check_4gpu(shape, scatter_dim, dtype, fp8_format):
+@parameterize("async_op", [True, False])
+def check_4gpu(shape, scatter_dim, dtype, fp8_format, async_op):
     x = torch.rand(shape, dtype=dtype, device=get_accelerator().get_current_device())
     input_list = list(torch.chunk(x, dim=scatter_dim, chunks=4))
     input_list = [t.contiguous() for t in input_list]
     output_origin = torch.empty_like(input_list[0])
     output_fp8 = torch.empty_like(input_list[0])
-    reduce_scatter(output_origin, input_list, group=_get_default_group())
-    reduce_scatter_fp8(output_fp8, input_list, group=_get_default_group(), fp8_format=fp8_format)
+    origin_handle = reduce_scatter(output_origin, input_list, group=_get_default_group(), async_op=async_op)
+    fp8_handle = reduce_scatter_fp8(
+        output_fp8, input_list, group=_get_default_group(), fp8_format=fp8_format, async_op=async_op
+    )
+    if async_op:
+        origin_handle.wait()
+        fp8_handle.wait()
     assert_close(output_origin, output_fp8, rtol=0.1, atol=0.1)
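
Note (not part of the patch): a minimal standalone sketch of the handle/wait pattern the new async_op branch exercises, written against plain torch.distributed.reduce_scatter; per the patch, colossalai's reduce_scatter_fp8 follows the same contract and returns a waitable handle when async_op=True. The launch command, backend, and tensor shapes below are assumptions for illustration (run under e.g. torchrun --nproc_per_node=4 on a GPU node; reduce_scatter requires the NCCL backend).

import torch
import torch.distributed as dist


def main() -> None:
    # torchrun provides RANK / WORLD_SIZE / MASTER_ADDR / MASTER_PORT in the environment.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    torch.cuda.set_device(rank % torch.cuda.device_count())

    # Build per-rank inputs the same way the test does: chunk a tensor into
    # world_size contiguous pieces along the scatter dimension.
    x = torch.rand(world_size * 2, 8, dtype=torch.bfloat16, device="cuda")
    input_list = [t.contiguous() for t in torch.chunk(x, chunks=world_size, dim=0)]
    output = torch.empty_like(input_list[0])

    # With async_op=True the collective returns a work handle immediately; the
    # output buffer is only guaranteed to be valid after wait(), which is why the
    # test waits on both handles before comparing the FP8 and full-precision results.
    handle = dist.reduce_scatter(output, input_list, async_op=True)
    handle.wait()

    dist.destroy_process_group()


if __name__ == "__main__":
    main()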