diff --git a/tests/test_fp8/test_fp8_allgather.py b/tests/test_fp8/test_fp8_allgather.py
index e6b618560..91e66e83c 100644
--- a/tests/test_fp8/test_fp8_allgather.py
+++ b/tests/test_fp8/test_fp8_allgather.py
@@ -6,14 +6,13 @@ from torch.testing import assert_close
 from colossalai import launch
 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import _all_gather_fp8
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
 
 
 @parameterize(
     "shape",
     [(3, 7, 16)],
 )
-
 @parameterize("dtype", [torch.bfloat16, torch.float16])
 @parameterize("fp8_format", ["e4m3", "e5m2"])
 @parameterize("async_op", [True, False])
diff --git a/tests/test_fp8/test_fp8_allreduce.py b/tests/test_fp8/test_fp8_allreduce.py
index d7e706ffd..297b05e48 100644
--- a/tests/test_fp8/test_fp8_allreduce.py
+++ b/tests/test_fp8/test_fp8_allreduce.py
@@ -5,7 +5,7 @@ from torch.testing import assert_close
 from colossalai import launch
 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import all_reduce_fp8
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
 
 
 @parameterize(