Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-06-21 13:11:27 +00:00)
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
parent 5c56a7fd7b
commit 6997862a91

The diff consists of seven hunks across ColossalAI test files: six reorder the colossalai.testing import into alphabetical order (the style enforced by an import sorter such as isort, one hunk also dropping a stray blank line), and one adds a missing @clear_cache_before_run() decorator.
@@ -6,7 +6,7 @@ from torch.testing import assert_close
 from colossalai import launch
 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import all_to_all_single_fp8
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn


 @clear_cache_before_run()
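For orientation, here is a minimal sketch of the scaffolding these imports support, assuming the usual ColossalAI pattern of a spawned worker that calls launch; the real test body is not part of this diff and the names below are placeholders.

# Assumed structure, inferred only from the imported names; not the actual test code.
from colossalai import launch
from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn


def run_dist(rank, world_size, port):
    # Each spawned worker joins the process group before exercising the collective.
    launch(rank=rank, world_size=world_size, host="localhost", port=port)
    # ... call all_to_all_single_fp8 here and compare it against dist.all_to_all_single ...


@clear_cache_before_run()
@rerun_if_address_is_in_use()
def test_all_to_all_single_fp8():
    spawn(run_dist, 2)  # two worker processes, rendezvous port chosen automatically


if __name__ == "__main__":
    test_all_to_all_single_fp8()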
@@ -6,7 +6,7 @@ from torch.testing import assert_close
 from colossalai import launch
 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import _all_to_all_fp8
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn


 @clear_cache_before_run()
@@ -6,7 +6,7 @@ from torch.testing import assert_close
 from colossalai import launch
 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import all_to_all_single_fp8
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn

 dist.all_to_all_single
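The trailing context line names dist.all_to_all_single (the line appears truncated in the capture), the plain torch.distributed collective that the FP8 variant is presumably checked against. A self-contained sketch of that reference op, not taken from the test; NCCL is assumed here because gloo does not implement all-to-all, so it needs one GPU per process.

# Run with: torchrun --nproc_per_node=2 demo_all_to_all.py  (hypothetical file name)
import torch
import torch.distributed as dist


def main():
    dist.init_process_group(backend="nccl")  # torchrun supplies rank/world size via env vars
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    torch.cuda.set_device(rank)

    # Each rank contributes world_size values; element i is routed to rank i.
    x = torch.arange(world_size, dtype=torch.float32, device="cuda") + rank * world_size
    out = torch.empty_like(x)
    dist.all_to_all_single(out, x)  # even splits are the default

    print(f"rank {rank}: sent {x.tolist()}, got {out.tolist()}")
    dist.destroy_process_group()


if __name__ == "__main__":
    main()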
@@ -3,8 +3,7 @@ from torch.testing import assert_close

 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import cast_from_fp8, cast_from_fp8_pipeline, cast_to_fp8, cast_to_fp8_pipeline
-from colossalai.testing import parameterize, clear_cache_before_run
-
+from colossalai.testing import clear_cache_before_run, parameterize


 @clear_cache_before_run()
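The hunk above touches the test for ColossalAI's cast_to_fp8 / cast_from_fp8 helpers. Their exact signatures are not visible in this diff, so the sketch below only illustrates the underlying idea with plain PyTorch float8 dtypes (torch >= 2.1) and hypothetical tolerances; it does not reproduce the ColossalAI API.

import torch
from torch.testing import assert_close

x = torch.randn(4, 8, dtype=torch.float32)

# Scale into the representable range of e4m3 (max normal value is 448), quantize, dequantize.
scale = x.abs().max() / 448.0
x_fp8 = (x / scale).to(torch.float8_e4m3fn)
x_back = x_fp8.to(torch.float32) * scale

# FP8 is lossy, so the round trip only matches to loose tolerances.
assert_close(x, x_back, rtol=0.1, atol=0.1)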
@@ -8,7 +8,7 @@ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
 from torch.testing import assert_close

 from colossalai import launch
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn

 # example modified from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html
@@ -27,6 +27,7 @@ class ToyModel(nn.Module):
     def forward(self, x):
         return self.net2(self.relu(self.net1(x)))


+@clear_cache_before_run()
 @parameterize("mode", ["grad", "params"])
 def run_model(mode):
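The added decorator sits above parameterize; a small sketch of how that pairing behaves when the test is invoked (placeholder body, not the real run_model):

# Sketch only: clear_cache_before_run frees cached accelerator memory before the call,
# and ColossalAI's parameterize re-invokes the wrapped function once per listed value.
from colossalai.testing import clear_cache_before_run, parameterize


@clear_cache_before_run()
@parameterize("mode", ["grad", "params"])
def run_model(mode):
    print(f"running with mode={mode}")  # placeholder for the real FSDP checks


run_model()  # expands to run_model(mode="grad") and run_model(mode="params")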
@@ -6,7 +6,7 @@ from torch.testing import assert_close
 from colossalai import launch
 from colossalai.accelerator import get_accelerator
 from colossalai.quantization.fp8 import reduce_scatter_fp8
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn, clear_cache_before_run
+from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn


 @clear_cache_before_run()