[test] refactor tests with spawn (#3452)

* [test] added spawn decorator

* polish code

* polish code

* polish code

* polish code

* polish code

* polish code
Frank Lee
2023-04-06 14:51:35 +08:00
committed by GitHub
parent 62f4e2eb07
commit 80eba05b0a
240 changed files with 1723 additions and 2342 deletions
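
None of the hunks shown below define the new spawn utility that the commit message refers to, so the following is only a minimal sketch, assuming a helper built on torch.multiprocessing that runs a test function across freshly spawned worker processes; the name, signature, and helper-vs-decorator form are all assumptions, not the actual colossalai.testing API:

import torch.multiprocessing as mp


def _worker_entry(rank, test_fn, world_size, kwargs):
    # torch.multiprocessing.spawn always passes the worker's rank first;
    # the rank/world_size parameter names of test_fn are an assumption.
    test_fn(rank=rank, world_size=world_size, **kwargs)


def spawn(test_fn, nprocs=1, **kwargs):
    # Hypothetical sketch: launch nprocs processes, run test_fn in each,
    # and join them so the pytest entry point blocks until all finish.
    mp.spawn(_worker_entry, args=(test_fn, nprocs, kwargs), nprocs=nprocs)

Under this reading, a distributed test keeps a single-process pytest entry point and calls, e.g., spawn(check_fn, 4) inside it, so each worker gets its own process.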

View File

@@ -3,7 +3,7 @@ import torch
 from packaging import version
 from torch.utils.checkpoint import checkpoint
-from colossalai.testing.utils import parameterize
+from colossalai.testing.utils import clear_cache_before_run, parameterize
 try:
     from colossalai._analyzer.fx import symbolic_trace
@@ -81,6 +81,7 @@ class AddmmModel(torch.nn.Module):
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
+@clear_cache_before_run()
 @parameterize("bias", [True, False])
 @parameterize("bias_addition_split", [True, False])
 @parameterize("shape", [(3, 3, 3), (3, 3, 3, 3)])

View File

@@ -1,6 +1,8 @@
 import pytest
 import torch
+
+from colossalai.testing import clear_cache_before_run, parameterize
 try:
     from colossalai._analyzer.fx import symbolic_trace
 except:
@@ -62,9 +64,10 @@ class AModel(torch.nn.Module):
 @pytest.mark.skipif(torch.__version__ < '1.12.0', reason='torch version < 12')
-@pytest.mark.parametrize("bias", [True, False])
-@pytest.mark.parametrize("bias_addition_split", [True, False])
-@pytest.mark.parametrize("shape", [(3, 3, 3), (3, 3, 3, 3)])
+@clear_cache_before_run()
+@parameterize("bias", [True, False])
+@parameterize("bias_addition_split", [True, False])
+@parameterize("shape", [(3, 3, 3), (3, 3, 3, 3)])
 def test_mod_dir(bias, bias_addition_split, shape):
     model = AModel(bias=bias)
     x = torch.rand(shape)
@@ -75,4 +78,4 @@ def test_mod_dir(bias, bias_addition_split, shape):
 if __name__ == '__main__':
-    test_mod_dir(True, True, (3, 3, 3))
+    test_mod_dir(bias=True, bias_addition_split=True, shape=(3, 3, 3))
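
This hunk shows the swap that recurs throughout the commit: pytest.mark.parametrize expands test cases at pytest collection time, while colossalai.testing.parameterize expands them when the function is called, so the same body also runs all combinations from the plain __main__ call above. A simplified sketch of such a call-time decorator, assuming a single argument name (the real one evidently also accepts comma-separated names such as 'func, args, kwargs'):

from functools import wraps


def parameterize(arg_name, values):
    # Hypothetical sketch: re-run the wrapped test once per value at
    # call time instead of registering separate pytest test items.
    def decorator(test_fn):
        @wraps(test_fn)
        def wrapper(*args, **kwargs):
            for value in values:
                kwargs[arg_name] = value
                test_fn(*args, **kwargs)
        return wrapper
    return decorator

Stacking three such decorators, as on test_mod_dir, nests the loops and covers the full cross product of bias, bias_addition_split, and shape.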

View File

@@ -1,7 +1,9 @@
+import pytest
 import torch
 import torch.nn as nn
 from torch.utils.checkpoint import checkpoint
-import pytest
+
+from colossalai.testing import clear_cache_before_run
 try:
     from colossalai._analyzer.fx import symbolic_trace
@@ -42,6 +44,7 @@ class MyModule(nn.Module):
 @pytest.mark.skipif(torch.__version__ < '1.12.0', reason='torch version < 12')
+@clear_cache_before_run()
 def test_nested_ckpt():
     model = MyModule()
     x = torch.rand(10, 10)

View File

@@ -3,7 +3,7 @@ import torch
 import torchvision.models as tm
 from packaging import version
-from colossalai.testing.utils import parameterize
+from colossalai.testing.utils import clear_cache_before_run, parameterize
 from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
 try:
@@ -32,6 +32,7 @@ def _check_gm_validity(gm: torch.fx.GraphModule):
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
+@clear_cache_before_run()
 @parameterize('m', tm_models)
 def test_torchvision_shape_prop(m):
     with MetaTensorMode():
@@ -46,6 +47,7 @@ def test_torchvision_shape_prop(m):
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
+@clear_cache_before_run()
 @parameterize('m', tmm_models)
 def test_timm_shape_prop(m):
     with MetaTensorMode():

View File

@@ -3,7 +3,7 @@ import torch
 import torchvision.models as tm
 from packaging import version
-from colossalai.testing.utils import parameterize
+from colossalai.testing.utils import clear_cache_before_run, parameterize
 from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
 try:
@@ -19,6 +19,7 @@ def _check_gm_validity(gm: torch.fx.GraphModule):
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
+@clear_cache_before_run()
 @parameterize('m', tm_models)
 def test_torchvision_profile(m, verbose=False, bias_addition_split=False):
     with MetaTensorMode():
@@ -33,6 +34,7 @@ def test_torchvision_profile(m, verbose=False, bias_addition_split=False):
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
+@clear_cache_before_run()
 @parameterize('m', tmm_models)
 def test_timm_profile(m, verbose=False, bias_addition_split=False):
     with MetaTensorMode():

View File

@@ -1,9 +1,11 @@
 from typing import Any, Callable, Union
+
 import pytest
 import torch
 import torch.nn as nn
+from colossalai.testing import clear_cache_before_run
 try:
     from colossalai._analyzer._subclasses import MetaTensor
 except:
@@ -72,6 +74,7 @@ def run_and_compare(f: Union[nn.Module, Callable], x: torch.Tensor, requires_bac
 @pytest.mark.skipif(torch.__version__ < '1.12.0', reason='torch version < 12')
+@clear_cache_before_run()
 def test_meta_aten():
     for (aten_op, requires_backward), v in registered_meta.items():
         for f, x in v:

View File

@@ -4,6 +4,7 @@ import torch.nn.functional as F
 import torchvision.models as tm
 from packaging import version
+from colossalai.testing import clear_cache_before_run, parameterize
 from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
 try:
@@ -39,7 +40,8 @@ odd_cases = [
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
-@pytest.mark.parametrize('func, args, kwargs', odd_cases)
+@clear_cache_before_run()
+@parameterize('func, args, kwargs', odd_cases)
 def test_flop_count_function(func, args, kwargs):
     rs_fwd, rs_bwd = flop_count(func, *args, **kwargs, verbose=True)
     assert rs_fwd > 0, f'fwd flop count of {func.__name__} is {rs_fwd}'

View File

@@ -3,6 +3,8 @@ import torch
 import torchvision.models as tm
 from packaging import version
+
+from colossalai.testing import clear_cache_before_run, parameterize
 try:
     from colossalai._analyzer._subclasses import MetaTensor, MetaTensorMode
 except:
@@ -30,7 +32,8 @@ def run_and_compare(model):
 @pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
-@pytest.mark.parametrize('m', tm_models + tmm_models)
+@clear_cache_before_run()
+@parameterize('m', tm_models + tmm_models)
 def test_meta_mode_shape(m):
     run_and_compare(m())