Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-08 12:30:42 +00:00)
[devops] add large-scale distributed test marker (#4452)
* [test] remove cpu marker
* [test] remove gpu marker
* [test] update pytest markers
* [ci] update unit test ci
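For context on the change: pytest only treats a decorator such as @pytest.mark.cpu as meaningful if the marker is registered and then selected or deselected when the suite runs. Below is a minimal sketch of how a custom marker like the new large-scale distributed one could be registered and filtered in CI; the marker name `largedist` and everything in the snippet are illustrative assumptions, not taken from this commit.

# Registering a custom marker in pytest.ini (or in pyproject.toml under
# [tool.pytest.ini_options]) silences PytestUnknownMarkWarning. The name
# `largedist` is a hypothetical stand-in for the marker this PR introduces.
#
# [pytest]
# markers =
#     largedist: tests that need a large multi-node / multi-GPU setup

import pytest


@pytest.mark.largedist      # hypothetical marker name
def test_large_scale_training():
    ...


# CI can then include or exclude such tests with pytest's -m flag:
#   pytest -m largedist          # run only the large-scale distributed suite
#   pytest -m "not largedist"    # skip it in the regular unit-test job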
@@ -8,7 +8,6 @@ import pytest
 from colossalai.context.config import Config
 
 
-@pytest.mark.cpu
 def test_load_config():
     filename = Path(__file__).parent.joinpath('sample_config.py')
     config = Config.from_file(filename)
@@ -143,7 +143,6 @@ def run_dist(rank, world_size, port, backend, port_list, host):
     reset_seeds()
 
 
-@pytest.mark.cpu
 @rerun_if_address_is_in_use()
 def test_context():
     """
@@ -5,11 +5,10 @@ import os
 from pathlib import Path
 
 import pytest
-from torchvision import transforms, datasets
 from torch.utils.data import DataLoader
+from torchvision import datasets, transforms
 
 
-@pytest.mark.cpu
 def test_cifar10_dataset():
     # build transform
     transform_pipeline = [transforms.ToTensor()]
@@ -53,7 +53,6 @@ def run_data_sampler(rank, world_size, port):
     torch.cuda.empty_cache()
 
 
-@pytest.mark.cpu
 @rerun_if_address_is_in_use()
 def test_data_sampler():
     spawn(run_data_sampler, 4)
@@ -64,7 +64,6 @@ def run_data_sampler(rank, world_size, port):
     torch.cuda.empty_cache()
 
 
-@pytest.mark.cpu
 @rerun_if_address_is_in_use()
 def test_data_sampler():
     spawn(run_data_sampler, 4)
@@ -40,7 +40,6 @@ def forward_inplace(x, weight):
     return out
 
 
-@pytest.mark.gpu
 @clear_cache_before_run()
 @parameterize("use_reentrant", [True, False])
 @parameterize("cpu_offload", [True, False])
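For readers skimming the hunks above: the tests being re-marked follow ColossalAI's spawn-based distributed test pattern, using the `spawn` and `rerun_if_address_is_in_use` helpers visible in the diff. A minimal sketch under that assumption follows; the worker body and the `colossalai.launch` arguments are illustrative, not part of this commit.

import torch.distributed as dist

import colossalai
from colossalai.testing import rerun_if_address_is_in_use, spawn


def run_dist(rank, world_size, port):
    # each spawned worker sets up its own process group; the launch
    # arguments here mirror the usual pattern in the repo's tests and
    # are an assumption, not copied from this commit
    colossalai.launch(config={}, rank=rank, world_size=world_size,
                      host='localhost', port=port, backend='nccl')
    assert dist.get_world_size() == world_size


@rerun_if_address_is_in_use()   # retry when the rendezvous port is already taken
def test_distributed_smoke():
    spawn(run_dist, 4)          # four worker processes, as in test_data_sampler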