[test] ignore 8 gpu test (#1080)

* [test] ignore 8 gpu test

* polish code

* polish workflow

* polish workflow
Author: Frank Lee
Date: 2022-06-08 23:14:18 +08:00 (committed by GitHub)
Parent: 0653c63eaa
Commit: 65ee6dcc20
11 changed files with 15 additions and 8 deletions

View File

@@ -7,9 +7,9 @@ from colossalai.utils.cuda import get_current_device
 class DummyDataLoader(DummyDataGenerator):
-    vocab_size = 50304
+    vocab_size = 128
     batch_size = 4
-    seq_len = 1024
+    seq_len = 64
     def generate(self):
         input_ids = torch.randint(0,
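
Note: the dummy loader now yields 4 x 64 token batches over a 128-entry vocabulary instead of 4 x 1024 over GPT-2's 50304-entry vocabulary, so the shared GPT component fits comfortably on a single CI GPU. Below is a rough standalone sketch of what such a generator produces; the real class builds on DummyDataGenerator and get_current_device, which are not shown in this diff, and the helper name here is purely illustrative.

import torch

# Illustrative stand-in for the reduced dummy batch; not the repository's DummyDataLoader.
def generate_dummy_batch(vocab_size=128, batch_size=4, seq_len=64, device='cpu'):
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device)
    attention_mask = torch.ones_like(input_ids)   # random data, so no padding positions
    return input_ids, attention_mask
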
@@ -47,6 +47,8 @@ class GPTLMModel(nn.Module):
         # Only return lm_logits
         return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0]
+def gpt2_micro(checkpoint=True):
+    return GPTLMModel(checkpoint=checkpoint, hidden_size=32, num_layers=2, num_attention_heads=4, max_seq_len=64, vocab_size=128)
 def gpt2_s(checkpoint=True):
     return GPTLMModel(checkpoint=checkpoint)
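
Note: gpt2_micro builds a deliberately tiny configuration (hidden_size 32, 2 layers, 4 heads, 64-position context, 128-token vocabulary) so the component test stays cheap. As a hedged sketch of the scale involved, assuming GPTLMModel wraps a HuggingFace GPT-2 language-model head (the forward call above suggests this, but the wrapper's definition is not part of this diff), one can build the same shape directly and count parameters:

from transformers import GPT2Config, GPT2LMHeadModel

# Assumption: the micro hyperparameters map onto GPT2Config as below; the real
# GPTLMModel in the patched file may differ in detail.
config = GPT2Config(n_embd=32, n_layer=2, n_head=4, n_positions=64, vocab_size=128)
model = GPT2LMHeadModel(config)
print(sum(p.numel() for p in model.parameters()))   # on the order of tens of thousands of parameters
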
@@ -76,4 +78,4 @@ def get_training_components():
     testloader = DummyDataLoader()
     criterion = GPTLMLoss()
-    return gpt2_s, trainloader, testloader, torch.optim.Adam, criterion
+    return gpt2_micro, trainloader, testloader, torch.optim.Adam, criterion
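
Note: every test that pulls its model from get_training_components now gets the micro variant. A hypothetical usage sketch follows; the component names mirror this hunk, while the forward signature and shapes are assumptions taken from the earlier hunks rather than from code shown here.

import torch

# Hypothetical consumer of the 5-tuple returned above; shapes follow the new
# vocab_size=128, batch_size=4, seq_len=64 settings from the first hunk.
model_builder, trainloader, testloader, optim_cls, criterion = get_training_components()
model = model_builder(checkpoint=True)
optimizer = optim_cls(model.parameters(), lr=1e-3)
input_ids = torch.randint(0, 128, (4, 64))
attention_mask = torch.ones_like(input_ids)
logits = model(input_ids, attention_mask)   # lm_logits only, per the forward shown above
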

View File

@@ -83,6 +83,7 @@ def run_trainer(rank, world_size, port):
 @pytest.mark.dist
+@pytest.mark.skip("This test requires 8 GPUs to execute")
 @rerun_if_address_is_in_use()
 def test_hybrid_parallel():
     world_size = 8
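
Note: the skip marker removes this test from the default CI run entirely, while @rerun_if_address_is_in_use() still protects anyone running it by hand on an 8-GPU box against a stale port. The test body is truncated in this hunk, but it presumably follows the partial/mp.spawn launch idiom visible in the later hunks of this commit; a hedged sketch, with the free_port import path assumed from colossalai tests of this era:

from functools import partial
import torch.multiprocessing as mp
from colossalai.utils import free_port   # assumed import path, not shown in this diff

def launch_hybrid_parallel_test(world_size=8):
    # run_trainer is the worker named in the hunk header above; mp.spawn passes the
    # process index (the rank) as its first argument, partial binds the rest.
    run_func = partial(run_trainer, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
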

View File

@@ -51,6 +51,7 @@ def check_layer_and_operation(rank, world_size, port):
 @pytest.mark.dist
+@pytest.mark.skip("This test requires 8 GPUs to execute")
 @rerun_if_address_is_in_use()
 def test_3d():
     world_size = 8

View File

@@ -328,7 +328,6 @@ def run_model_dist(rank, world_size, port):
 @pytest.mark.dist
 @pytest.mark.parametrize('world_size', [1, 4])
-# @parameterize('world_size', [1, 4])
 @rerun_if_address_is_in_use()
 def test_model(world_size):
     run_func = partial(run_model_dist, world_size=world_size, port=free_port())
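
Note: the line removed in this hunk appears to be the stale commented-out reference to colossalai's own @parameterize helper; the live pytest.mark.parametrize above it already drives the test. For reference, pytest's marker simply collects one test invocation per listed value and injects it as an argument:

import pytest

# Collected twice: test_parametrize_demo[1] and test_parametrize_demo[4].
@pytest.mark.parametrize('world_size', [1, 4])
def test_parametrize_demo(world_size):
    assert world_size in (1, 4)
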

View File

@@ -67,6 +67,7 @@ def check_checkpoint_1d(rank, world_size, port):
 @pytest.mark.dist
+@pytest.mark.skip("This test should be invoked with 8 GPUs")
 @rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
 def test_checkpoint_1d():
     world_size = 8
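
Note: rerun_on_exception, a colossalai testing utility, retries a test whose failure matches the given exception type and message pattern, here the transient "Address already in use" error surfaced through torch.multiprocessing. The sketch below is not colossalai's implementation, only a minimal illustration of that retry idea, with max_try picked arbitrarily:

import functools
import re

def rerun_on_exception_sketch(exception_type=Exception, pattern=None, max_try=5):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_try):
                try:
                    return func(*args, **kwargs)
                except exception_type as e:
                    if pattern is not None and not re.search(pattern, str(e)):
                        raise   # unrelated failure: surface it immediately
                    if attempt == max_try - 1:
                        raise   # retries exhausted
        return wrapper
    return decorator
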

View File

@@ -67,6 +67,7 @@ def check_checkpoint_2d(rank, world_size, port):
 @pytest.mark.dist
+@pytest.mark.skip("This test should be invoked with 8 GPUs")
 @rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
 def test_checkpoint_2d():
     world_size = 8

View File

@@ -67,6 +67,7 @@ def check_checkpoint_2p5d(rank, world_size, port):
 @pytest.mark.dist
+@pytest.mark.skip("This test should be invoked with 8 GPUs")
 @rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
 def test_checkpoint_2p5d():
     world_size = 8

View File

@@ -67,6 +67,7 @@ def check_checkpoint_3d(rank, world_size, port):
 @pytest.mark.dist
+@pytest.mark.skip("This test requires 8 GPUs to execute")
 @rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
 def test_checkpoint_3d():
     world_size = 8

View File

@@ -22,7 +22,7 @@ def run_dist(rank, world_size, port):
 @pytest.mark.dist
-@pytest.mark.parametrize("world_size", [4, 5])
+@pytest.mark.parametrize("world_size", [3, 4])
 def test_memory_utils(world_size):
     run_func = partial(run_dist, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)
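
Note: the parametrized world sizes are trimmed from [4, 5] to [3, 4], presumably to match what a smaller CI machine can host. The launch idiom above relies on torch.multiprocessing.spawn calling fn(i, *args) with i as the process index, which is why partial pre-binds everything except the rank. A self-contained sketch with a hypothetical worker and a hard-coded port:

from functools import partial
import torch.multiprocessing as mp

def run_dist_demo(rank, world_size, port):
    # stand-in for the real run_dist worker, which would initialize colossalai here
    print(f"rank {rank}/{world_size} would initialize on port {port}")

if __name__ == '__main__':
    world_size = 4
    run_func = partial(run_dist_demo, world_size=world_size, port=29500)   # hypothetical fixed port
    mp.spawn(run_func, nprocs=world_size)   # spawn supplies the rank as the first argument
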

View File

@@ -85,7 +85,7 @@ def run_dist(rank, world_size, port):
 @pytest.mark.dist
-@pytest.mark.parametrize("world_size", [4, 5])
+@pytest.mark.parametrize("world_size", [2, 4])
 @rerun_if_address_is_in_use()
 def test_zero_tensor_utils(world_size):
     run_func = partial(run_dist, world_size=world_size, port=free_port())