Mirror of https://github.com/hpcaitech/ColossalAI.git
Hotfix/Colossalai layers (#92)
* optimized 1d layer apis; reorganized nn.layer modules; fixed tests
* fixed 2.5d runtime issue
* reworked split batch, now called in trainer.schedule.load_batch

Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
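Of the three bullet points above, only the test fixes are visible in the hunks below; the split-batch rework lives in trainer.schedule.load_batch, which this page does not show. Purely as an illustration of that idea, and not the actual ColossalAI code, a hypothetical load_batch that slices the global batch once per rank might look like this:

```python
# Hypothetical sketch of batch splitting done centrally in load_batch;
# the real trainer.schedule.load_batch is not part of this diff.
import torch


def split_batch(tensor: torch.Tensor, world_size: int, rank: int) -> torch.Tensor:
    # Keep only this rank's slice along the batch dimension.
    return tensor.chunk(world_size, dim=0)[rank]


def load_batch(data_iter, world_size: int, rank: int):
    data, label = next(data_iter)
    # Splitting here means downstream layers receive an already-sliced
    # batch and no longer need to split inputs themselves.
    return (split_batch(data, world_size, rank),
            split_batch(label, world_size, rank))
```

Centralizing the split in the schedule keeps the per-layer APIs thin, which fits the "optimized 1d layer apis" cleanup named in the commit message.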
```diff
@@ -1,4 +1,5 @@
 import os
+import model
 from pathlib import Path
 
 BATCH_SIZE = 128
```
```diff
@@ -1,11 +1,12 @@
 #!/usr/bin/env python
 # -*- encoding: utf-8 -*-
 
+from functools import partial
 
 import pytest
 import torch
 import torch.distributed as dist
 import torch.multiprocessing as mp
 
 from colossalai.communication import (recv_backward, recv_forward,
                                       recv_tensor_meta, send_backward,
                                       send_backward_recv_forward, send_forward,
@@ -15,8 +16,7 @@ from colossalai.context.parallel_mode import ParallelMode
 from colossalai.core import global_context as gpc
 from colossalai.initialize import launch
 from colossalai.logging import get_dist_logger
-from colossalai.utils import get_current_device
-from functools import partial
+from colossalai.utils import free_port, get_current_device
 
 BATCH_SIZE = 16
 SEQ_LENGTH = 64
@@ -123,13 +123,13 @@ def check_comm(size, rank, prev_rank, next_rank, up_group, down_group, logger):
     check_forward_backward(tensor, grad, rank, logger)
 
 
-def run_check(rank, world_size):
+def run_check(rank, world_size, port):
     launch(
         config=CONFIG,
         rank=rank,
         world_size=world_size,
         host='localhost',
-        port=29932,
+        port=port,
         backend='nccl'
     )
     logger = get_dist_logger()
@@ -154,7 +154,7 @@ def run_check(rank, world_size):
 @pytest.mark.dist
 def test_p2p():
     world_size = 4
-    run_func = partial(run_check, world_size=world_size)
+    run_func = partial(run_check, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)
 
 
```
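The recurring change in this and the following files is the removal of hard-coded rendezvous ports (29932, 29933, and so on), which could collide with a port still in use and make the tests flaky; each test now asks colossalai.utils.free_port for an available port. That helper's implementation is not shown in this diff, but a minimal sketch, assuming the usual bind-to-port-0 trick, would be:

```python
import socket


def free_port() -> int:
    """Minimal sketch of a free-port helper (the real
    colossalai.utils.free_port may differ)."""
    # Binding to port 0 makes the OS pick an unused ephemeral port;
    # we read the number back and release the socket for the caller.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('localhost', 0))
        return s.getsockname()[1]
```

Note the small race window: the port is released before the caller rebinds it, which is usually an acceptable trade-off in tests.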
```diff
@@ -3,25 +3,24 @@ import os.path as osp
 
 import pytest
 import torch
 import torch.multiprocessing as mp
 from torch.utils.data import DataLoader
 
 from colossalai.builder.pipeline import build_pipeline_model_from_cfg
 from colossalai.core import global_context
 from colossalai.initialize import launch
 from colossalai.logging import get_dist_logger
 from functools import partial
 import model
+from colossalai.utils import free_port
 
 DIR_PATH = osp.dirname(osp.realpath(__file__))
 CONFIG_PATH = osp.join(DIR_PATH, 'resnet_config.py')
 
 
-def run_partition(rank, world_size):
+def run_partition(rank, world_size, port):
     launch(config=CONFIG_PATH,
            rank=rank,
            world_size=world_size,
            host='localhost',
-           port=29933,
+           port=port,
            backend='nccl'
            )
     logger = get_dist_logger()
@@ -40,7 +39,7 @@ def run_partition(rank, world_size):
 @pytest.mark.dist
 def test_partition():
     world_size = 4
-    run_func = partial(run_partition, world_size=world_size)
+    run_func = partial(run_partition, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)
 
 
```
```diff
@@ -1,26 +1,23 @@
 # referenced from Megatron and used to testify communication
 
-import colossalai
 import os
 import os.path as osp
+from functools import partial
+from pathlib import Path
 
+import colossalai
 import pytest
 import torch
 import torch.multiprocessing as mp
-import model
 
 from colossalai.builder import build_pipeline_model_from_cfg
 from colossalai.communication import p2p as p2p_communication
 from colossalai.communication.utils import send_tensor_meta, recv_tensor_meta
 from colossalai.context.parallel_mode import ParallelMode
 from colossalai.core import global_context as gpc
-from colossalai.initialize import launch
-from colossalai.utils import print_rank_0, get_current_device, get_dataloader
 from colossalai.engine.schedule import PipelineSchedule
-from torchvision.datasets import CIFAR10
+from colossalai.initialize import launch
+from colossalai.utils import free_port, get_dataloader, print_rank_0
 from torchvision import transforms
-from pathlib import Path
-from functools import partial
+from torchvision.datasets import CIFAR10
 
+import model
 
 BATCH_SIZE = 32
 NUM_MICRO = 8
@@ -30,12 +27,12 @@ DIR_PATH = osp.dirname(osp.realpath(__file__))
 CONFIG_PATH = osp.join(DIR_PATH, './resnet_config.py')
 
 
-def run_schedule(rank, world_size):
+def run_schedule(rank, world_size, port):
     launch(config=CONFIG_PATH,
            rank=rank,
            world_size=world_size,
            host='localhost',
-           port=29934,
+           port=port,
            backend='nccl')
 
     # build model
@@ -86,7 +83,7 @@ def run_schedule(rank, world_size):
 @pytest.mark.dist
 def test_pipeline_schedule():
     world_size = 4
-    run_func = partial(run_schedule, world_size=world_size)
+    run_func = partial(run_schedule, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)
 
 
```
```diff
@@ -11,7 +11,7 @@ from colossalai.amp.amp_type import AMP_TYPE
 from colossalai.core import global_context as gpc
 from colossalai.logging import get_dist_logger
 from colossalai.trainer import Trainer
-from colossalai.utils import MultiTimer, get_dataloader
+from colossalai.utils import MultiTimer, free_port, get_dataloader
 from torch.optim import Adam
 from torchvision import transforms
 from torchvision.datasets import CIFAR10
@@ -26,8 +26,8 @@ CONFIG = dict(
     fp16=dict(mode=AMP_TYPE.TORCH))
 
 
-def run_trainer_no_pipeline(rank, world_size):
-    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=29930, backend='nccl')
+def run_trainer_no_pipeline(rank, world_size, port):
+    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model
     model = resnet18(num_classes=10)
@@ -88,7 +88,7 @@ def run_trainer_no_pipeline(rank, world_size):
 @pytest.mark.dist
 def test_trainer_no_pipeline():
     world_size = 4
-    run_func = partial(run_trainer_no_pipeline, world_size=world_size)
+    run_func = partial(run_trainer_no_pipeline, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)
 
 
```
|
@@ -12,7 +12,7 @@ from colossalai.core import global_context as gpc
|
||||
from colossalai.engine.schedule import PipelineSchedule
|
||||
from colossalai.logging import get_dist_logger
|
||||
from colossalai.trainer import Trainer
|
||||
from colossalai.utils import MultiTimer, get_dataloader
|
||||
from colossalai.utils import MultiTimer, free_port, get_dataloader
|
||||
from torch.optim import Adam
|
||||
from torchvision import transforms
|
||||
from torchvision.datasets import CIFAR10
|
||||
@@ -25,8 +25,8 @@ NUM_EPOCHS = 200
|
||||
CONFIG = dict(parallel=dict(pipeline=2, ), )
|
||||
|
||||
|
||||
def run_trainer_with_pipeline(rank, world_size):
|
||||
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=29931, backend='nccl')
|
||||
def run_trainer_with_pipeline(rank, world_size, port):
|
||||
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
|
||||
|
||||
# build model
|
||||
model = resnet18(num_classes=10)
|
||||
@@ -99,7 +99,7 @@ def run_trainer_with_pipeline(rank, world_size):
|
||||
@pytest.mark.dist
|
||||
def test_trainer_with_pipeline():
|
||||
world_size = 4
|
||||
run_func = partial(run_trainer_with_pipeline, world_size=world_size)
|
||||
run_func = partial(run_trainer_with_pipeline, world_size=world_size, port=free_port())
|
||||
mp.spawn(run_func, nprocs=world_size)
|
||||
|
||||
|
||||
|
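Every file above repeats the same three-step pattern: the worker function gains a port parameter, launch (or colossalai.launch) uses it in place of a fixed port, and the test binds it with functools.partial before mp.spawn, which supplies only the rank. A self-contained sketch of that pattern, using a hypothetical run_worker in place of the real test bodies and the gloo backend so it runs without GPUs (the tests above use nccl):

```python
import socket
from functools import partial

import torch.distributed as dist
import torch.multiprocessing as mp


def free_port() -> int:
    # Same bind-to-port-0 sketch as above.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('localhost', 0))
        return s.getsockname()[1]


def run_worker(rank, world_size, port):
    # Stand-in for run_check/run_partition/run_schedule: every spawned
    # process joins the same group on the port chosen by the parent.
    dist.init_process_group('gloo',
                            init_method=f'tcp://localhost:{port}',
                            rank=rank,
                            world_size=world_size)
    dist.barrier()
    dist.destroy_process_group()


def test_spawn_with_free_port():
    world_size = 4
    # free_port() runs once in the parent, so all workers agree on the
    # rendezvous port; partial freezes world_size and port because
    # mp.spawn passes only the rank to run_func.
    run_func = partial(run_worker, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
```

Calling free_port() in the parent rather than inside the worker is the key design choice: it guarantees all ranks rendezvous on the same port even though each runs in a separate process.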