Hotfix/Colossalai layers (#92)

* Optimized 1D layer APIs; reorganized nn.layer modules; fixed tests

* Fixed a 2.5D runtime issue

* Reworked batch splitting; it is now called in trainer.schedule.load_batch (see the sketch after the commit metadata)

Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
Author: アマデウス
Date: 2021-12-29 23:32:10 +08:00
Committed by: GitHub
Parent: 0fedef4f3c
Commit: 01a80cd86d
71 changed files with 1033 additions and 773 deletions
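The third bullet above refers to slicing the global input batch along its first dimension so that each rank in a parallel group only keeps its own share when the trainer's schedule fetches a batch. A minimal sketch of that idea, using a hypothetical split_batch helper and a generic torch.distributed process group (not ColossalAI's actual implementation):

import torch
import torch.distributed as dist


def split_batch(batch: torch.Tensor, group=None) -> torch.Tensor:
    # Illustrative only: hand each rank in `group` an equal slice of dim 0.
    world_size = dist.get_world_size(group)
    rank = dist.get_rank(group)
    assert batch.size(0) % world_size == 0, 'global batch must divide evenly across ranks'
    return batch.chunk(world_size, dim=0)[rank].contiguous()

A schedule's batch-loading step would call such a helper once per iteration, before the sliced tensors are moved to the local device.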

View File

@@ -1,27 +1,19 @@
 from .activation_checkpoint import checkpoint
-from .common import (print_rank_0, sync_model_param_in_dp, is_dp_rank_0,
-                     is_tp_rank_0, is_no_pp_or_last_stage, is_using_ddp,
-                     is_using_pp, conditional_context, is_model_parallel_parameter,
-                     clip_grad_norm_fp32, count_zeros_fp32, copy_tensor_parallel_attributes,
-                     param_is_not_tensor_parallel_duplicate, switch_virtual_pipeline_parallel_rank)
-from .cuda import get_current_device, synchronize, empty_cache, set_to_cuda
+from .common import (clip_grad_norm_fp32, conditional_context, copy_tensor_parallel_attributes, count_zeros_fp32,
+                     free_port, is_dp_rank_0, is_model_parallel_parameter, is_no_pp_or_last_stage, is_tp_rank_0,
+                     is_using_ddp, is_using_pp, multi_tensor_applier, param_is_not_tensor_parallel_duplicate,
+                     print_rank_0, switch_virtual_pipeline_parallel_rank, sync_model_param_in_dp)
+from .cuda import empty_cache, get_current_device, set_to_cuda, synchronize
+from .data_sampler import DataParallelSampler, get_dataloader
+from .gradient_accumulation import accumulate_gradient
 from .memory import report_memory_usage
 from .timer import MultiTimer, Timer
-from .multi_tensor_apply import multi_tensor_applier
-from .gradient_accumulation import accumulate_gradient
-from .data_sampler import DataParallelSampler, get_dataloader

-__all__ = ['checkpoint',
-           'print_rank_0', 'sync_model_param_in_dp', 'is_dp_rank_0',
-           'is_tp_rank_0', 'is_no_pp_or_last_stage', 'is_using_ddp',
-           'is_using_pp', 'conditional_context', 'is_model_parallel_parameter',
-           'clip_grad_norm_fp32', 'count_zeros_fp32', 'copy_tensor_parallel_attributes',
-           'param_is_not_tensor_parallel_duplicate',
-           'get_current_device', 'synchronize', 'empty_cache', 'set_to_cuda',
-           'report_memory_usage',
-           'Timer', 'MultiTimer',
-           'multi_tensor_applier',
-           'accumulate_gradient',
-           'DataParallelSampler', 'get_dataloader',
-           'switch_virtual_pipeline_parallel_rank'
-           ]
+__all__ = [
+    'checkpoint', 'free_port', 'print_rank_0', 'sync_model_param_in_dp', 'is_dp_rank_0', 'is_tp_rank_0',
+    'is_no_pp_or_last_stage', 'is_using_ddp', 'is_using_pp', 'conditional_context', 'is_model_parallel_parameter',
+    'clip_grad_norm_fp32', 'count_zeros_fp32', 'copy_tensor_parallel_attributes',
+    'param_is_not_tensor_parallel_duplicate', 'get_current_device', 'synchronize', 'empty_cache', 'set_to_cuda',
+    'report_memory_usage', 'Timer', 'MultiTimer', 'multi_tensor_applier', 'accumulate_gradient', 'DataParallelSampler',
+    'get_dataloader', 'switch_virtual_pipeline_parallel_rank'
+]
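With the reorganized __init__ above, the utilities (including the newly exported free_port) are importable directly from the utils package. A small usage sketch, assuming the package path colossalai.utils:

from colossalai.utils import free_port, get_current_device

# free_port() returns an unused local TCP port, e.g. to use as a rendezvous port in tests.
port = free_port()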

View File

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 # -*- encoding: utf-8 -*-

+import random
+import socket
 import torch
 from torch._six import inf
@@ -9,16 +11,15 @@ try:
 except:
     pass

-import torch.distributed as dist
-from contextlib import contextmanager
-from colossalai.context.parallel_mode import ParallelMode
-from colossalai.core import global_context as gpc
-from .multi_tensor_apply import multi_tensor_applier
-from colossalai.constants import IS_TENSOR_PARALLEL, TENSOR_PARALLEL_ATTRIBUTES, NUM_PARTITIONS
+import torch.distributed as dist
+from colossalai.constants import IS_TENSOR_PARALLEL, NUM_PARTITIONS, TENSOR_PARALLEL_ATTRIBUTES
+from colossalai.context.parallel_mode import ParallelMode
+from colossalai.core import global_context as gpc
+from .multi_tensor_apply import multi_tensor_applier


 def print_rank_0(msg: str, logger=None):
     '''Print messages and save logs(optional). This is executed only if you are the rank-0 gpu.
@@ -33,6 +34,18 @@ def print_rank_0(msg: str, logger=None):
             logger.info(msg)


+def free_port():
+    while True:
+        try:
+            sock = socket.socket()
+            port = random.randint(20000, 65000)
+            sock.bind(('localhost', port))
+            sock.close()
+            return port
+        except Exception:
+            continue
+
+
 def sync_model_param_in_dp(model):
     '''Make sure data parameters are consistent during Data Parallel Mode
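The free_port() helper added above keeps probing random ports until a bind succeeds, which is convenient when short-lived process groups are spawned in unit tests. A hypothetical usage sketch (the single-process gloo group is only for illustration; the import path colossalai.utils follows the __init__ changes shown earlier):

import torch.distributed as dist

from colossalai.utils import free_port

# Grab an unused port and bring up a throwaway one-process group on it.
port = free_port()
dist.init_process_group(backend='gloo',
                        init_method=f'tcp://127.0.0.1:{port}',
                        rank=0,
                        world_size=1)
dist.barrier()
dist.destroy_process_group()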