Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-07 03:52:01 +00:00
[MOE] remove old MoE legacy (#493)
@@ -2,10 +2,10 @@ from .cuda import empty_cache, get_current_device, set_to_cuda, synchronize
 from .activation_checkpoint import checkpoint
 from .common import (clip_grad_norm_fp32, conditional_context, copy_tensor_parallel_attributes, count_zeros_fp32,
-                     free_port, is_dp_rank_0, is_model_parallel_parameter, is_moe_parallel_parameter,
-                     is_no_pp_or_last_stage, is_tp_rank_0, is_using_ddp, is_using_pp, is_using_sequence,
-                     multi_tensor_applier, param_is_not_tensor_parallel_duplicate, print_rank_0,
-                     switch_virtual_pipeline_parallel_rank, sync_model_param)
+                     free_port, is_dp_rank_0, is_model_parallel_parameter, is_no_pp_or_last_stage, is_tp_rank_0,
+                     is_using_ddp, is_using_pp, is_using_sequence, multi_tensor_applier,
+                     param_is_not_tensor_parallel_duplicate, print_rank_0, switch_virtual_pipeline_parallel_rank,
+                     sync_model_param)
 from .data_sampler import DataParallelSampler, get_dataloader
 from .gradient_accumulation import accumulate_gradient
 from .memory import report_memory_usage
@@ -18,5 +18,5 @@ __all__ = [
     'is_model_parallel_parameter', 'clip_grad_norm_fp32', 'count_zeros_fp32', 'copy_tensor_parallel_attributes',
     'param_is_not_tensor_parallel_duplicate', 'get_current_device', 'synchronize', 'empty_cache', 'set_to_cuda',
     'report_memory_usage', 'Timer', 'MultiTimer', 'multi_tensor_applier', 'accumulate_gradient', 'DataParallelSampler',
-    'get_dataloader', 'switch_virtual_pipeline_parallel_rank', 'is_moe_parallel_parameter', 'TensorDetector'
+    'get_dataloader', 'switch_virtual_pipeline_parallel_rank', 'TensorDetector'
 ]
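
The practical effect of this diff is that is_moe_parallel_parameter is no longer imported into, or re-exported from, colossalai.utils. A minimal sketch of how a downstream call site might adapt, assuming a colossalai build that includes this commit; the guarded-import pattern and the chosen names other than those in the diff are illustrative only:

# Sketch only: these names still appear in the __all__ list shown in the diff above.
from colossalai.utils import clip_grad_norm_fp32, get_dataloader

# is_moe_parallel_parameter was removed from colossalai.utils by this commit,
# so older call sites should either drop the import or fail loudly at import time.
try:
    from colossalai.utils import is_moe_parallel_parameter  # removed in #493
except ImportError:
    is_moe_parallel_parameter = None  # legacy MoE helper is no longer available here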