Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-07 03:52:01 +00:00
[legacy] clean up legacy code (#4743)
* [legacy] remove outdated codes of pipeline (#4692)
* [legacy] remove cli of benchmark and update optim (#4690)
* [legacy] remove cli of benchmark and update optim
* [doc] fix cli doc test
* [legacy] fix engine clip grad norm
* [legacy] remove outdated colo tensor (#4694)
* [legacy] remove outdated colo tensor
* [test] fix test import
* [legacy] move outdated zero to legacy (#4696)
* [legacy] clean up utils (#4700)
* [legacy] clean up utils
* [example] update examples
* [legacy] clean up amp
* [legacy] fix amp module
* [legacy] clean up gpc (#4742)
* [legacy] clean up context
* [legacy] clean core, constants and global vars
* [legacy] refactor initialize
* [example] fix examples ci
* [example] fix examples ci
* [legacy] fix tests
* [example] fix gpt example
* [example] fix examples ci
* [devops] fix ci installation
* [example] fix examples ci
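For downstream code, the practical effect of this cleanup is an import-path change: helpers removed from colossalai.utils (together with the outdated zero, amp, and gpc modules) are relocated under the legacy package, while the general-purpose utilities stay where they are. A minimal migration sketch, assuming the relocated helpers are exposed as colossalai.legacy.utils (the exact target paths are not shown in this commit page):

# Before this commit, callers could do:
#   from colossalai.utils import clip_grad_norm_fp32, is_using_ddp
# After the cleanup, the outdated helpers are assumed to live under the legacy package,
# while the remaining utilities are still importable from colossalai.utils (see the diff below).
from colossalai.legacy.utils import clip_grad_norm_fp32, is_using_ddp  # assumed legacy path
from colossalai.utils import Timer, get_current_device                 # still exported per the diff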
@@ -1,79 +1,32 @@
from .activation_checkpoint import checkpoint
from .checkpointing import load_checkpoint, save_checkpoint
from .common import (
    _cast_float,
    clip_grad_norm_fp32,
    conditional_context,
    copy_tensor_parallel_attributes,
    count_zeros_fp32,
    disposable,
    ensure_path_exists,
    free_storage,
    is_ddp_ignored,
    is_dp_rank_0,
    is_model_parallel_parameter,
    is_no_pp_or_last_stage,
    is_tp_rank_0,
    is_using_ddp,
    is_using_pp,
    is_using_sequence,
    multi_tensor_applier,
    param_is_not_tensor_parallel_duplicate,
    print_rank_0,
    switch_virtual_pipeline_parallel_rank,
    sync_model_param,
)
from .cuda import empty_cache, get_current_device, set_to_cuda, synchronize
from .data_sampler import DataParallelSampler, get_dataloader
from .memory import (
    colo_device_memory_capacity,
    colo_device_memory_used,
    colo_get_cpu_memory_capacity,
    colo_set_cpu_memory_capacity,
    colo_set_process_memory_fraction,
    report_memory_usage,
    set_seed,
)
from .cuda import empty_cache, get_current_device, set_device, set_to_cuda, synchronize
from .multi_tensor_apply import multi_tensor_applier
from .tensor_detector import TensorDetector
from .timer import MultiTimer, Timer

__all__ = [
    'checkpoint',
    'print_rank_0',
    'sync_model_param',
    'is_ddp_ignored',
    'is_dp_rank_0',
    'is_tp_rank_0',
    'is_no_pp_or_last_stage',
    'is_using_ddp',
    'is_using_pp',
    'is_using_sequence',
    'conditional_context',
    'is_model_parallel_parameter',
    'clip_grad_norm_fp32',
    'count_zeros_fp32',
    'copy_tensor_parallel_attributes',
    'param_is_not_tensor_parallel_duplicate',
    'get_current_device',
    'synchronize',
    'empty_cache',
    'set_to_cuda',
    'report_memory_usage',
    'colo_device_memory_capacity',
    'colo_device_memory_used',
    'colo_set_process_memory_fraction',
    'Timer',
    'MultiTimer',
    'multi_tensor_applier',
    'DataParallelSampler',
    'get_dataloader',
    'switch_virtual_pipeline_parallel_rank',
    'TensorDetector',
    'load_checkpoint',
    'save_checkpoint',
    'ensure_path_exists',
    'disposable',
    'colo_set_cpu_memory_capacity',
    'colo_get_cpu_memory_capacity',
    '_cast_float',
    'free_storage',
    'set_seed',
    'is_ddp_ignored',
    'set_device',
]
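For context, the utilities kept in the import list above remain importable from colossalai.utils. A minimal usage sketch, assuming Timer exposes start() and a stop() that returns the elapsed seconds, and that get_current_device() returns the active torch device (signatures are not part of this diff):

from colossalai.utils import Timer, get_current_device

device = get_current_device()  # current CUDA device, or CPU if no GPU is available
timer = Timer()
timer.start()
# ... run a training step here ...
elapsed = timer.stop()         # assumed to return the elapsed time in seconds
print(f"step on {device} took {elapsed:.4f}s")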