Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-06 19:40:28 +00:00)
[legacy] clean up legacy code (#4743)
* [legacy] remove outdated codes of pipeline (#4692)
* [legacy] remove cli of benchmark and update optim (#4690)
* [legacy] remove cli of benchmark and update optim
* [doc] fix cli doc test
* [legacy] fix engine clip grad norm
* [legacy] remove outdated colo tensor (#4694)
* [legacy] remove outdated colo tensor
* [test] fix test import
* [legacy] move outdated zero to legacy (#4696)
* [legacy] clean up utils (#4700)
* [legacy] clean up utils
* [example] update examples
* [legacy] clean up amp
* [legacy] fix amp module
* [legacy] clean up gpc (#4742)
* [legacy] clean up context
* [legacy] clean core, constants and global vars
* [legacy] refactor initialize
* [example] fix examples ci
* [example] fix examples ci
* [legacy] fix tests
* [example] fix gpt example
* [example] fix examples ci
* [devops] fix ci installation
* [example] fix examples ci
colossalai/legacy/utils/__init__.py (new file, 53 additions)
@@ -0,0 +1,53 @@
from .checkpointing import load_checkpoint, save_checkpoint
from .common import (
    clip_grad_norm_fp32,
    copy_tensor_parallel_attributes,
    count_zeros_fp32,
    is_dp_rank_0,
    is_model_parallel_parameter,
    is_no_pp_or_last_stage,
    is_tp_rank_0,
    is_using_ddp,
    is_using_pp,
    is_using_sequence,
    param_is_not_tensor_parallel_duplicate,
    print_rank_0,
    switch_virtual_pipeline_parallel_rank,
    sync_model_param,
)
from .data_sampler import DataParallelSampler, get_dataloader
from .memory import (
    colo_device_memory_capacity,
    colo_device_memory_used,
    colo_get_cpu_memory_capacity,
    colo_set_cpu_memory_capacity,
    colo_set_process_memory_fraction,
    report_memory_usage,
)

__all__ = [
    'DataParallelSampler',
    'get_dataloader',
    'save_checkpoint',
    'load_checkpoint',
    'colo_device_memory_capacity',
    'colo_device_memory_used',
    'colo_get_cpu_memory_capacity',
    'colo_set_cpu_memory_capacity',
    'colo_set_process_memory_fraction',
    'report_memory_usage',
    'clip_grad_norm_fp32',
    'copy_tensor_parallel_attributes',
    'count_zeros_fp32',
    'is_dp_rank_0',
    'is_model_parallel_parameter',
    'is_no_pp_or_last_stage',
    'is_tp_rank_0',
    'is_using_ddp',
    'is_using_pp',
    'is_using_sequence',
    'param_is_not_tensor_parallel_duplicate',
    'print_rank_0',
    'switch_virtual_pipeline_parallel_rank',
    'sync_model_param',
]
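
As a quick orientation for this re-exported public surface, here is a minimal usage sketch (not part of the commit). It assumes colossalai is installed with the legacy package available, that a distributed context has already been initialized (e.g. via ColossalAI's launch utilities) so the rank-aware helpers work, and that a CUDA device is present; keyword arguments such as batch_size follow common DataLoader usage rather than anything shown in this diff.

import torch
from colossalai.legacy.utils import (
    clip_grad_norm_fp32,
    get_dataloader,
    print_rank_0,
)

# Hypothetical toy model and dataset; the distributed context is
# assumed to be initialized already so the rank-aware calls below work.
model = torch.nn.Linear(32, 32).cuda()
dataset = torch.utils.data.TensorDataset(torch.randn(128, 32))

# get_dataloader attaches DataParallelSampler so each data-parallel
# rank iterates over its own shard of the dataset.
train_loader = get_dataloader(dataset, shuffle=True, batch_size=16)

print_rank_0("starting training")  # logs once, not on every rank
for (batch,) in train_loader:
    loss = model(batch.cuda()).sum()
    loss.backward()
    # fp32 gradient clipping that skips tensor-parallel duplicates
    clip_grad_norm_fp32(model.parameters(), max_norm=1.0)
    model.zero_grad()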