Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-16 06:30:41 +00:00
[legacy] clean up legacy code (#4743)
* [legacy] remove outdated codes of pipeline (#4692)
* [legacy] remove cli of benchmark and update optim (#4690)
* [legacy] remove cli of benchmark and update optim
* [doc] fix cli doc test
* [legacy] fix engine clip grad norm
* [legacy] remove outdated colo tensor (#4694)
* [legacy] remove outdated colo tensor
* [test] fix test import
* [legacy] move outdated zero to legacy (#4696)
* [legacy] clean up utils (#4700)
* [legacy] clean up utils
* [example] update examples
* [legacy] clean up amp
* [legacy] fix amp module
* [legacy] clean up gpc (#4742)
* [legacy] clean up context
* [legacy] clean core, constants and global vars
* [legacy] refactor initialize
* [example] fix examples ci
* [example] fix examples ci
* [legacy] fix tests
* [example] fix gpt example
* [example] fix examples ci
* [devops] fix ci installation
* [example] fix examples ci
colossalai/legacy/amp/torch_amp/__init__.py (new file, +45 lines)
@@ -0,0 +1,45 @@
from typing import Optional

import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer

from colossalai.context import Config

from .torch_amp import TorchAMPLoss, TorchAMPModel, TorchAMPOptimizer


def convert_to_torch_amp(model: nn.Module,
                         optimizer: Optimizer,
                         criterion: Optional[_Loss] = None,
                         amp_config: Optional[Config] = None):
    """A helper function to wrap training components with PyTorch AMP modules.

    Args:
        model (:class:`torch.nn.Module`): your model object.
        optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
        criterion (:class:`torch.nn.modules.loss._Loss`, optional): your loss function object.
        amp_config (:class:`colossalai.context.Config` or dict, optional): configuration for PyTorch AMP.

        The ``amp_config`` should include the parameters below:
        ::

            init_scale (float, optional, default=2.**16)
            growth_factor (float, optional, default=2.0)
            backoff_factor (float, optional, default=0.5)
            growth_interval (int, optional, default=2000)
            enabled (bool, optional, default=True)

    Returns:
        A tuple (model, optimizer, criterion).
    """
    model = TorchAMPModel(model)
    if amp_config is None:
        amp_config = dict()
    optimizer = TorchAMPOptimizer(optimizer, **amp_config)
    if criterion:
        criterion = TorchAMPLoss(criterion)
    return model, optimizer, criterion


__all__ = ['convert_to_torch_amp', 'TorchAMPModel', 'TorchAMPLoss', 'TorchAMPOptimizer']
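For context, a minimal usage sketch of this helper. It is not part of the commit: the import path is inferred from the file location in this diff, and the `optimizer.backward(loss)` / `optimizer.step()` convention is an assumption based on the legacy ColossalAI optimizer interface; the `amp_config` keys are those documented in the docstring above (they mirror `torch.cuda.amp.GradScaler` arguments).

import torch
import torch.nn as nn

# Import path assumed from this diff's file location.
from colossalai.legacy.amp.torch_amp import convert_to_torch_amp

model = nn.Linear(32, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

# amp_config keys are forwarded to the wrapped optimizer's GradScaler.
model, optimizer, criterion = convert_to_torch_amp(
    model, optimizer, criterion,
    amp_config=dict(init_scale=2.**16, growth_interval=2000))

# One training step. Assumption: the wrapped optimizer scales the loss on
# backward() and unscales gradients before step(), per the legacy
# ColossalaiOptimizer convention; this is not shown in the diff itself.
inputs = torch.randn(8, 32)
labels = torch.randint(0, 4, (8,))
loss = criterion(model(inputs), labels)
optimizer.backward(loss)
optimizer.step()
optimizer.zero_grad()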