ColossalAI/colossalai/booster/mixed_precision/fp16_naive.py
jiangmingyan e871e342b3
[API] add docstrings and initialization to apex amp, naive amp (#3783)
* [mixed_precison] add naive amp demo

* [api] add docstrings and initialization to apex amp, naive amp

* [api] add docstring to apex amp/ naive amp

* [api] fix
2023-05-23 15:17:24 +08:00


from typing import Optional

from .mixed_precision_base import MixedPrecision


class FP16NaiveMixedPrecision(MixedPrecision):
    """
    Precision for mixed precision training in FP16 using naive AMP.

    Args:
        log_num_zeros_in_grad (bool): whether to return the number of zeros in the gradients.
        initial_scale (int): initial scale of the gradient scaler.
        growth_factor (int): the growth rate of the loss scale.
        backoff_factor (float): the decrease rate of the loss scale.
        hysteresis (int): delay shift in dynamic loss scaling.
        max_scale (int): maximum loss scale allowed.
        verbose (bool): if set to ``True``, print debug info.
    """

    def __init__(self,
                 log_num_zeros_in_grad: bool,
                 initial_scale: int,
                 growth_factor: int,
                 backoff_factor: float,
                 hysteresis: int,
                 max_scale: int,
                 verbose: Optional[bool] = None) -> None:
        # The naive AMP integration for the booster API is not wired up yet,
        # so the constructor is currently a placeholder.
        pass
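
A minimal usage sketch, assuming the class above is importable from this module. The hyperparameter values below are typical dynamic loss-scaling settings chosen for illustration, not defaults mandated by this class: the loss is multiplied by the current scale before backward so FP16 gradients stay representable, the scale is multiplied by backoff_factor when an overflow is detected (after hysteresis tolerated overflows), grown by growth_factor after a stable streak, and never allowed past max_scale.

# Hypothetical usage sketch; all numeric values are illustrative assumptions.
mixed_precision = FP16NaiveMixedPrecision(
    log_num_zeros_in_grad=False,   # skip counting zero gradients (it is slow)
    initial_scale=2**16,           # start with a large loss scale
    growth_factor=2,               # double the scale after a stable streak
    backoff_factor=0.5,            # halve the scale on overflow
    hysteresis=2,                  # tolerate this many overflows before backing off
    max_scale=2**32,               # upper bound on the loss scale
    verbose=False,
)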