Mirror of https://github.com/hpcaitech/ColossalAI.git
Refactored docstring to google style
@@ -11,16 +11,13 @@ class LambdaLR(_LambdaLR):
     """Sets the learning rate of each parameter group to the initial lr
     times a given function. When last_epoch=-1, sets initial lr as lr.
 
-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param lr_lambda: A function which computes a multiplicative
-        factor given an integer parameter epoch, or a list of such
-        functions, one for each group in optimizer.param_groups, defaults to None
-    :type lr_lambda: function or list, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        lr_lambda (Union[``function``, ``list[function]``]): A function which computes a multiplicative
+            factor given an integer parameter epoch, or a list of such functions,
+            one for each group in optimizer.param_groups, defaults to None.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """
 
     def __init__(self, optimizer, total_steps, lr_lambda=None, last_epoch: int = -1) -> None:
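For context, a minimal usage sketch of the LambdaLR wrapper documented above. This is a sketch only: the colossalai.nn.lr_scheduler import path, the toy model, and the loop are assumptions, not part of this diff.

    # Usage sketch (assumed import path; not taken from this diff).
    import torch
    from colossalai.nn.lr_scheduler import LambdaLR

    model = torch.nn.Linear(8, 2)                             # placeholder model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    # lr_lambda maps the epoch index to a factor applied to the initial lr.
    scheduler = LambdaLR(optimizer, total_steps=100, lr_lambda=lambda epoch: 0.95 ** epoch)

    for epoch in range(100):
        optimizer.step()   # training step(s) for this epoch would go here
        scheduler.step()   # lr for the next epoch becomes 0.1 * 0.95 ** (epoch + 1)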
@@ -30,18 +27,15 @@ class MultiplicativeLR(_MultiplicativeLR):
 @LR_SCHEDULERS.register_module
 class MultiplicativeLR(_MultiplicativeLR):
     """Multiply the learning rate of each parameter group by the factor given
-    in the specified function. When last_epoch=-1, sets initial lr as lr
+    in the specified function. When last_epoch=-1, sets initial lr as lr.
 
-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param lr_lambda: A function which computes a multiplicative
-        factor given an integer parameter epoch, or a list of such
-        functions, one for each group in optimizer.param_groups, defaults to None
-    :type lr_lambda: function or list, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        lr_lambda (Union[``function``, ``list[function]``]): A function which computes a multiplicative
+            factor given an integer parameter epoch, or a list of such functions,
+            one for each group in optimizer.param_groups, defaults to None.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """
 
     def __init__(self, optimizer, total_steps, lr_lambda=None, last_epoch: int = -1) -> None:
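A similarly hedged sketch for MultiplicativeLR, which applies the returned factor to the current lr of each parameter group rather than to the initial lr (import path and optimizer are again assumptions):

    # Usage sketch (assumed import path; not taken from this diff).
    import torch
    from colossalai.nn.lr_scheduler import MultiplicativeLR

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)

    # Unlike LambdaLR, the factor multiplies the current lr at every scheduler.step().
    scheduler = MultiplicativeLR(optimizer, total_steps=100, lr_lambda=lambda epoch: 0.95)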
@@ -53,18 +47,14 @@ class StepLR(_StepLR):
     """Decays the learning rate of each parameter group by gamma every
     step_size epochs. Notice that such decay can happen simultaneously with
     other changes to the learning rate from outside this scheduler. When
-    last_epoch=-1, sets initial lr as lr
+    last_epoch=-1, sets initial lr as lr.
 
-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param step_size: Period of learning rate decay, defaults to 1
-    :type step_size: int, optional
-    :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1
-    :type gamma: float, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        step_size (int, optional): Period of learning rate decay, defaults to 1.
+        gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 0.1.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """
 
     def __init__(self, optimizer, total_steps, step_size: int = 1, gamma: float = 0.1, last_epoch: int = -1) -> None:
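A sketch for StepLR under the same assumptions, showing how step_size and gamma interact; the concrete values are illustrative:

    # Usage sketch (assumed import path; not taken from this diff).
    import torch
    from colossalai.nn.lr_scheduler import StepLR

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)

    # lr is multiplied by gamma every step_size epochs:
    # 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89.
    scheduler = StepLR(optimizer, total_steps=90, step_size=30, gamma=0.1)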
@@ -77,14 +67,11 @@ class ExponentialLR(_ExponentialLR):
     """Decays the learning rate of each parameter group by gamma every epoch.
     When last_epoch=-1, sets initial lr as lr
 
-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param gamma: Multiplicative factor of learning rate decay, defaults to 1.0
-    :type gamma: float, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 1.0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """
 
     def __init__(self, optimizer, total_steps, gamma: float = 1.0,
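Finally, a sketch for ExponentialLR under the same assumptions (import path and values are illustrative):

    # Usage sketch (assumed import path; not taken from this diff).
    import torch
    from colossalai.nn.lr_scheduler import ExponentialLR

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)

    # lr decays by a factor of gamma each epoch: after k steps, lr = 0.1 * 0.9 ** k.
    scheduler = ExponentialLR(optimizer, total_steps=50, gamma=0.9)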