Refactored docstring to google style
@@ -36,14 +36,12 @@ class CosineAnnealingLR(_CosineAnnealingLR):
     .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
         https://arxiv.org/abs/1608.03983

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param eta_min: Minimum learning rate, defaults to 0
-    :type eta_min: int, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        eta_min (int, optional): Minimum learning rate, defaults to 0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, eta_min: int = 0, last_epoch: int = -1, **kwargs):
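For reference, the cosine annealing these wrappers document follows the closed form from the SGDR paper linked above; a minimal sketch of that formula in plain Python (not the wrapper's actual implementation)::

    import math

    def cosine_annealing_lr(step, base_lr, total_steps, eta_min=0.0):
        # lr decays from base_lr at step 0 to eta_min at total_steps along a half cosine
        step = min(step, total_steps)
        return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * step / total_steps))

    print(cosine_annealing_lr(0, 0.1, 100))    # 0.1
    print(cosine_annealing_lr(100, 0.1, 100))  # 0.0 (eta_min)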
@@ -54,16 +52,13 @@ class CosineAnnealingLR(_CosineAnnealingLR):
 class CosineAnnealingWarmupLR(WarmupScheduler):
     """Cosine annealing learning rate scheduler with learning rate warmup. A linear warmup schedule will be applied.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param warmup_steps: Number of warmup steps, defaults to 0
-    :type warmup_steps: int, optional
-    :param eta_min: Minimum learning rate, defaults to 0
-    :type eta_min: int, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
+        eta_min (int, optional): Minimum learning rate, defaults to 0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, eta_min: float = 0., last_epoch: int = -1):
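One common way to combine the linear warmup described above with cosine annealing is the piecewise schedule below; this is a sketch of the documented behaviour, not the class's exact code, and the handover point may differ slightly in the wrapper::

    import math

    def warmup_cosine_lr(step, base_lr, total_steps, warmup_steps=0, eta_min=0.0):
        if warmup_steps > 0 and step < warmup_steps:
            # linear ramp up to base_lr over warmup_steps
            return base_lr * (step + 1) / warmup_steps
        # cosine decay from base_lr to eta_min over the remaining steps
        progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
        return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * min(progress, 1.0)))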
@@ -76,14 +71,12 @@ class CosineAnnealingWarmupLR(WarmupScheduler):
 class FlatAnnealingLR(DelayerScheduler):
     """Flat and cosine annealing learning rate scheduler. The learning rate will be a fixed value before starting decay.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param pct_start: Percent of steps before starting learning rate decay
-    :type pct_start: float
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        pct_start (float, optional): Percent of steps before starting learning rate decay, defaults to 0.72.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, pct_start: float = 0.72, last_epoch: int = -1, **kwargs):
@@ -102,18 +95,14 @@ class FlatAnnealingWarmupLR(WarmupDelayerScheduler):
     """Flat and cosine annealing learning rate scheduler with learning rate warmup. A linear warmup schedule will be
     applied, and then the learning rate will be a fixed value before starting decay.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param warmup_steps: Number of warmup steps, defaults to 0
-    :type warmup_steps: int, optional
-    :param pct_start: Percent of steps before starting learning rate decay
-    :type pct_start: float
-    :param eta_min: Minimum learning rate, defaults to 0
-    :type eta_min: int, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
+        pct_start (float, optional): Percent of steps before starting learning rate decay, defaults to 0.72.
+        eta_min (int, optional): Minimum learning rate, defaults to 0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, pct_start: float = 0.72, eta_min: int = 0,
@@ -14,16 +14,15 @@ class _enable_get_lr_call:


 class DelayerScheduler(_LRScheduler):
-    """ Starts with a flat lr schedule until it reaches N epochs the applies a scheduler
+    """Starts with a flat lr schedule until it reaches N epochs then applies
+    the specific scheduler (For example: ReduceLROnPlateau).

-    :param optimizer: Wrapped optimizer.
-    :type optimizer: torch.optim.Optimizer
-    :param delay_epochs: Number of epochs to keep the initial lr until starting aplying the scheduler
-    :type delay_epochs: int
-    :param after_scheduler: After target_epoch, use this scheduler(eg. ReduceLROnPlateau)
-    :type after_scheduler: torch.optim.lr_scheduler
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        delay_epochs (int): Number of epochs to keep the initial lr until starting to apply the scheduler.
+        after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, delay_epochs, after_scheduler, last_epoch=-1):
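Based on the signature documented above, a typical construction would look roughly as follows; the import path and the choice of after_scheduler are assumptions for illustration, not taken from this diff::

    import torch
    from torch.optim.lr_scheduler import CosineAnnealingLR
    from colossalai.nn.lr_scheduler.delayed import DelayerScheduler  # assumed import path

    model = torch.nn.Linear(16, 16)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    # keep the initial lr flat for 10 epochs, then hand over to cosine annealing
    scheduler = DelayerScheduler(optimizer, delay_epochs=10,
                                 after_scheduler=CosineAnnealingLR(optimizer, T_max=90))

    for epoch in range(100):
        # ... run one training epoch ...
        scheduler.step()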
@@ -57,16 +56,15 @@ class DelayerScheduler(_LRScheduler):


 class WarmupScheduler(_LRScheduler):
-    """ Starts with a linear warmup lr schedule until it reaches N epochs the applies a scheduler
+    """Starts with a linear warmup lr schedule until it reaches N epochs then applies
+    the specific scheduler (For example: ReduceLROnPlateau).

-    :param optimizer: Wrapped optimizer.
-    :type optimizer: torch.optim.Optimizer
-    :param warmup_epochs: Number of epochs to linearly warmup lr until starting aplying the scheduler
-    :type warmup_epochs: int
-    :param after_scheduler: After target_epoch, use this scheduler(eg. ReduceLROnPlateau)
-    :type after_scheduler: torch.optim.lr_scheduler
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        warmup_epochs (int): Number of epochs to linearly warm up lr until starting to apply the scheduler.
+        after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, warmup_epochs, after_scheduler, last_epoch=-1):
@@ -97,18 +95,16 @@ class WarmupScheduler(_LRScheduler):


 class WarmupDelayerScheduler(_LRScheduler):
-    """ Starts with a linear warmup lr schedule until it reaches N epochs and a flat lr schedule until it reaches M epochs the applies a scheduler
+    """Starts with a linear warmup lr schedule until it reaches N epochs and a flat lr schedule
+    until it reaches M epochs then applies the specific scheduler (For example: ReduceLROnPlateau).

-    :param optimizer: Wrapped optimizer.
-    :type optimizer: torch.optim.Optimizer
-    :param warmup_epochs: Number of epochs to linearly warmup lr until starting aplying the scheduler
-    :type warmup_epochs: int
-    :param delay_epochs: Number of epochs to keep the initial lr until starting aplying the scheduler
-    :type delay_epochs: int
-    :param after_scheduler: After target_epoch, use this scheduler(eg. ReduceLROnPlateau)
-    :type after_scheduler: torch.optim.lr_scheduler
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        warmup_epochs (int): Number of epochs to linearly warm up lr until starting to apply the scheduler.
+        delay_epochs (int): Number of epochs to keep the initial lr until starting to apply the scheduler.
+        after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, warmup_epochs, delay_epochs, after_scheduler, last_epoch=-1):
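Putting the delayed and warmup variants side by side, the warmup-then-flat-then-decay behaviour amounts to a three-phase piecewise schedule; a rough plain-Python sketch (the wrapper delegates the final phase to after_scheduler rather than computing it inline)::

    def warmup_delay_lr(epoch, base_lr, warmup_epochs, delay_epochs, after_lr):
        if epoch < warmup_epochs:                  # phase 1: linear warmup
            return base_lr * (epoch + 1) / warmup_epochs
        if epoch < warmup_epochs + delay_epochs:   # phase 2: hold base_lr flat
            return base_lr
        return after_lr                            # phase 3: value from after_scheduler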
@@ -5,16 +5,14 @@ from colossalai.registry import LR_SCHEDULERS

 @LR_SCHEDULERS.register_module
 class LinearWarmupLR(_LRScheduler):
-    """Linearly warmup learning rate and then linearly decay
+    """Linearly warmup learning rate and then linearly decay.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param warmup_steps: Number of warmup steps, defaults to 0
-    :type warmup_steps: int, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, last_epoch: int = -1, **kwargs):
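The linear warmup followed by linear decay documented for LinearWarmupLR can be written down directly; a minimal sketch (the wrapper's exact interpolation endpoints may differ)::

    def linear_warmup_decay_lr(step, base_lr, total_steps, warmup_steps=0):
        if warmup_steps > 0 and step < warmup_steps:
            # ramp up linearly during warmup
            return base_lr * (step + 1) / warmup_steps
        # then decay linearly towards zero by total_steps
        remaining = max(1, total_steps - warmup_steps)
        return base_lr * max(0.0, (total_steps - step) / remaining)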
@@ -13,18 +13,13 @@ class MultiStepLR(_MultiStepLR):
     happen simultaneously with other changes to the learning rate from outside
     this scheduler. When last_epoch=-1, sets initial lr as lr.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param milestones: List of epoch indices. Must be increasing, defaults to None
-    :type milestones: List[int], optional
-    :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1
-    :type gamma: float, optional
-    :param num_steps_per_epoch: Number of steps per epoch, defaults to -1
-    :type num_steps_per_epoch: int, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        milestones (List[int], optional): List of epoch indices. Must be increasing, defaults to None.
+        gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 0.1.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, milestones: List[int] = None, gamma: float = 0.1, last_epoch: int = -1, **kwargs):
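The milestones/gamma semantics match those of torch.optim.lr_scheduler.MultiStepLR, which this class appears to wrap; a short illustrative example against the torch scheduler itself::

    import torch
    from torch.optim.lr_scheduler import MultiStepLR

    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    scheduler = MultiStepLR(optimizer, milestones=[30, 80], gamma=0.1)

    for epoch in range(100):
        # lr: 0.1 for epochs 0-29, 0.01 for 30-79, 0.001 from epoch 80 onwards
        optimizer.step()
        scheduler.step()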
@@ -33,22 +28,17 @@ class MultiStepLR(_MultiStepLR):

 @LR_SCHEDULERS.register_module
 class MultiStepWarmupLR(WarmupScheduler):
-    """Multi-step laerning rate scheduler with warmup.
+    """Multistep learning rate scheduler with warmup.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param warmup_steps: Number of warmup steps, defaults to 0
-    :type warmup_steps: int, optional
-    :param milestones: List of epoch indices. Must be increasing, defaults to None
-    :type milestones: List[int], optional
-    :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1
-    :type gamma: float, optional
-    :param num_steps_per_epoch: Number of steps per epoch, defaults to -1
-    :type num_steps_per_epoch: int, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
+        milestones (List[int], optional): List of epoch indices. Must be increasing, defaults to None.
+        gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 0.1.
+        num_steps_per_epoch (int, optional): Number of steps per epoch, defaults to -1.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, milestones: List[int] = None,
@@ -28,43 +28,41 @@ class OneCycleLR(_OneCycleLR):
     claims that "unpublished work has shown even better results by using only two phases". To
     mimic the behaviour of the original paper instead, set ``three_phase=True``.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param pct_start: The percentage of the cycle (in number of steps) spent increasing the learning rate, defaults to 0.3
-    :type pct_start: float, optional
-    :param anneal_strategy: {'cos', 'linear'}
-        Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
-        linear annealing, defaults to 'cos'
-    :type anneal_strategy: str, optional
-    :param cycle_momentum: If ``True``, momentum is cycled inversely
-        to learning rate between 'base_momentum' and 'max_momentum', defaults to True
-    :type cycle_momentum: bool, optional
-    :param base_momentum: Lower momentum boundaries in the cycle
-        for each parameter group. Note that momentum is cycled inversely
-        to learning rate; at the peak of a cycle, momentum is
-        'base_momentum' and learning rate is 'max_lr', defaults to 0.85
-    :type base_momentum: float, optional
-    :param max_momentum: Upper momentum boundaries in the cycle
-        for each parameter group. Functionally,
-        it defines the cycle amplitude (max_momentum - base_momentum).
-        Note that momentum is cycled inversely
-        to learning rate; at the start of a cycle, momentum is 'max_momentum'
-        and learning rate is 'base_lr', defaults to 0.95
-    :type max_momentum: float, optional
-    :param div_factor: Determines the initial learning rate via
-        initial_lr = max_lr/div_factor, defaults to 25.0
-    :type div_factor: float, optional
-    :param final_div_factor: Determines the minimum learning rate via
-        min_lr = initial_lr/final_div_factor, defaults to 10000.0
-    :type final_div_factor: float, optional
-    :param last_epoch: The index of the last batch. This parameter is used when
-        resuming a training job. Since `step()` should be invoked after each
-        batch instead of after each epoch, this number represents the total
-        number of *batches* computed, not the total number of epochs computed.
-        When last_epoch=-1, the schedule is started from the beginning, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        pct_start (float, optional):
+            The percentage of the cycle (in number of steps) spent increasing the learning rate, defaults to 0.3.
+        anneal_strategy (str, optional): {'cos', 'linear'}, Specifies the annealing strategy:
+            "cos" for cosine annealing, "linear" for linear annealing, defaults to 'cos'.
+        cycle_momentum (bool, optional): If ``True``, momentum is cycled inversely
+            to learning rate between 'base_momentum' and 'max_momentum', defaults to True.
+        base_momentum (float, optional): Lower momentum boundaries in the cycle for each parameter group.
+            Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is
+            'base_momentum' and learning rate is 'max_lr', defaults to 0.85.
+        max_momentum (float, optional): Upper momentum boundaries in the cycle for each parameter group.
+            Functionally, it defines the cycle amplitude (max_momentum - base_momentum).
+            Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum'
+            and learning rate is 'base_lr', defaults to 0.95.
+        div_factor (float, optional): Determines the initial learning rate via
+            initial_lr = max_lr/div_factor, defaults to 25.0.
+        final_div_factor (float, optional): Determines the minimum learning rate via
+            min_lr = initial_lr/final_div_factor, defaults to 10000.0.
+        last_epoch (int, optional): The index of the last batch. This parameter is used when resuming a training job.
+            Since `step()` should be invoked after each batch instead of after each epoch, this number represents
+            the total number of *batches* computed, not the total number of epochs computed.
+            When last_epoch=-1, the schedule is started from the beginning, defaults to -1.
+
+    The ``kwargs`` for initializing torch.optim.lr_scheduler.OneCycleLR should include the parameters below:
+    ::
+
+        epochs (int, optional, default=None)
+        steps_per_epoch (int, optional, default=None)
+        three_phase (bool, optional, default=False)
+        verbose (bool, optional, default=False)
+
+    More details about kwargs can be found in
+    `OneCycleLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.OneCycleLR.html#torch.optim.lr_scheduler.OneCycleLR>`_.

     .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
         https://arxiv.org/abs/1708.07120
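Since the wrapper forwards its ``kwargs`` to torch.optim.lr_scheduler.OneCycleLR, a concrete call against the torch scheduler makes the parameter split clearer; the values below are illustrative only::

    import torch
    from torch.optim.lr_scheduler import OneCycleLR

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    scheduler = OneCycleLR(optimizer,
                           max_lr=0.1,            # peak lr, reached after pct_start of the cycle
                           total_steps=1000,
                           pct_start=0.3,
                           anneal_strategy='cos',
                           div_factor=25.0,       # initial_lr = max_lr / div_factor
                           final_div_factor=1e4)  # min_lr = initial_lr / final_div_factor

    for step in range(1000):
        optimizer.step()
        scheduler.step()  # called once per batch, not once per epoch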
@@ -8,16 +8,13 @@ from .delayed import WarmupScheduler
 class PolynomialLR(_LRScheduler):
     """Polynomial learning rate scheduler.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param end_lr: Minimum learning rate, defaults to 0.0001
-    :type end_lr: float, optional
-    :param power: The power of polynomial, defaults to 1.0
-    :type power: float, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        end_lr (float, optional): Minimum learning rate, defaults to 0.0001.
+        power (float, optional): The power of polynomial, defaults to 1.0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, end_lr: float = 0.0001, power: float = 1.0, last_epoch: int = -1,
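The polynomial decay documented here is commonly defined by the closed form below; a sketch, assuming decay runs over all of total_steps (the wrapper may clip or offset the step counter differently)::

    def polynomial_lr(step, base_lr, total_steps, end_lr=0.0001, power=1.0):
        # interpolate from base_lr down to end_lr following (1 - progress) ** power
        progress = min(step, total_steps) / total_steps
        return (base_lr - end_lr) * (1 - progress) ** power + end_lr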
@@ -44,18 +41,14 @@ class PolynomialLR(_LRScheduler):
 class PolynomialWarmupLR(WarmupScheduler):
     """Polynomial learning rate scheduler with warmup.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param warmup_steps: Number of warmup steps, defaults to 0
-    :type warmup_steps: int, optional
-    :param end_lr: Minimum learning rate, defaults to 0.0001
-    :type end_lr: float, optional
-    :param power: The power of polynomial, defaults to 1.0
-    :type power: float, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
+        end_lr (float, optional): Minimum learning rate, defaults to 0.0001.
+        power (float, optional): The power of polynomial, defaults to 1.0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
+            the schedule is started from the beginning and the initial lr is set to lr.
     """

     def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, end_lr: float = 0.0001, power: float = 1.0,
@@ -11,16 +11,13 @@ class LambdaLR(_LambdaLR):
     """Sets the learning rate of each parameter group to the initial lr
     times a given function. When last_epoch=-1, sets initial lr as lr.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param lr_lambda: A function which computes a multiplicative
-        factor given an integer parameter epoch, or a list of such
-        functions, one for each group in optimizer.param_groups, defaults to None
-    :type lr_lambda: function or list, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        lr_lambda (Union[``function``, ``list[function]``]): A function which computes a multiplicative
+            factor given an integer parameter epoch, or a list of such functions,
+            one for each group in optimizer.param_groups, defaults to None.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """

     def __init__(self, optimizer, total_steps, lr_lambda=None, last_epoch: int = -1) -> None:
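For the lambda-based scheduler, a short example against torch.optim.lr_scheduler.LambdaLR (which this class appears to wrap) shows the intended use of lr_lambda::

    import torch
    from torch.optim.lr_scheduler import LambdaLR

    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    # multiply the initial lr by 0.95 ** epoch; a list of lambdas (one per param group) also works
    scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 0.95 ** epoch)

    for epoch in range(10):
        optimizer.step()
        scheduler.step()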
@@ -30,18 +27,15 @@ class LambdaLR(_LambdaLR):
 @LR_SCHEDULERS.register_module
 class MultiplicativeLR(_MultiplicativeLR):
     """Multiply the learning rate of each parameter group by the factor given
-    in the specified function. When last_epoch=-1, sets initial lr as lr
+    in the specified function. When last_epoch=-1, sets initial lr as lr.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param lr_lambda: A function which computes a multiplicative
-        factor given an integer parameter epoch, or a list of such
-        functions, one for each group in optimizer.param_groups, defaults to None
-    :type lr_lambda: function or list, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        lr_lambda (Union[``function``, ``list[function]``]): A function which computes a multiplicative
+            factor given an integer parameter epoch, or a list of such functions,
+            one for each group in optimizer.param_groups, defaults to None.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """

     def __init__(self, optimizer, total_steps, lr_lambda=None, last_epoch: int = -1) -> None:
@@ -53,18 +47,14 @@ class StepLR(_StepLR):
     """Decays the learning rate of each parameter group by gamma every
     step_size epochs. Notice that such decay can happen simultaneously with
     other changes to the learning rate from outside this scheduler. When
-    last_epoch=-1, sets initial lr as lr
+    last_epoch=-1, sets initial lr as lr.

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param step_size: Period of learning rate decay, defaults to 1
-    :type step_size: int, optional
-    :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1
-    :type gamma: float, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        step_size (int, optional): Period of learning rate decay, defaults to 1.
+        gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 0.1.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """

     def __init__(self, optimizer, total_steps, step_size: int = 1, gamma: float = 0.1, last_epoch: int = -1) -> None:
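The step decay documented above mirrors torch.optim.lr_scheduler.StepLR; a minimal example against the torch scheduler::

    import torch
    from torch.optim.lr_scheduler import StepLR

    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    scheduler = StepLR(optimizer, step_size=30, gamma=0.1)  # lr is multiplied by 0.1 every 30 epochs

    for epoch in range(90):
        optimizer.step()
        scheduler.step()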
@@ -77,14 +67,11 @@ class ExponentialLR(_ExponentialLR):
     """Decays the learning rate of each parameter group by gamma every epoch.
     When last_epoch=-1, sets initial lr as lr

-    :param optimizer: Wrapped optimizer
-    :type optimizer: torch.optim.Optimizer
-    :param total_steps: Number of total training steps
-    :type total_steps: int
-    :param gamma: Multiplicative factor of learning rate decay, defaults to 1.0
-    :type gamma: float, optional
-    :param last_epoch: The index of last epoch, defaults to -1
-    :type last_epoch: int, optional
+    Args:
+        optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Wrapped optimizer.
+        total_steps (int): Number of total training steps.
+        gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 1.0.
+        last_epoch (int, optional): The index of last epoch, defaults to -1.
     """

     def __init__(self, optimizer, total_steps, gamma: float = 1.0,