Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-06 03:20:52 +00:00
Fixed docstring in colossalai (#171)
@@ -10,16 +10,15 @@ from colossalai.registry import LR_SCHEDULERS
 class LambdaLR(_LambdaLR):
     """Sets the learning rate of each parameter group to the initial lr
     times a given function. When last_epoch=-1, sets initial lr as lr.
 
     :param optimizer: Wrapped optimizer
     :type optimizer: torch.optim.Optimizer
-    :param total_steps: number of total training steps
+    :param total_steps: Number of total training steps
     :type total_steps: int
     :param lr_lambda: A function which computes a multiplicative
         factor given an integer parameter epoch, or a list of such
         functions, one for each group in optimizer.param_groups, defaults to None
     :type lr_lambda: function or list, optional
     :param num_steps_per_epoch: number of steps per epoch, defaults to -1
     :type num_steps_per_epoch: int, optional
     :param last_epoch: The index of last epoch, defaults to -1
     :type last_epoch: int, optional
     """
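
For readers skimming the docstring: LambdaLR scales the initial learning rate by whatever lr_lambda returns for the current epoch. A minimal sketch using the upstream torch.optim.lr_scheduler.LambdaLR that this class wraps; per the docstring above, the colossalai subclass adds total_steps and num_steps_per_epoch on top of this, which the sketch omits:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# lr at epoch e is initial_lr * lr_lambda(e): 0.1, 0.095, 0.09025, ...
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda epoch: 0.95 ** epoch)

for epoch in range(3):
    optimizer.step()    # one (dummy) optimization step
    scheduler.step()    # advance the epoch counter
    print(scheduler.get_last_lr())  # [0.095], [0.09025...], ...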
@@ -32,16 +31,15 @@ class LambdaLR(_LambdaLR):
 class MultiplicativeLR(_MultiplicativeLR):
     """Multiply the learning rate of each parameter group by the factor given
     in the specified function. When last_epoch=-1, sets initial lr as lr
 
     :param optimizer: Wrapped optimizer
     :type optimizer: torch.optim.Optimizer
-    :param total_steps: number of total training steps
+    :param total_steps: Number of total training steps
     :type total_steps: int
     :param lr_lambda: A function which computes a multiplicative
         factor given an integer parameter epoch, or a list of such
         functions, one for each group in optimizer.param_groups, defaults to None
     :type lr_lambda: function or list, optional
     :param num_steps_per_epoch: number of steps per epoch, defaults to -1
     :type num_steps_per_epoch: int, optional
     :param last_epoch: The index of last epoch, defaults to -1
     :type last_epoch: int, optional
     """
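
Note the contrast with LambdaLR: here the factor multiplies the current learning rate at every step, so even a constant lambda compounds. A short sketch, again against the upstream torch class being wrapped rather than the colossalai subclass:

import torch

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
# a constant factor of 0.95 compounds: 0.1 -> 0.095 -> 0.09025 -> ...
scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
    optimizer, lr_lambda=lambda epoch: 0.95)

for _ in range(3):
    optimizer.step()
    scheduler.step()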
@@ -56,16 +54,15 @@ class StepLR(_StepLR):
     step_size epochs. Notice that such decay can happen simultaneously with
     other changes to the learning rate from outside this scheduler. When
     last_epoch=-1, sets initial lr as lr
 
     :param optimizer: Wrapped optimizer
     :type optimizer: torch.optim.Optimizer
-    :param total_steps: number of total training steps
+    :param total_steps: Number of total training steps
     :type total_steps: int
     :param step_size: Period of learning rate decay, defaults to 1
     :type step_size: int, optional
     :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1
     :type gamma: float, optional
     :param num_steps_per_epoch: number of steps per epoch, defaults to -1
     :type num_steps_per_epoch: int, optional
     :param last_epoch: The index of last epoch, defaults to -1
     :type last_epoch: int, optional
     """
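
As a quick illustration of step_size and gamma acting together, a sketch with the upstream torch.optim.lr_scheduler.StepLR; the wrapper's extra total_steps / num_steps_per_epoch arguments documented above are left out:

import torch

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
# lr is multiplied by gamma every step_size epochs:
# epochs 0-1: 0.1, epochs 2-3: 0.01, epochs 4-5: 0.001
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)

for epoch in range(6):
    optimizer.step()
    scheduler.step()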
@@ -79,14 +76,13 @@ class StepLR(_StepLR):
 class ExponentialLR(_ExponentialLR):
     """Decays the learning rate of each parameter group by gamma every epoch.
     When last_epoch=-1, sets initial lr as lr
 
     :param optimizer: Wrapped optimizer
     :type optimizer: torch.optim.Optimizer
-    :param total_steps: number of total training steps
+    :param total_steps: Number of total training steps
     :type total_steps: int
     :param gamma: Multiplicative factor of learning rate decay, defaults to 1.0
     :type gamma: float, optional
     :param num_steps_per_epoch: number of steps per epoch, defaults to -1
     :type num_steps_per_epoch: int, optional
     :param last_epoch: The index of last epoch, defaults to -1
     :type last_epoch: int, optional
     """
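
ExponentialLR is in effect the step_size=1 case of StepLR: the lr is multiplied by gamma once per epoch. A sketch of how a call against the documented parameters might look; the import path colossalai.nn.lr_scheduler and the concrete values are assumptions for illustration, not taken from this diff:

import torch
from colossalai.nn.lr_scheduler import ExponentialLR  # assumed export path

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
# per the docstring: wrapped optimizer, total number of training steps,
# and the per-epoch decay factor gamma (values here are hypothetical)
scheduler = ExponentialLR(optimizer, total_steps=1000, gamma=0.99)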