Fixed docstring in colossalai (#171)
@@ -7,13 +7,14 @@ from .delayed import WarmupScheduler
 @LR_SCHEDULERS.register_module
 class PolynomialLR(_LRScheduler):
     """Polynomial learning rate scheduler.
+
     :param optimizer: Wrapped optimizer
     :type optimizer: torch.optim.Optimizer
-    :param total_steps: number of total training steps
+    :param total_steps: Number of total training steps
     :type total_steps: int
     :param end_lr: Minimum learning rate, defaults to 0.0001
     :type end_lr: float, optional
-    :param power: the power of polynomial, defaults to 1.0
+    :param power: The power of polynomial, defaults to 1.0
     :type power: float, optional
     :param last_epoch: The index of last epoch, defaults to -1
     :type last_epoch: int, optional
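The hunk above only edits the PolynomialLR docstring, so the scheduler's actual decay logic is not part of this commit. For orientation, a minimal sketch of the standard polynomial decay rule that these parameters describe (assuming the usual formulation; the function name and signature here are illustrative, not taken from the file):

    def poly_lr(base_lr, step, total_steps, end_lr=0.0001, power=1.0):
        # Standard polynomial decay: start at base_lr, reach end_lr at total_steps.
        # Clamp so the learning rate never drops below end_lr afterwards.
        step = min(step, total_steps)
        return (base_lr - end_lr) * (1 - step / total_steps) ** power + end_lr

With power=1.0 this reduces to plain linear decay, which matches the default documented above.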
@@ -42,15 +43,16 @@ class PolynomialLR(_LRScheduler):
 @LR_SCHEDULERS.register_module
 class PolynomialWarmupLR(WarmupScheduler):
     """Polynomial learning rate scheduler with warmup.
+
     :param optimizer: Wrapped optimizer
     :type optimizer: torch.optim.Optimizer
-    :param total_steps: number of total training steps
+    :param total_steps: Number of total training steps
     :type total_steps: int
-    :param warmup_steps: number of warmup steps, defaults to 0
+    :param warmup_steps: Number of warmup steps, defaults to 0
     :type warmup_steps: int, optional
     :param end_lr: Minimum learning rate, defaults to 0.0001
     :type end_lr: float, optional
-    :param power: the power of polynomial, defaults to 1.0
+    :param power: The power of polynomial, defaults to 1.0
     :type power: float, optional
     :param last_epoch: The index of last epoch, defaults to -1
     :type last_epoch: int, optional
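The two docstrings together spell out the constructor parameters. A hedged usage sketch based only on what is documented above; the import path, model, and training-loop scaffolding are assumptions, none of them shown in this commit:

    import torch
    from colossalai.nn.lr_scheduler import PolynomialWarmupLR  # assumed import path

    model = torch.nn.Linear(16, 4)  # hypothetical model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = PolynomialWarmupLR(
        optimizer,           # wrapped optimizer
        total_steps=1000,    # number of total training steps
        warmup_steps=100,    # warmup steps before the polynomial decay, defaults to 0
        end_lr=0.0001,       # minimum learning rate
        power=2.0,           # the power of polynomial
    )

    for _ in range(1000):
        optimizer.step()     # forward/backward pass omitted
        scheduler.step()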