Mirror of https://github.com/hpcaitech/ColossalAI.git
Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b7699.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
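The bullets above reference torch AMP, its compatibility with tensor parallelism, and gradient accumulation. For context only, here is a minimal sketch of the underlying pattern using PyTorch's native torch.cuda.amp, not ColossalAI's trainer/engine API that this PR changes; the model, data loader, loss choice, and accumulation_steps value are placeholders.

import torch

def train_epoch(model, optimizer, loader, accumulation_steps=4, device="cuda"):
    scaler = torch.cuda.amp.GradScaler()    # scales the loss to avoid FP16 gradient underflow
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(loader):
        inputs, targets = inputs.to(device), targets.to(device)
        with torch.cuda.amp.autocast():     # run the forward pass in mixed precision
            loss = torch.nn.functional.cross_entropy(model(inputs), targets)
        # divide so the accumulated gradients match a single large-batch update
        scaler.scale(loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            scaler.step(optimizer)          # unscales gradients, skips the step on inf/nan
            scaler.update()
            optimizer.zero_grad()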
@@ -28,18 +28,3 @@ class LinearWarmupLR(_LRScheduler):
         else:
             return [(self.total_steps - self.last_epoch) / (self.total_steps - self.warmup_steps) * lr for lr in
                     self.base_lrs]
-
-
-@LR_SCHEDULERS.register_module
-class LinearWarmupDecay(_LRScheduler):
-    def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, last_epoch: int = -1, **kwargs):
-        self.warmup_steps = int(warmup_steps)
-        self.total_steps = total_steps
-        super().__init__(optimizer, last_epoch=last_epoch)
-
-    def get_lr(self):
-        if self.last_epoch < self.warmup_steps:
-            return [(self.last_epoch + 1) / self.warmup_steps * lr for lr in self.base_lrs]
-        else:
-            return [(self.total_steps - self.last_epoch - 1) / (self.total_steps - self.warmup_steps) * lr for lr in
-                    self.base_lrs]
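The LinearWarmupDecay class deleted above can be run standalone. Below is a self-contained sketch of that schedule with the ColossalAI registry decorator dropped and a small demo loop added (the dummy SGD optimizer and the step counts are illustrative only): the learning rate ramps linearly over warmup_steps, then decays linearly to zero at total_steps.

import torch
from torch.optim.lr_scheduler import _LRScheduler

class LinearWarmupDecay(_LRScheduler):
    def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, last_epoch: int = -1):
        self.warmup_steps = int(warmup_steps)
        self.total_steps = total_steps
        super().__init__(optimizer, last_epoch=last_epoch)

    def get_lr(self):
        if self.last_epoch < self.warmup_steps:
            # linear ramp: 1/warmup_steps, 2/warmup_steps, ..., 1.0 of the base lr
            return [(self.last_epoch + 1) / self.warmup_steps * lr for lr in self.base_lrs]
        # linear decay from the end of warmup down to zero at total_steps
        return [(self.total_steps - self.last_epoch - 1) / (self.total_steps - self.warmup_steps) * lr
                for lr in self.base_lrs]

# Print the learning-rate curve on a dummy parameter to see the shape of the schedule.
opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = LinearWarmupDecay(opt, total_steps=10, warmup_steps=3)
for _ in range(10):
    opt.step()
    sched.step()
    print(round(sched.get_last_lr()[0], 4))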