Mirror of https://github.com/hpcaitech/ColossalAI.git
Develop/experiments (#59)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
  This reverts commit 2e0b0b7699.
* improved consistency between trainer, engine and schedule (#23)
  Co-authored-by: 1SAA <c2h214748@gmail.com>
* Split conv2d, class token, positional embedding in 2d, Fix random number in ddp
  Fix convergence in cifar10, Imagenet1000
* Integrate 1d tensor parallel in Colossal-AI (#39)
* fixed 1D and 2D convergence (#38)
* optimized 2D operations
* fixed 1D ViT convergence problem
* Feature/ddp (#49)
* remove redundancy func in setup (#19) (#20)
* use env to control the language of doc (#24) (#25)
* Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
  This reverts commit 2e0b0b7699.
* improved consistency between trainer, engine and schedule (#23)
  Co-authored-by: 1SAA <c2h214748@gmail.com>
  Co-authored-by: 1SAA <c2h214748@gmail.com>
  Co-authored-by: ver217 <lhx0217@gmail.com>
* add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
* add explanation for ViT example (#35) (#36)
* support torch ddp
* fix loss accumulation
* add log for ddp
* change seed
* modify timing hook
  Co-authored-by: Frank Lee <somerlee.9@gmail.com>
  Co-authored-by: 1SAA <c2h214748@gmail.com>
  Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* Feature/pipeline (#40)
* remove redundancy func in setup (#19) (#20)
* use env to control the language of doc (#24) (#25)
* Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
  This reverts commit 2e0b0b7699.
* improved consistency between trainer, engine and schedule (#23)
  Co-authored-by: 1SAA <c2h214748@gmail.com>
  Co-authored-by: 1SAA <c2h214748@gmail.com>
  Co-authored-by: ver217 <lhx0217@gmail.com>
* add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
* add explanation for ViT example (#35) (#36)
* optimize communication of pipeline parallel
* fix grad clip for pipeline
  Co-authored-by: Frank Lee <somerlee.9@gmail.com>
  Co-authored-by: 1SAA <c2h214748@gmail.com>
  Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* optimized 3d layer to fix slow computation; tested imagenet performance with 3d; reworked lr_scheduler config definition; fixed launch args; fixed some printing issues; simplified apis of 3d layers (#51)
* Update 2.5d layer code to get a similar accuracy on imagenet-1k dataset
* update api for better usability (#58)

Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
Co-authored-by: puck_WCR <46049915+WANG-CR@users.noreply.github.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com>
Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
@@ -3,7 +3,6 @@ from torch import Tensor
-from colossalai.builder import build_lr_scheduler
 from colossalai.registry import HOOKS
 from ._metric_hook import MetricHook
 from .._trainer import Trainer
 from ..metric import LearningRate
 
 
@@ -22,37 +21,26 @@ class LRSchedulerHook(MetricHook):
     """
 
     def __init__(self,
-                 trainer: Trainer,
-                 lr_scheduler_cfg: dict,
-                 by_epoch: bool = True,
+                 lr_scheduler,
+                 by_epoch: bool,
                  store_lr_in_state: bool = True,
                  priority: int = 1,
                  ):
-        super().__init__(trainer=trainer, priority=priority)
+        super().__init__(priority=priority)
         self.by_epoch = by_epoch
+        self.lr_scheduler = lr_scheduler
+        self.store_lr_in_state = store_lr_in_state
 
-        if by_epoch:
-            total_steps = trainer.max_epochs
-        else:
-            total_steps = trainer.max_epochs * trainer.steps_per_epoch
-            if trainer.max_steps is not None:
-                total_steps = min(total_steps, trainer.max_steps)
-
-        lr_scheduler_cfg['total_steps'] = total_steps
-
-        self.lr_scheduler = build_lr_scheduler(
-            lr_scheduler_cfg, trainer.engine.optimizer)
-
-        if store_lr_in_state:
-            self.trainer.states['metrics']['train']['lr'] = LearningRate(epoch_only=by_epoch,
-                                                                         initial_lr=self.lr_scheduler.get_lr()[0])
+    def after_hook_is_attached(self, trainer):
+        trainer.states['metrics']['train']['lr'] = LearningRate(epoch_only=self.by_epoch,
+                                                                initial_lr=self.lr_scheduler.get_last_lr()[0])
 
-    def after_train_epoch(self):
+    def after_train_epoch(self, trainer):
         if self.by_epoch:
             self.lr_scheduler.step()
-            self.trainer.states['metrics']['train']['lr'].update(self.lr_scheduler.get_lr()[0])
+            trainer.states['metrics']['train']['lr'].update(self.lr_scheduler.get_last_lr()[0])
 
-    def after_train_iter(self, output: Tensor, label: Tensor, loss: Tensor):
+    def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
         if not self.by_epoch:
             self.lr_scheduler.step()
-            self.trainer.states['metrics']['train']['lr'].update(self.lr_scheduler.get_lr()[0])
+            trainer.states['metrics']['train']['lr'].update(self.lr_scheduler.get_last_lr()[0])
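To summarize the API change in the diff: the hook previously received the trainer and a lr_scheduler_cfg dict in __init__, computed total_steps itself, and built the scheduler via build_lr_scheduler; after this commit the caller builds the scheduler up front, and the trainer is passed into each callback (after_hook_is_attached, after_train_epoch, after_train_iter) instead of being stored on the hook. Below is a minimal sketch of the new construction; the colossalai.trainer import path is assumed for this release, and the model, optimizer, and scheduler are illustrative stand-ins, not part of the commit.

    import torch
    from torch.optim.lr_scheduler import CosineAnnealingLR
    from colossalai.trainer import hooks  # assumed import path for this release

    # Illustrative model/optimizer pair; any torch-compatible setup works.
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    # The old hook derived the scheduler's step count internally
    # (trainer.max_epochs when by_epoch=True, otherwise
    # max_epochs * steps_per_epoch, capped by max_steps); under the
    # new API the caller sizes the scheduler, e.g. T_max = num_epochs.
    num_epochs = 100  # illustrative
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs)

    lr_hook = hooks.LRSchedulerHook(
        lr_scheduler=scheduler,   # pre-built scheduler replaces lr_scheduler_cfg
        by_epoch=True,            # step once per epoch; False steps every iteration
        store_lr_in_state=True,   # expose lr under states['metrics']['train']['lr']
    )

The switch from get_lr() to get_last_lr() is worth noting on its own: since PyTorch 1.4, calling get_lr() outside of step() emits a warning and, for schedulers computed in closed form, can return a value that differs from the learning rate actually applied, while get_last_lr() always reports the rate set by the most recent step().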