Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation and fix the LR scheduler (see the gradient-accumulation sketch below)
* Fix the FP16 optimizer and adapt torch amp to work with tensor parallel (#18) (see the AMP sketch below)
* Fixed compatibility bugs between torch amp and tensor parallel, along with some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b7699.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
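
For reference, gradient accumulation runs several micro-batch forward/backward passes and applies a single optimizer step for the group, so a large effective batch fits in limited memory. A minimal PyTorch sketch of the pattern (the model, loss, and accum_steps here are illustrative placeholders, not code from this commit):

import torch
from torch import nn

model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.MSELoss()
accum_steps = 4  # micro-batches per optimizer step (illustrative choice)

optimizer.zero_grad()
for i in range(16):
    data, target = torch.randn(8, 10), torch.randn(8, 1)  # stand-in micro-batch
    loss = criterion(model(data), target)
    # scale the loss so accumulated gradients average over the effective batch
    (loss / accum_steps).backward()
    if (i + 1) % accum_steps == 0:
        optimizer.step()       # one parameter update per accum_steps micro-batches
        optimizer.zero_grad()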
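The torch amp piece follows the standard autocast/GradScaler recipe; the TP-compatible part of this commit is about making that machinery see sharded parameters and gradients consistently, which a single-device sketch cannot show. A minimal sketch of the stock torch.cuda.amp API for orientation:

import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler

device = 'cuda' if torch.cuda.is_available() else 'cpu'
use_amp = torch.cuda.is_available()  # AMP here is CUDA-only; disable on CPU

model = nn.Linear(10, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler(enabled=use_amp)

for _ in range(4):
    data = torch.randn(8, 10, device=device)
    target = torch.randn(8, 1, device=device)
    optimizer.zero_grad()
    with autocast(enabled=use_amp):       # forward pass in mixed precision
        loss = nn.functional.mse_loss(model(data), target)
    scaler.scale(loss).backward()         # backward on the scaled loss
    scaler.step(optimizer)                # unscales grads, skips step on inf/nan
    scaler.update()                       # adjusts the loss scale for next iter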
@@ -2,10 +2,12 @@ from ._base_hook import BaseHook
from ._checkpoint_hook import SaveCheckpointHook, LoadCheckpointHook
from ._metric_hook import LossHook, Accuracy2DHook, AccuracyHook, MetricHook
from ._log_hook import LogMetricByEpochHook, TensorboardHook, LogTimingByEpochHook, LogMemoryByEpochHook
from ._lr_scheduler_hook import LRSchedulerHook

__all__ = [
    'BaseHook', 'MetricHook',
    'LoadCheckpointHook', 'SaveCheckpointHook',
    'LossHook', 'AccuracyHook', 'Accuracy2DHook',
    'LogMetricByEpochHook', 'TensorboardHook', 'LogTimingByEpochHook', 'LogMemoryByEpochHook',
    'LRSchedulerHook'
]
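
For context on the exports above: these hooks are callbacks that the trainer fires at fixed points in the training loop. A minimal sketch of the pattern behind an LRSchedulerHook-style hook (the method names, by_epoch flag, and the bare dispatch loop are illustrative assumptions, not ColossalAI's actual trainer API):

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

class BaseHook:
    """No-op callbacks; a trainer invokes these at fixed points."""
    def after_train_iter(self, trainer): pass
    def after_train_epoch(self, trainer): pass

class LRSchedulerHook(BaseHook):
    """Steps an LR scheduler once per iteration or once per epoch."""
    def __init__(self, scheduler, by_epoch=True):
        self.scheduler = scheduler
        self.by_epoch = by_epoch
    def after_train_iter(self, trainer):
        if not self.by_epoch:
            self.scheduler.step()
    def after_train_epoch(self, trainer):
        if self.by_epoch:
            self.scheduler.step()

model = nn.Linear(4, 2)
opt = SGD(model.parameters(), lr=0.1)
hooks = [LRSchedulerHook(StepLR(opt, step_size=1))]

for epoch in range(3):             # stand-in training loop
    for hook in hooks:             # the trainer dispatches to every registered hook
        hook.after_train_epoch(trainer=None)  # trainer arg unused in this sketch
    print(opt.param_groups[0]['lr'])          # LR decays once per epoch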