Support TP-compatible Torch AMP and update trainer API (#27)

* Add gradient accumulation, fix LR scheduler

* Fix FP16 optimizer and adapt Torch AMP to tensor parallelism (#18)

* Fix compatibility bugs between Torch AMP and tensor parallelism, plus other minor fixes

* fixed trainer

* Revert "fixed trainer"

This reverts commit 2e0b0b7699.

* Improve consistency between trainer, engine and schedule (#23)

Co-authored-by: 1SAA <c2h214748@gmail.com>

Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
Author: Frank Lee
Date: 2021-11-18 19:45:06 +08:00
Committed by: GitHub
Parent: 2b05de4c64
Commit: 3defa32aee
80 changed files with 2194 additions and 1584 deletions
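
The two headline changes, gradient accumulation and Torch AMP that cooperates with tensor parallelism, roughly correspond to the training-loop pattern below. This is a hedged sketch using plain torch.cuda.amp names; model, criterion, optimizer, lr_scheduler and train_loader are assumed, and it is not the engine code added by this commit (under tensor parallelism the engine additionally has to aggregate the gradient overflow check across the TP group before stepping).

import torch

accum_steps = 4                          # number of micro-batches to accumulate
scaler = torch.cuda.amp.GradScaler()     # dynamic loss scaling for FP16

for step, (data, label) in enumerate(train_loader):
    with torch.cuda.amp.autocast():
        output = model(data)
        loss = criterion(output, label) / accum_steps   # scale so accumulated grads average out
    scaler.scale(loss).backward()
    if (step + 1) % accum_steps == 0:
        scaler.step(optimizer)           # unscales grads, skips the step on overflow
        scaler.update()
        optimizer.zero_grad()
        lr_scheduler.step()              # one scheduler step per optimizer step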

colossalai/trainer/hooks/__init__.py

@@ -2,10 +2,12 @@ from ._base_hook import BaseHook
from ._checkpoint_hook import SaveCheckpointHook, LoadCheckpointHook
from ._metric_hook import LossHook, Accuracy2DHook, AccuracyHook, MetricHook
from ._log_hook import LogMetricByEpochHook, TensorboardHook, LogTimingByEpochHook, LogMemoryByEpochHook
from ._lr_scheduler_hook import LRSchedulerHook
__all__ = [
'BaseHook', 'MetricHook',
'LoadCheckpointHook', 'SaveCheckpointHook',
'LossHook', 'AccuracyHook', 'Accuracy2DHook',
'LogMetricByEpochHook', 'TensorboardHook', 'LogTimingByEpochHook', 'LogMemoryByEpochHook',
'LRSchedulerHook'
]

colossalai/trainer/hooks/_checkpoint_hook.py

@@ -3,13 +3,13 @@
import os.path as osp
import torch.distributed as dist
from colossalai.checkpointing import get_latest_checkpoint_path, get_checkpoint_path
from colossalai.registry import HOOKS
from colossalai.trainer.hooks import BaseHook
from colossalai.trainer import Trainer
from colossalai.trainer.hooks import BaseHook
from colossalai.utils import is_dp_rank_0
from colossalai.utils.checkpointing import get_latest_checkpoint_path, get_checkpoint_path
from colossalai.utils.checkpointing import save_checkpoint, load_checkpoint
from ._lr_scheduler_hook import LRSchedulerHook
@HOOKS.register_module
@@ -33,7 +33,7 @@ class SaveCheckpointHook(BaseHook):
interval: int = 1,
checkpoint_dir: str = None,
suffix: str = '',
priority: int = 0):
priority: int = 10):
super().__init__(trainer=trainer, priority=priority)
assert isinstance(trainer, Trainer), \
f'SaveCheckpointHook expects a Trainer, got {type(trainer)}'
@@ -41,6 +41,16 @@ class SaveCheckpointHook(BaseHook):
self.checkpoint_dir = checkpoint_dir
self.suffix = suffix
# get lr scheduler from the LRSchedulerHook before train
self._lr_scheduler = None
def before_train(self):
# check if lr scheduler is present in LRSchedulerHook
for hook in self.trainer.hooks:
if isinstance(hook, LRSchedulerHook):
self._lr_scheduler = hook.lr_scheduler
break
def after_train_epoch(self):
"""Saves the model after a training epoch.
"""
@@ -48,14 +58,18 @@ class SaveCheckpointHook(BaseHook):
if self.trainer.cur_epoch % self.interval == 0:
# only gpus with data parallel rank equals to 0 write to the disk
if is_dp_rank_0():
self.trainer.save(path=self.checkpoint_dir, suffix=self.suffix)
save_path = get_checkpoint_path(self.checkpoint_dir,
self.trainer.cur_epoch,
suffix=self.suffix)
save_checkpoint(save_path,
self.trainer.cur_epoch,
self.trainer.engine.model,
self.trainer.engine.optimizer,
self._lr_scheduler)
self.logger.info(
f'checkpoint for epoch {self.trainer.cur_epoch} is saved to {self.checkpoint_dir}')
# wait until everyone is done
if dist.is_initialized():
dist.barrier()
@HOOKS.register_module
class LoadCheckpointHook(BaseHook):
@@ -81,30 +95,46 @@ class LoadCheckpointHook(BaseHook):
epoch: int = -1,
finetune: bool = False,
strict: bool = False,
priority: int = 10) -> None:
suffix: str = '',
priority: int = 0) -> None:
super().__init__(trainer=trainer, priority=priority)
assert isinstance(trainer, Trainer), \
f'LoadCheckpointHook expects a Trainer, got {type(trainer)}'
self.epoch = epoch
self.checkpoint_dir = checkpoint_dir
self.finetune = finetune
self.suffix = suffix
self.strict = strict
super().__init__(trainer=trainer, priority=priority)
def before_train(self):
"""Loads parameters to the model before training.
"""
# check if lr scheduler is present in LRSchedulerHook
lr_scheduler = None
for hook in self.trainer.hooks:
if isinstance(hook, LRSchedulerHook):
lr_scheduler = hook.lr_scheduler
break
# use latest checkpoint if epoch = -1
if self.epoch == -1:
path = get_latest_checkpoint_path(self.checkpoint_dir)
path = get_latest_checkpoint_path(self.checkpoint_dir, suffix=self.suffix)
else:
path = get_checkpoint_path(self.checkpoint_dir, epoch=self.epoch)
path = get_checkpoint_path(self.checkpoint_dir, epoch=self.epoch, suffix=self.suffix)
if osp.exists(path):
self.trainer.load(
path, finetune=self.finetune, strict=self.strict)
last_epoch, _ = load_checkpoint(path,
self.trainer.engine.model,
self.trainer.engine.optimizer,
lr_scheduler,
finetune=self.finetune,
strict=self.strict)
if self.finetune:
self.trainer.cur_epoch = 0
else:
self.trainer.cur_epoch = last_epoch
self.logger.info(
f'loaded checkpoint from {path}')
else:
raise FileNotFoundError(f'checkpoint is not found at {path}')
# Some utilities want to load a checkpoint without distributed being initialized
if dist.is_initialized():
dist.barrier()
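
Taken together with the new LRSchedulerHook, the reworked checkpoint hooks let the scheduler state travel with the checkpoint: SaveCheckpointHook (now priority 10) looks up the LRSchedulerHook in before_train and passes its scheduler to save_checkpoint, while LoadCheckpointHook (now priority 0) restores it in before_train. A hypothetical wiring; the checkpoint directory and the scheduler config values are assumptions, not taken from this commit:

from colossalai.trainer.hooks import LRSchedulerHook, LoadCheckpointHook, SaveCheckpointHook

hooks = [
    LRSchedulerHook(trainer, lr_scheduler_cfg=dict(type='CosineAnnealingLR'), by_epoch=True),
    LoadCheckpointHook(trainer, checkpoint_dir='./ckpt', epoch=-1),    # epoch=-1: resume from the latest checkpoint
    SaveCheckpointHook(trainer, interval=1, checkpoint_dir='./ckpt'),  # saves model, optimizer and scheduler each epoch
]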

colossalai/trainer/hooks/_log_hook.py

@@ -5,7 +5,7 @@ import os
import os.path as osp
import torch
from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
@@ -13,7 +13,7 @@ from colossalai.registry import HOOKS
from colossalai.trainer._trainer import Trainer
from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, \
is_tp_rank_0, is_no_pp_or_last_stage
from ._metric_hook import MetricHook
from ._base_hook import BaseHook
def _format_number(val):
@@ -24,7 +24,7 @@ def _format_number(val):
return val
class EpochIntervalHook(MetricHook):
class EpochIntervalHook(BaseHook):
def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 1):
super().__init__(trainer, priority)
self._interval = interval
@@ -45,7 +45,7 @@ class LogMetricByEpochHook(EpochIntervalHook):
:type priority: int, optional
"""
def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 1) -> None:
def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 10) -> None:
super().__init__(trainer=trainer, interval=interval, priority=priority)
self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage()
@@ -74,7 +74,7 @@ class LogMetricByEpochHook(EpochIntervalHook):
@HOOKS.register_module
class TensorboardHook(MetricHook):
class TensorboardHook(BaseHook):
"""Specialized Hook to record the metric to Tensorboard.
:param trainer: Trainer attached with current hook
@@ -85,59 +85,71 @@ class TensorboardHook(MetricHook):
:type priority: int, optional
"""
def __init__(self, trainer: Trainer, log_dir: str, priority: int = 1) -> None:
def __init__(self,
trainer: Trainer,
log_dir: str,
dp_rank_0_only: bool = True,
tp_rank_0_only: bool = True,
priority: int = 10,
) -> None:
super().__init__(trainer=trainer, priority=priority)
self._is_rank_to_log = is_no_pp_or_last_stage()
if self._is_rank_to_log:
# create log dir
if not gpc.is_initialized(ParallelMode.GLOBAL) or gpc.get_global_rank() == 0:
os.makedirs(log_dir, exist_ok=True)
# determine the ranks to generate tensorboard logs
self._is_valid_rank_to_log = is_no_pp_or_last_stage()
if dp_rank_0_only:
self._is_valid_rank_to_log = self._is_valid_rank_to_log and is_dp_rank_0()
if tp_rank_0_only:
self._is_valid_rank_to_log = self._is_valid_rank_to_log and is_tp_rank_0()
if self._is_valid_rank_to_log:
# create workspace on only one rank
if gpc.is_initialized(ParallelMode.GLOBAL):
rank = gpc.get_global_rank()
else:
rank = 0
log_dir = osp.join(log_dir, f'rank_{rank}')
# create workspace
if not osp.exists(log_dir):
os.makedirs(log_dir)
log_dir = osp.join(log_dir, f'rank_{rank}')
os.makedirs(log_dir, exist_ok=True)
self.writer = SummaryWriter(
log_dir=log_dir, filename_suffix=f'_rank_{rank}')
def after_train_iter(self, *args):
for metric_name, metric_calculator in self.trainer.states['metrics']['train'].items():
def _log_by_iter(self, mode: str):
for metric_name, metric_calculator in self.trainer.states['metrics'][mode].items():
if metric_calculator.epoch_only:
continue
val = metric_calculator.get_last_step_value()
if self._is_rank_to_log:
self.writer.add_scalar(
f'{metric_name}/train', val, self.trainer.cur_step)
def after_test_iter(self, *args):
for metric_name, metric_calculator in self.trainer.states['metrics']['test'].items():
if metric_calculator.epoch_only:
continue
val = metric_calculator.get_last_step_value()
if self._is_rank_to_log:
self.writer.add_scalar(f'{metric_name}/test', val,
if self._is_valid_rank_to_log:
self.writer.add_scalar(f'{metric_name}/{mode}', val,
self.trainer.cur_step)
def after_test_epoch(self):
for metric_name, metric_calculator in self.trainer.states['metrics']['test'].items():
def _log_by_epoch(self, mode: str):
for metric_name, metric_calculator in self.trainer.states['metrics'][mode].items():
if metric_calculator.epoch_only:
val = metric_calculator.get_accumulated_value()
if self._is_rank_to_log:
self.writer.add_scalar(f'{metric_name}/test', val,
if self._is_valid_rank_to_log:
self.writer.add_scalar(f'{metric_name}/{mode}', val,
self.trainer.cur_step)
def after_test_iter(self, *args):
self._log_by_iter(mode='test')
def after_test_epoch(self):
self._log_by_epoch(mode='test')
def after_train_iter(self, *args):
self._log_by_iter(mode='train')
def after_train_epoch(self):
for metric_name, metric_calculator in self.trainer.states['metrics']['train'].items():
if metric_calculator.epoch_only:
val = metric_calculator.get_accumulated_value()
if self._is_rank_to_log:
self.writer.add_scalar(f'{metric_name}/train', val,
self.trainer.cur_step)
self._log_by_epoch(mode='train')
@HOOKS.register_module
@@ -157,7 +169,7 @@ class LogTimingByEpochHook(EpochIntervalHook):
def __init__(self,
trainer: Trainer,
interval: int = 1,
priority: int = 1,
priority: int = 10,
log_eval: bool = True
) -> None:
super().__init__(trainer=trainer, interval=interval, priority=priority)
@@ -217,7 +229,7 @@ class LogMemoryByEpochHook(EpochIntervalHook):
def __init__(self,
trainer: Trainer,
interval: int = 1,
priority: int = 1,
priority: int = 10,
log_eval: bool = True
) -> None:
super().__init__(trainer=trainer, interval=interval, priority=priority)
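
The TensorboardHook refactor replaces tensorboardX with torch.utils.tensorboard, funnels train and test logging through the shared _log_by_iter/_log_by_epoch helpers, and makes the writing ranks configurable; each qualifying rank writes to its own log_dir/rank_{global_rank} sub-directory. A hypothetical construction, with the argument values as assumptions:

from colossalai.trainer.hooks import TensorboardHook

tb_hook = TensorboardHook(
    trainer,
    log_dir='./tb_logs',
    dp_rank_0_only=True,    # only data-parallel rank 0 writes event files
    tp_rank_0_only=False,   # every tensor-parallel rank keeps its own rank_{i} folder
)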

colossalai/trainer/hooks/_lr_scheduler_hook.py (new file)

@@ -0,0 +1,58 @@
from torch import Tensor
from colossalai.builder import build_lr_scheduler
from colossalai.registry import HOOKS
from ._metric_hook import MetricHook
from .._trainer import Trainer
from ..metric import LearningRate
@HOOKS.register_module
class LRSchedulerHook(MetricHook):
"""Build LR scheduler
:param trainer: Trainer attached with current hook
:type trainer: Trainer
:param lr_scheduler_cfg: The config of LR scheduler
:type lr_scheduler_cfg: dict
:param by_epoch: If `True`, the LR will be scheduled every epoch. Else, the LR will be scheduled every batch. Defaults to `True`.
:type by_epoch: bool
:param priority: Priority in the printing; hooks with a smaller priority value will be printed in front
:type priority: int, optional
"""
def __init__(self,
trainer: Trainer,
lr_scheduler_cfg: dict,
by_epoch: bool = True,
store_lr_in_state: bool = True,
priority: int = 1,
):
super().__init__(trainer=trainer, priority=priority)
self.by_epoch = by_epoch
if by_epoch:
total_steps = trainer.max_epochs
else:
total_steps = trainer.max_epochs * trainer.steps_per_epoch
if trainer.max_steps is not None:
total_steps = min(total_steps, trainer.max_steps)
lr_scheduler_cfg['total_steps'] = total_steps
self.lr_scheduler = build_lr_scheduler(
lr_scheduler_cfg, trainer.engine.optimizer)
if store_lr_in_state:
self.trainer.states['metrics']['train']['lr'] = LearningRate(epoch_only=by_epoch,
initial_lr=self.lr_scheduler.get_lr()[0])
def after_train_epoch(self):
if self.by_epoch:
self.lr_scheduler.step()
self.trainer.states['metrics']['train']['lr'].update(self.lr_scheduler.get_lr()[0])
def after_train_iter(self, output: Tensor, label: Tensor, loss: Tensor):
if not self.by_epoch:
self.lr_scheduler.step()
self.trainer.states['metrics']['train']['lr'].update(self.lr_scheduler.get_lr()[0])
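
LRSchedulerHook builds the scheduler from a registry config, injects total_steps computed from the trainer, and registers a LearningRate metric so the current LR shows up alongside the other training metrics. A hypothetical configuration; the scheduler type and warmup_steps are assumptions, only total_steps is known to be filled in by the hook:

from colossalai.trainer.hooks import LRSchedulerHook

lr_scheduler_cfg = dict(
    type='LinearWarmupLR',   # assumed registry name
    warmup_steps=50,
)
hook = LRSchedulerHook(trainer, lr_scheduler_cfg=lr_scheduler_cfg, by_epoch=False)
# by_epoch=False: total_steps = max_epochs * steps_per_epoch (capped by max_steps),
# and the scheduler is stepped in after_train_iter rather than after_train_epoch.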

colossalai/trainer/hooks/_metric_hook.py

@@ -21,9 +21,12 @@ class MetricHook(BaseHook):
:type priority: int
"""
def __init__(self, trainer: Trainer, priority: int):
def __init__(self,
trainer: Trainer,
priority: int,
):
super().__init__(trainer, priority)
self._is_stage_to_log = is_no_pp_or_last_stage()
self._is_stage_to_compute = is_no_pp_or_last_stage()
self._check_metric_states_initialization()
def _check_metric_states_initialization(self):
@@ -41,33 +44,34 @@ class LossHook(MetricHook):
:type priority: int, optional
"""
def __init__(self, trainer: Trainer, priority: int = 10):
def __init__(self, trainer: Trainer, priority: int = 0):
super().__init__(trainer, priority)
if self._is_stage_to_log:
self.metric = Loss(epoch_only=False)
if self._is_stage_to_compute:
self.train_loss = Loss(epoch_only=False)
self.test_loss = Loss(epoch_only=True)
# register the metric calculator
self.trainer.states['metrics']['train'][
self.metric.__class__.__name__] = self.metric
self.train_loss.__class__.__name__] = self.train_loss
self.trainer.states['metrics']['test'][
self.metric.__class__.__name__] = self.metric
self.test_loss.__class__.__name__] = self.test_loss
def before_train_epoch(self):
if self._is_stage_to_log:
self.metric.reset()
if self._is_stage_to_compute:
self.train_loss.reset()
def after_train_iter(self, logits, label, loss):
if self._is_stage_to_log:
self.metric.update(loss)
if self._is_stage_to_compute:
self.train_loss.update(loss)
def before_test_epoch(self):
if self._is_stage_to_log:
self.metric.reset()
if self._is_stage_to_compute:
self.test_loss.reset()
def after_test_iter(self, logits, label, loss):
if self._is_stage_to_log:
self.metric.update(loss)
if self._is_stage_to_compute:
self.test_loss.update(loss)
@HOOKS.register_module
@@ -81,10 +85,10 @@ class Accuracy2DHook(MetricHook):
:type priority: int, optional
"""
def __init__(self, trainer: Trainer, priority: int = 10):
def __init__(self, trainer: Trainer, priority: int = 0):
super().__init__(trainer, priority)
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric = Accuracy2D(epoch_only=True)
# register the metric
@@ -92,20 +96,20 @@ class Accuracy2DHook(MetricHook):
self.metric.__class__.__name__] = self.metric
def before_test(self):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.reset()
def after_test_iter(self, logits, label, *args):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy2p5DHook(MetricHook):
def __init__(self, trainer: Trainer, priority: int = 10):
def __init__(self, trainer: Trainer, priority: int = 0):
super().__init__(trainer, priority)
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric = Accuracy2p5D(epoch_only=True)
# register the metric
@@ -113,11 +117,11 @@ class Accuracy2p5DHook(MetricHook):
self.metric.__class__.__name__] = self.metric
def before_test(self):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.reset()
def after_test_iter(self, logits, label, *args):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.update(logits, label)
@@ -138,7 +142,7 @@ class Accuracy3DHook(MetricHook):
priority: int = 10):
super().__init__(trainer, priority)
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric = Accuracy3D(epoch_only=True,
input_parallel_mode=input_parallel_mode,
weight_parallel_mode=weight_parallel_mode)
@@ -148,11 +152,11 @@ class Accuracy3DHook(MetricHook):
self.metric.__class__.__name__] = self.metric
def before_test(self):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.reset()
def after_test_iter(self, logits, label, *args):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.update(logits, label)
@@ -166,10 +170,10 @@ class AccuracyHook(MetricHook):
:type priority: int
"""
def __init__(self, trainer: Trainer, priority: int = 10):
def __init__(self, trainer: Trainer, priority: int = 0):
super().__init__(trainer, priority)
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric = Accuracy(epoch_only=True)
# register the metric
@@ -177,9 +181,9 @@ class AccuracyHook(MetricHook):
self.metric.__class__.__name__] = self.metric
def before_test(self):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.reset()
def after_test_iter(self, logits, label, *args):
if self._is_stage_to_log:
if self._is_stage_to_compute:
self.metric.update(logits, label)
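
With LossHook now keeping separate Loss metrics for training and evaluation (and the rank guard renamed to _is_stage_to_compute), downstream code can read the two values independently from the trainer state. A minimal sketch, assuming an epoch has completed and that the accumulated-value accessor behaves as it is used in TensorboardHook above:

train_loss = trainer.states['metrics']['train']['Loss'].get_accumulated_value()
test_loss = trainer.states['metrics']['test']['Loss'].get_accumulated_value()
print(f'train loss: {train_loss:.4f} | test loss: {test_loss:.4f}')
# 'Loss' is the key because each metric is registered under its class name.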