Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-07 12:01:39 +00:00
[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
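Every hunk below is formatting-only output from the updated hooks: single-quoted strings are normalized to double quotes, long conditions and signatures are rewrapped, two-line docstrings are collapsed, and one unused assignment is dropped. The quote and wrapping changes match black's style. As a minimal sketch of that normalization (assuming black is installed; `format_str` and `Mode` are black's documented programmatic API, and the sample lines are taken from the hunks below):

# Minimal sketch: reproduce the quote normalization seen in this diff.
import black

samples = [
    "trainer.states['step_metrics'] = dict()",
    "msg = ' | '.join(msg)",
]
for src in samples:
    # format_str reformats a source string exactly as the black CLI would.
    print(black.format_str(src, mode=black.Mode()), end="")
# Prints:
# trainer.states["step_metrics"] = dict()
# msg = " | ".join(msg)

Running `pre-commit run --all-files` once after such an update is what produces a sweeping, behavior-preserving diff like this one; per the commit message, CUDA sources are additionally excluded from the clang-format hook.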
@@ -51,20 +51,20 @@ class LogMetricByStepHook(BaseHook):
         super().__init__(priority)

     def after_train_iter(self, trainer, *args):
-        trainer.states['step_metrics'] = dict()
-        for metric_name, metric_calculator in trainer.states['metrics']['train'].items():
+        trainer.states["step_metrics"] = dict()
+        for metric_name, metric_calculator in trainer.states["metrics"]["train"].items():
             if isinstance(metric_calculator, ThroughputMetric):
-                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_info()
+                trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_info()
             else:
-                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_value()
+                trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_value()

     def after_test_iter(self, trainer, *args):
-        trainer.states['step_metrics'] = dict()
-        for metric_name, metric_calculator in trainer.states['metrics']['test'].items():
+        trainer.states["step_metrics"] = dict()
+        for metric_name, metric_calculator in trainer.states["metrics"]["test"].items():
             if isinstance(metric_calculator, ThroughputMetric):
-                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_info()
+                trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_info()
             else:
-                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_value()
+                trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_value()


 @HOOKS.register_module
@@ -85,24 +85,24 @@ class LogMetricByEpochHook(LogByEpochHook):

     def _get_str(self, trainer, mode):
         msg = []
-        for metric_name, metric_calculator in trainer.states['metrics'][mode].items():
-            msg.append(f'{metric_name} = {_format_number(metric_calculator.get_accumulated_value())}')
-        msg = ' | '.join(msg)
+        for metric_name, metric_calculator in trainer.states["metrics"][mode].items():
+            msg.append(f"{metric_name} = {_format_number(metric_calculator.get_accumulated_value())}")
+        msg = " | ".join(msg)
         return msg

     def after_train_epoch(self, trainer):
         if self._is_epoch_to_log(trainer):
-            msg = self._get_str(trainer=trainer, mode='train')
+            msg = self._get_str(trainer=trainer, mode="train")

             if self._is_rank_to_log:
-                self.logger.info(f'[Epoch {trainer.cur_epoch} / Train]: {msg}')
+                self.logger.info(f"[Epoch {trainer.cur_epoch} / Train]: {msg}")
                 # f'Training - Epoch {trainer.cur_epoch} - {self.__class__.__name__}: {msg}')

     def after_test_epoch(self, trainer):
         if self._is_epoch_to_log(trainer):
-            msg = self._get_str(trainer=trainer, mode='test')
+            msg = self._get_str(trainer=trainer, mode="test")
             if self._is_rank_to_log:
-                self.logger.info(f'[Epoch {trainer.cur_epoch} / Test]: {msg}')
+                self.logger.info(f"[Epoch {trainer.cur_epoch} / Test]: {msg}")
                 # f'Testing - Epoch {trainer.cur_epoch} - {self.__class__.__name__}: {msg}')

@@ -145,8 +145,11 @@ class TensorboardHook(BaseHook):
         self._is_valid_rank_to_log = True

         # check for
-        if gpc.is_initialized(ParallelMode.PIPELINE) and \
-                not gpc.is_last_rank(ParallelMode.PIPELINE) and self._is_valid_rank_to_log:
+        if (
+            gpc.is_initialized(ParallelMode.PIPELINE)
+            and not gpc.is_last_rank(ParallelMode.PIPELINE)
+            and self._is_valid_rank_to_log
+        ):
             raise ValueError("Tensorboard hook can only log on the last rank of pipeline process group")

         if self._is_valid_rank_to_log:
@@ -157,38 +160,38 @@ class TensorboardHook(BaseHook):
             rank = 0

         # create workspace
-        log_dir = osp.join(log_dir, f'{parallel_mode}_rank_{rank}')
+        log_dir = osp.join(log_dir, f"{parallel_mode}_rank_{rank}")
         os.makedirs(log_dir, exist_ok=True)

-        self.writer = SummaryWriter(log_dir=log_dir, filename_suffix=f'_rank_{rank}')
+        self.writer = SummaryWriter(log_dir=log_dir, filename_suffix=f"_rank_{rank}")

     def _log_by_iter(self, trainer, mode: str):
-        for metric_name, metric_calculator in trainer.states['metrics'][mode].items():
+        for metric_name, metric_calculator in trainer.states["metrics"][mode].items():
             if metric_calculator.epoch_only:
                 continue
             val = metric_calculator.get_last_step_value()

             if self._is_valid_rank_to_log:
-                self.writer.add_scalar(f'{metric_name}/{mode}', val, trainer.cur_step)
+                self.writer.add_scalar(f"{metric_name}/{mode}", val, trainer.cur_step)

     def _log_by_epoch(self, trainer, mode: str):
-        for metric_name, metric_calculator in trainer.states['metrics'][mode].items():
+        for metric_name, metric_calculator in trainer.states["metrics"][mode].items():
             if metric_calculator.epoch_only:
                 val = metric_calculator.get_accumulated_value()
                 if self._is_valid_rank_to_log:
-                    self.writer.add_scalar(f'{metric_name}/{mode}', val, trainer.cur_step)
+                    self.writer.add_scalar(f"{metric_name}/{mode}", val, trainer.cur_step)

     def after_test_iter(self, trainer, *args):
-        self._log_by_iter(trainer, mode='test')
+        self._log_by_iter(trainer, mode="test")

     def after_test_epoch(self, trainer):
-        self._log_by_epoch(trainer, mode='test')
+        self._log_by_epoch(trainer, mode="test")

     def after_train_iter(self, trainer, *args):
-        self._log_by_iter(trainer, mode='train')
+        self._log_by_iter(trainer, mode="train")

     def after_train_epoch(self, trainer):
-        self._log_by_epoch(trainer, mode='train')
+        self._log_by_epoch(trainer, mode="train")


 @HOOKS.register_module
@@ -206,13 +209,15 @@ class LogTimingByEpochHook(LogByEpochHook):
         ignore_num_train_steps (int, optional): Number of training steps to ignore, defaults to 0.
     """

-    def __init__(self,
-                 timer: MultiTimer,
-                 logger: DistributedLogger,
-                 interval: int = 1,
-                 priority: int = 10,
-                 log_eval: bool = True,
-                 ignore_num_train_steps: int = 0) -> None:
+    def __init__(
+        self,
+        timer: MultiTimer,
+        logger: DistributedLogger,
+        interval: int = 1,
+        priority: int = 10,
+        log_eval: bool = True,
+        ignore_num_train_steps: int = 0,
+    ) -> None:
         super().__init__(logger=logger, interval=interval, priority=priority)
         self._timer = timer
         self._log_eval = log_eval
@@ -229,33 +234,31 @@ class LogTimingByEpochHook(LogByEpochHook):
             if timer_name.startswith(mode):
                 last_elapsed_time = timer.get_elapsed_time()
                 if timer.has_history:
-                    if timer_name == 'Train-step' and not self._is_train_step_history_trimmed:
-                        timer._history = timer._history[self._ignore_num_train_steps:]
+                    if timer_name == "Train-step" and not self._is_train_step_history_trimmed:
+                        timer._history = timer._history[self._ignore_num_train_steps :]
                         self._is_train_step_history_trimmed = True
                     history_mean = timer.get_history_mean()
-                    history_sum = timer.get_history_sum()
+                    timer.get_history_sum()
                     msg.append(
-                        f'{timer_name}: last = {_format_number(last_elapsed_time)} s, mean = {_format_number(history_mean)} s'
+                        f"{timer_name}: last = {_format_number(last_elapsed_time)} s, mean = {_format_number(history_mean)} s"
                     )
                 else:
-                    msg.append(f'{timer_name}: last = {_format_number(last_elapsed_time)} s')
+                    msg.append(f"{timer_name}: last = {_format_number(last_elapsed_time)} s")

-        msg = ' | '.join(msg)
+        msg = " | ".join(msg)
         return msg

     def after_train_epoch(self, trainer):
-        """Writes log after finishing a training epoch.
-        """
+        """Writes log after finishing a training epoch."""
         if self._is_epoch_to_log(trainer) and self._is_rank_to_log:
-            msg = self._get_message('Train')
-            self.logger.info(f'[Epoch {trainer.cur_epoch} / Train]: {msg} | #steps/epoch = {trainer.steps_per_epoch}')
+            msg = self._get_message("Train")
+            self.logger.info(f"[Epoch {trainer.cur_epoch} / Train]: {msg} | #steps/epoch = {trainer.steps_per_epoch}")

     def after_test_epoch(self, trainer):
-        """Writes log after finishing a testing epoch.
-        """
+        """Writes log after finishing a testing epoch."""
         if self._is_epoch_to_log(trainer) and self._is_rank_to_log and self._log_eval:
-            msg = self._get_message('Test')
-            self.logger.info(f'[Epoch {trainer.cur_epoch} / Test]: {msg}')
+            msg = self._get_message("Test")
+            self.logger.info(f"[Epoch {trainer.cur_epoch} / Test]: {msg}")


 @HOOKS.register_module
@@ -272,31 +275,28 @@ class LogMemoryByEpochHook(LogByEpochHook):
     """

     def __init__(
-            self,
-            logger: DistributedLogger,
-            interval: int = 1,
-            priority: int = 10,
-            log_eval: bool = True,
-            report_cpu: bool = False,  # no reference
+        self,
+        logger: DistributedLogger,
+        interval: int = 1,
+        priority: int = 10,
+        log_eval: bool = True,
+        report_cpu: bool = False,  # no reference
     ) -> None:
         super().__init__(logger=logger, interval=interval, priority=priority)
         self._log_eval = log_eval
         self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0()

     def before_train(self, trainer):
-        """Resets before training.
-        """
+        """Resets before training."""
         if self._is_epoch_to_log(trainer) and self._is_rank_to_log:
-            report_memory_usage('Before-train', self.logger)
+            report_memory_usage("Before-train", self.logger)

     def after_train_epoch(self, trainer):
-        """Writes log after finishing a training epoch.
-        """
+        """Writes log after finishing a training epoch."""
         if self._is_epoch_to_log(trainer) and self._is_rank_to_log:
-            report_memory_usage(f'[Epoch {trainer.cur_epoch} / Train]', self.logger)
+            report_memory_usage(f"[Epoch {trainer.cur_epoch} / Train]", self.logger)

     def after_test(self, trainer):
-        """Reports after testing.
-        """
+        """Reports after testing."""
         if self._is_epoch_to_log(trainer) and self._is_rank_to_log and self._log_eval:
-            report_memory_usage(f'[Epoch {trainer.cur_epoch} / Test]', self.logger)
+            report_memory_usage(f"[Epoch {trainer.cur_epoch} / Test]", self.logger)