[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
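
The "ignore cuda for clang-format" item refers to the repository's .pre-commit-config.yaml. A rough sketch of what such a configuration can look like is shown below; the hook revisions and the exclude regex are illustrative assumptions, not the exact contents of this commit:

repos:
  # Python formatter applied across the repository
  - repo: https://github.com/psf/black
    rev: 23.9.1            # illustrative pin only
    hooks:
      - id: black
  # C/C++ formatter; CUDA sources are excluded so clang-format skips them
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v16.0.6           # illustrative pin only
    hooks:
      - id: clang-format
        exclude: \.(cu|cuh)$   # hypothetical pattern for ignoring CUDA files

Running "pre-commit run --all-files" then applies every configured hook to the whole tree, which is why a housekeeping commit like this one touches so many files.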
Author: Hongxin Liu
Date: 2023-09-19 14:20:26 +08:00
Committed by: GitHub
Parent: 3c6b831c26
Commit: 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions


@@ -1,4 +1,4 @@
 from .model import AMPModelMixin, ModelWrapper
 from .optimizer import OptimizerWrapper
-__all__ = ['OptimizerWrapper', 'ModelWrapper', 'AMPModelMixin']
+__all__ = ["OptimizerWrapper", "ModelWrapper", "AMPModelMixin"]


@@ -26,11 +26,9 @@ class ModelWrapper(nn.Module):
 class AMPModelMixin:
-    """This mixin class defines the interface for AMP training.
-    """
+    """This mixin class defines the interface for AMP training."""
 
     def update_master_params(self):
         """
         Update the master parameters for AMP training.
         """
         pass


@@ -22,7 +22,7 @@ class OptimizerWrapper:
         params = []
         for group in self.param_groups:
-            params += group['params']
+            params += group["params"]
         return params
 
     @property
@@ -82,12 +82,14 @@ class OptimizerWrapper:
"""
nn.utils.clip_grad_value_(self.parameters, clip_value, *args, **kwargs)
def clip_grad_by_norm(self,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
error_if_nonfinite: bool = False,
*args,
**kwargs) -> Tensor:
def clip_grad_by_norm(
self,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
error_if_nonfinite: bool = False,
*args,
**kwargs,
) -> Tensor:
"""
Clips gradient norm of an iterable of parameters.
@@ -113,7 +115,8 @@ class OptimizerWrapper:
             loss (Tensor): The loss to be scaled.
         """
         raise NotImplementedError(
-            "The method scale_loss is only available for optimizers with mixed precision training")
+            "The method scale_loss is only available for optimizers with mixed precision training"
+        )
 
     def unscale_grad(self):
         """
@@ -122,7 +125,8 @@ class OptimizerWrapper:
         Note: Only available for optimizers with mixed precision training.
         """
         raise NotImplementedError(
-            "The method unscale_grad is only available for optimizers with mixed precision training")
+            "The method unscale_grad is only available for optimizers with mixed precision training"
+        )
 
     def unwrap(self):
         """