Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-12-24 04:52:45 +00:00
[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
@@ -22,7 +22,7 @@ class OptimizerWrapper:
         params = []
         for group in self.param_groups:
-            params += group['params']
+            params += group["params"]
         return params
 
     @property
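For context, this hunk reformats the code that flattens the wrapped optimizer's `param_groups` into a single parameter list. Below is a minimal sketch of how that code plausibly sits inside `OptimizerWrapper`; the `parameters` property name and the `__init__` are assumptions inferred from the later `self.parameters` usage, not shown verbatim in this diff.

```python
from typing import List

from torch import Tensor
from torch.optim import Optimizer


class OptimizerWrapper:
    # Sketch only: the real ColossalAI class has more methods than shown here.
    def __init__(self, optim: Optimizer):
        self.optim = optim

    @property
    def param_groups(self):
        # Standard torch.optim layout: each group is a dict like {"params": [...], "lr": ...}.
        return self.optim.param_groups

    @property
    def parameters(self) -> List[Tensor]:
        # Flatten every parameter out of all param groups, as in the hunk above.
        params = []
        for group in self.param_groups:
            params += group["params"]
        return params
```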
@@ -82,12 +82,14 @@ class OptimizerWrapper:
         """
         nn.utils.clip_grad_value_(self.parameters, clip_value, *args, **kwargs)
 
-    def clip_grad_by_norm(self,
-                          max_norm: Union[float, int],
-                          norm_type: Union[float, int] = 2.0,
-                          error_if_nonfinite: bool = False,
-                          *args,
-                          **kwargs) -> Tensor:
+    def clip_grad_by_norm(
+        self,
+        max_norm: Union[float, int],
+        norm_type: Union[float, int] = 2.0,
+        error_if_nonfinite: bool = False,
+        *args,
+        **kwargs,
+    ) -> Tensor:
         """
         Clips gradient norm of an iterable of parameters.
 
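Only the signature of `clip_grad_by_norm` changes in this hunk; its body lies outside the diff. Judging from the neighbouring `nn.utils.clip_grad_value_` call, a plausible body would forward to `torch.nn.utils.clip_grad_norm_` roughly as in the hedged sketch below, which is an illustration rather than the verified ColossalAI implementation.

```python
from typing import Union

import torch.nn as nn
from torch import Tensor


def clip_grad_by_norm(
    self,
    max_norm: Union[float, int],
    norm_type: Union[float, int] = 2.0,
    error_if_nonfinite: bool = False,
    *args,
    **kwargs,
) -> Tensor:
    # Sketch of the method body: clip gradients in place and return the total norm,
    # mirroring how clip_grad_value_ delegates to torch in the hunk above.
    return nn.utils.clip_grad_norm_(
        self.parameters, max_norm, norm_type, error_if_nonfinite, *args, **kwargs
    )
```

A call such as `wrapper.clip_grad_by_norm(max_norm=1.0)` would then clip the gradients of all wrapped parameters to an L2 norm of 1.0.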
@@ -113,7 +115,8 @@ class OptimizerWrapper:
         loss (Tensor): The loss to be scaled.
         """
         raise NotImplementedError(
-            "The method scale_loss is only available for optimizers with mixed precision training")
+            "The method scale_loss is only available for optimizers with mixed precision training"
+        )
 
     def unscale_grad(self):
         """
@@ -122,7 +125,8 @@ class OptimizerWrapper:
         Note: Only available for optimizers with mixed precision training.
         """
         raise NotImplementedError(
-            "The method unscale_grad is only available for optimizers with mixed precision training")
+            "The method unscale_grad is only available for optimizers with mixed precision training"
+        )
 
     def unwrap(self):
         """
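Both `scale_loss` and `unscale_grad` are deliberately left as `NotImplementedError` stubs in the base wrapper; mixed-precision wrappers are expected to override them. A purely illustrative subclass is sketched below; the class name, the `loss_scale` attribute, and the `colossalai.interface` import path are assumptions, not taken from this diff.

```python
from torch import Tensor

from colossalai.interface import OptimizerWrapper  # import path is an assumption


class NaiveAMPOptimizerWrapper(OptimizerWrapper):
    """Hypothetical static-loss-scale wrapper, used only to illustrate the two stubs."""

    def __init__(self, optim, loss_scale: float = 2.0**16):
        super().__init__(optim)
        self.loss_scale = loss_scale

    def scale_loss(self, loss: Tensor) -> Tensor:
        # Scale the loss before backward() so fp16 gradients do not underflow.
        return loss * self.loss_scale

    def unscale_grad(self) -> None:
        # Undo the scaling on accumulated gradients before clipping or stepping.
        for p in self.parameters:
            if p.grad is not None:
                p.grad.div_(self.loss_scale)
```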