[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Hongxin Liu
2023-09-19 14:20:26 +08:00
committed by GitHub
parent 3c6b831c26
commit 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions

View File

@@ -1,5 +1,7 @@
 from typing import Tuple
+
 import torch
+
 from ..registry import meta_profiler_function
 # TODO: different activation has different FLOPs count, currently unused.

View File

@@ -41,15 +41,15 @@ def _elementwise_flops_compute(input, other)
 @meta_profiler_function.register(torch.sub)
 @meta_profiler_function.register(torch.mul)
 @meta_profiler_function.register(torch.floor_divide)
-@meta_profiler_function.register('add')  # for built-in op +
-@meta_profiler_function.register('iadd')  # for built-in op +=
-@meta_profiler_function.register('eq')  # for built-in op =
-@meta_profiler_function.register('sub')  # for built-in op -
-@meta_profiler_function.register('isub')  # for built-in op -=
-@meta_profiler_function.register('mul')  # for built-in op *
-@meta_profiler_function.register('imul')  # for built-in op *=
-@meta_profiler_function.register('floordiv')  # for built-in op //
-@meta_profiler_function.register('ifloordiv')  # for built-in op //=
+@meta_profiler_function.register("add")  # for built-in op +
+@meta_profiler_function.register("iadd")  # for built-in op +=
+@meta_profiler_function.register("eq")  # for built-in op =
+@meta_profiler_function.register("sub")  # for built-in op -
+@meta_profiler_function.register("isub")  # for built-in op -=
+@meta_profiler_function.register("mul")  # for built-in op *
+@meta_profiler_function.register("imul")  # for built-in op *=
+@meta_profiler_function.register("floordiv")  # for built-in op //
+@meta_profiler_function.register("ifloordiv")  # for built-in op //=
 def torch_add_like_ops(input: Any, other: Any, *, out: Optional[torch.Tensor] = None) -> Tuple[int, int]:
     return _elementwise_flops_compute(input, other)
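A note for context: `_elementwise_flops_compute` appears only in the hunk header above, and its body is not part of this diff. Below is a minimal sketch of the convention such helpers usually follow (one FLOP per element of the broadcast output, zero MACs); the broadcast handling and the helper's exact behavior are assumptions, not taken from the commit.

# Sketch only; the real helper is not shown in this diff. Assumes the usual
# convention: one FLOP per element of the broadcast result, no multiply-accumulates.
import torch

def _elementwise_flops_sketch(input, other):
    input_shape = input.shape if isinstance(input, torch.Tensor) else ()
    other_shape = other.shape if isinstance(other, torch.Tensor) else ()
    out_shape = torch.broadcast_shapes(input_shape, other_shape)
    flops = 1
    for dim in out_shape:
        flops *= dim
    return flops, 0  # (flops, macs)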
@@ -62,7 +62,7 @@ def torch_elementwise_op(input: torch.Tensor, *, out: Optional[torch.Tensor] = N
 @meta_profiler_function.register(torch.matmul)
-@meta_profiler_function.register('matmul')  # for built-in op @
+@meta_profiler_function.register("matmul")  # for built-in op @
 @meta_profiler_function.register(torch.Tensor.matmul)
 def torch_matmul(input: torch.Tensor, other: torch.Tensor, *, out: Optional[torch.Tensor] = None) -> Tuple[int, int]:
     macs = reduce(operator.mul, input.shape) * other.shape[-1]
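To sanity-check the `macs` formula above: `reduce(operator.mul, input.shape)` is the element count of `input`, and multiplying by `other.shape[-1]` charges one multiply-accumulate per output element per contraction step. A quick worked example with made-up shapes:

# Worked example for the MACs count above; the shapes are hypothetical.
import operator
from functools import reduce

import torch

a, b = torch.empty(2, 3, 4), torch.empty(4, 5)  # (B, M, K) @ (K, N)
macs = reduce(operator.mul, a.shape) * b.shape[-1]
assert macs == 2 * 3 * 4 * 5 == 120             # B * M * K * N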
@@ -78,13 +78,15 @@ def torch_bmm(input: torch.Tensor, other: torch.Tensor, *, out: Optional[torch.T
 @meta_profiler_function.register(torch.var_mean)
-def torch_var_mean(input: torch.Tensor,
-                   dim: Union[int, Tuple[int, ...]],
-                   unbiased: Optional[bool] = True,
-                   keepdim: Optional[bool] = False,
-                   *,
-                   out: Optional[torch.Tensor] = None) -> Tuple[int, int]:
-    assert out is None, 'saving to out is not supported yet'
+def torch_var_mean(
+    input: torch.Tensor,
+    dim: Union[int, Tuple[int, ...]],
+    unbiased: Optional[bool] = True,
+    keepdim: Optional[bool] = False,
+    *,
+    out: Optional[torch.Tensor] = None,
+) -> Tuple[int, int]:
+    assert out is None, "saving to out is not supported yet"
     flops = input.numel() * 3
     macs = 0
     return flops, macs
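The `input.numel() * 3` count reads as roughly one op per element for each of three passes (mean accumulation, deviation, squared-deviation accumulation); that breakdown is an interpretation, since the diff only states the formula. Checked on a hypothetical input:

# Illustration with a made-up tensor; mirrors flops = input.numel() * 3.
import torch

x = torch.empty(8, 16)
flops, macs = x.numel() * 3, 0
assert (flops, macs) == (8 * 16 * 3, 0)  # 384 FLOPs, no MACs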

View File

@@ -1,5 +1,7 @@
-import torch
 from typing import Optional
+
+import torch
+
 from ..registry import meta_profiler_function

View File

@@ -1,5 +1,7 @@
 from typing import Tuple
+
 import torch
+
 from ..registry import meta_profiler_function

View File

@@ -1,5 +1,7 @@
 from typing import List, Optional, Tuple
+
 import torch
+
 from ..registry import meta_profiler_function
@@ -21,11 +23,13 @@ def torch_nn_func_instancenorm(
 @meta_profiler_function.register(torch.nn.functional.group_norm)
-def torch_nn_func_groupnorm(input: torch.Tensor,
-                            num_groups: int,
-                            weight: Optional[torch.Tensor] = None,
-                            bias: Optional[torch.Tensor] = None,
-                            eps: float = 1e-5) -> Tuple[int, int]:
+def torch_nn_func_groupnorm(
+    input: torch.Tensor,
+    num_groups: int,
+    weight: Optional[torch.Tensor] = None,
+    bias: Optional[torch.Tensor] = None,
+    eps: float = 1e-5,
+) -> Tuple[int, int]:
     has_affine = weight is not None
     flops = input.numel() * (5 if has_affine else 4)
     macs = 0
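The `5 if has_affine else 4` factor above is plausibly four per-element ops for the normalization itself plus one for the affine scale and shift when `weight` is given; the diff states only the formula, so that breakdown is an interpretation. With made-up sizes:

# Made-up example: group norm over a (2, 6, 4, 4) input with affine weight.
import torch

x = torch.empty(2, 6, 4, 4)
weight = torch.empty(6)
has_affine = weight is not None
flops = x.numel() * (5 if has_affine else 4)
assert flops == 192 * 5 == 960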

View File

@@ -1,5 +1,7 @@
-from typing import Tuple, Union
+from typing import Tuple
+
 import torch
+
 from ..registry import meta_profiler_function

View File

@@ -1,6 +1,6 @@
 import operator
 from typing import Any, Tuple
+
 import torch
-
 from ..registry import meta_profiler_function

View File

@@ -1,7 +1,9 @@
-from functools import reduce
 import operator
+from functools import reduce
 from typing import Any, Optional, Tuple
+
 import torch
+
 from ..registry import meta_profiler_function
@@ -43,13 +45,11 @@ def torch_where(condition: torch.Tensor, x: Any, y: Any) -> Tuple[int, int]:
 @meta_profiler_function.register(torch.max)
-def torch_max(input: torch.Tensor,
-              dim: int = None,
-              keepdim: bool = False,
-              *,
-              out: Optional[torch.Tensor] = None) -> Tuple[int, int]:
+def torch_max(
+    input: torch.Tensor, dim: int = None, keepdim: bool = False, *, out: Optional[torch.Tensor] = None
+) -> Tuple[int, int]:
     macs = 0
-    assert out is None, 'assigning value to out is not supported yet'
+    assert out is None, "assigning value to out is not supported yet"
     if dim is not None:
         shape = list(input.shape)
         shape.pop(int(dim))
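The hunk ends mid-function, but the shape step it introduces is self-contained: popping `dim` from a copy of `input.shape` leaves the reduced output's shape (with `keepdim=False`), which presumably feeds the FLOP count further down. A tiny illustration:

# Illustration of the shape.pop(dim) step with a made-up shape.
shape = list((2, 3, 4))  # list(input.shape) for a (2, 3, 4) input
shape.pop(int(1))        # dropping dim=1
assert shape == [2, 4]   # shape of torch.max(input, dim=1).values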