mirror of
https://github.com/hpcaitech/ColossalAI.git
synced 2025-09-06 11:32:10 +00:00
[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit * [misc] run pre-commit * [misc] remove useless configuration files * [misc] ignore cuda for clang-format
This commit is contained in:
@@ -10,7 +10,6 @@ except:
|
||||
|
||||
|
||||
class LinearModel(torch.nn.Module):
    """Toy module wrapping a single ``torch.nn.Linear`` layer.

    Used by the tracer tests to exercise linear (bias-addition) handling.
    """

    def __init__(self, in_features, out_features, bias):
        # ``bias`` is forwarded so tests can toggle bias-addition splitting.
        super().__init__()
        self.linear = torch.nn.Linear(in_features, out_features, bias=bias)
@@ -21,25 +20,14 @@ class LinearModel(torch.nn.Module):
|
||||
|
||||
|
||||
class ConvModel(torch.nn.Module):
    """Toy module with a ``Conv2d`` and a ``ConvTranspose2d`` sharing hyper-parameters.

    Both layers use padding=1, stride=2, dilation=2 and groups=3, so
    ``in_channel`` and ``out_channels`` must each be divisible by 3.
    """

    def __init__(self, in_channel, out_channels, kernel_size, bias) -> None:
        super().__init__()
        # Construct each layer exactly once; the merged diff residue built
        # them twice, discarding the first instances.
        self.conv = torch.nn.Conv2d(
            in_channel, out_channels, kernel_size, bias=bias, padding=1, stride=2, dilation=2, groups=3
        )
        self.conv_transpose = torch.nn.ConvTranspose2d(
            out_channels, out_channels, kernel_size, bias=bias, padding=1, stride=2, dilation=2, groups=3
        )
|
||||
|
||||
def forward(self, x):
|
||||
x = self.conv(x)
|
||||
@@ -48,7 +36,6 @@ class ConvModel(torch.nn.Module):
|
||||
|
||||
|
||||
class AModel(torch.nn.Module):
|
||||
|
||||
def __init__(self, bias) -> None:
|
||||
super().__init__()
|
||||
self.linear_1 = LinearModel(3, 3, bias)
|
||||
@@ -63,7 +50,7 @@ class AModel(torch.nn.Module):
|
||||
return x
|
||||
|
||||
|
||||
@pytest.mark.skipif(torch.__version__ < '1.12.0', reason='torch version < 12')
|
||||
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="torch version < 12")
|
||||
@clear_cache_before_run()
|
||||
@parameterize("bias", [True, False])
|
||||
@parameterize("bias_addition_split", [True, False])
|
||||
@@ -71,11 +58,11 @@ class AModel(torch.nn.Module):
|
||||
def test_mod_dir(bias, bias_addition_split, shape):
    """Trace ``AModel`` and check every graph node records a non-empty ``mod_dir``.

    Args:
        bias: whether the sub-modules are constructed with a bias term.
        bias_addition_split: forwarded to ``symbolic_trace``.
        shape: input tensor shape used for meta tracing.
    """
    model = AModel(bias=bias)
    x = torch.rand(shape)
    # Trace once; the merged diff residue invoked symbolic_trace twice.
    gm = symbolic_trace(model, meta_args={"x": x}, bias_addition_split=bias_addition_split)
    for node in gm.graph.nodes:
        assert len(node.meta["info"].mod_dir), f"{node} should have non-trivial ``mod_dir``."
        print(node, node.meta["info"].mod_dir)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Smoke-run one parameter combination directly when executed as a script.
    test_mod_dir(bias=True, bias_addition_split=True, shape=(3, 3, 3))
|
Reference in New Issue
Block a user