[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Hongxin Liu
2023-09-19 14:20:26 +08:00
committed by GitHub
parent 3c6b831c26
commit 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions

View File

@@ -1,5 +1,5 @@
 import time
-from typing import Any, Dict, List
+from typing import Any

 import torch
 import torch.fx
@@ -111,13 +111,14 @@ def _benchmark_speed(model, inputs, loop=5):
 def benchmark_evoformer_stack(data_args):
     from test_autochunk_evoformer_stack import get_data, get_model

     print("\nmsa len: %d, pair len: %d" % (data_args[0], data_args[1]))
     max_mem = _benchmark_evoformer_stack_origin(data_args, get_model, get_data)
     for ratio in [0.5, 0.4, 0.3, 0.2, 0.1]:
         try:
             _benchmark_evoformer_stack_gm(data_args, max_mem * ratio, get_model, get_data)
         except RuntimeError as e:
-            if e.args[0] == 'Search failed. Try a larger memory threshold.':
+            if e.args[0] == "Search failed. Try a larger memory threshold.":
                 break
         except Exception as e:
             raise e
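
Note on the pattern this hunk touches (only the quote style changes): the benchmark re-runs the autochunk search under progressively smaller memory budgets and stops at the first infeasible one, which the search signals with a specific RuntimeError message. A minimal, self-contained sketch of that pattern; shrink_until_failure and run_with_budget are hypothetical names, not from the diff:

def shrink_until_failure(run_with_budget, baseline_mem: float) -> None:
    # Try successively tighter fractions of the measured baseline memory.
    for ratio in [0.5, 0.4, 0.3, 0.2, 0.1]:
        try:
            run_with_budget(baseline_mem * ratio)
        except RuntimeError as e:
            # An infeasible budget is reported via this exact message, so
            # matching e.args[0] separates it from unrelated runtime errors.
            if e.args[0] == "Search failed. Try a larger memory threshold.":
                break
            raise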

View File

@@ -8,7 +8,6 @@ from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
 from colossalai.autochunk.utils import flat_list
 from colossalai.fx.graph_module import ColoGraphModule
 from colossalai.fx.passes.meta_info_prop import MetaInfoProp
-from colossalai.legacy.core import global_context as gpc
 from colossalai.testing import free_port

 if AUTOCHUNK_AVAILABLE:
@@ -80,9 +79,9 @@ def assert_codegen_run(
     out_gm = flat_list(out_gm)
     out_model = flat_list(out_model)
     for out_gm_i, out_model_i in zip(out_gm, out_model):
-        assert torch.allclose(out_gm_i, out_model_i,
-                              atol=1e-4), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(
-                                  torch.abs(out_gm_i - out_model_i))
+        assert torch.allclose(
+            out_gm_i, out_model_i, atol=1e-4
+        ), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(torch.abs(out_gm_i - out_model_i))

     return chunks
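
For context, the reformatted assertion is a standard traced-vs-eager check: flatten both output structures, then compare element-wise with an absolute tolerance, reporting the mean absolute difference on failure. A self-contained version of the same check; the helper name is mine, not from the diff:

import torch

def assert_outputs_match(out_gm, out_model, atol: float = 1e-4) -> None:
    # Pair up the flattened outputs of the traced graph and the eager model.
    for out_gm_i, out_model_i in zip(out_gm, out_model):
        diff = torch.mean(torch.abs(out_gm_i - out_model_i))
        assert torch.allclose(
            out_gm_i, out_model_i, atol=atol
        ), "fx_out doesn't comply with original output, diff is %.2e" % diff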

View File

@@ -6,6 +6,7 @@ import torch.fx
 try:
     from fastfold.model.nn.evoformer import EvoformerBlock
+
     HAS_REPO = True
 except:
     HAS_REPO = False
@@ -17,22 +18,26 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn
 def get_model():
-    model = EvoformerBlock(
-        c_m=256,
-        c_z=128,
-        c_hidden_msa_att=32,
-        c_hidden_opm=32,
-        c_hidden_mul=128,
-        c_hidden_pair_att=32,
-        no_heads_msa=8,
-        no_heads_pair=4,
-        transition_n=4,
-        msa_dropout=0.15,
-        pair_dropout=0.15,
-        inf=1e4,
-        eps=1e-4,
-        is_multimer=False,
-    ).eval().cuda()
+    model = (
+        EvoformerBlock(
+            c_m=256,
+            c_z=128,
+            c_hidden_msa_att=32,
+            c_hidden_opm=32,
+            c_hidden_mul=128,
+            c_hidden_pair_att=32,
+            no_heads_msa=8,
+            no_heads_pair=4,
+            transition_n=4,
+            msa_dropout=0.15,
+            pair_dropout=0.15,
+            inf=1e4,
+            eps=1e-4,
+            is_multimer=False,
+        )
+        .eval()
+        .cuda()
+    )
     return model
@@ -54,8 +59,20 @@ def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]:
 def get_chunk_target() -> Dict:
     return {
-        None: [(120, 126), (225, 244), (270, 289), (306, 311), (70, 106), (23, 46), (146, 152), (187, 193), (181, 184),
-               (140, 145), (162, 163), (203, 204)],
+        None: [
+            (120, 126),
+            (225, 244),
+            (270, 289),
+            (306, 311),
+            (70, 106),
+            (23, 46),
+            (146, 152),
+            (187, 193),
+            (181, 184),
+            (140, 145),
+            (162, 163),
+            (203, 204),
+        ],
         20: [(120, 123), (232, 237), (277, 282), (305, 306)],
         24: [(122, 123)],
     }
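
As a reading aid: each key of this dict appears to be a max_memory budget (None meaning unconstrained) and each value a list of (start, end) node-index ranges the autochunk search is expected to emit; this interpretation follows the test's @parameterize("max_memory", [None, 20, 24]) values rather than anything stated in the hunk itself.

# Hypothetical usage: look up the regions expected under a budget of 20.
expected_regions = get_chunk_target()[20]
assert (120, 123) in expected_regions  # four regions expected at this budget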

View File

@@ -6,6 +6,7 @@ import torch.fx
 try:
     from fastfold.model.nn.evoformer import EvoformerStack
+
     HAS_REPO = True
 except:
     HAS_REPO = False
@@ -17,26 +18,30 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn
 def get_model():
-    model = EvoformerStack(
-        c_m=256,
-        c_z=128,
-        c_hidden_msa_att=32,
-        c_hidden_opm=32,
-        c_hidden_mul=128,
-        c_hidden_pair_att=32,
-        c_s=384,
-        no_heads_msa=8,
-        no_heads_pair=4,
-        no_blocks=2,  # 48
-        transition_n=4,
-        msa_dropout=0.15,
-        pair_dropout=0.25,
-        blocks_per_ckpt=None,
-        inf=1000000000.0,
-        eps=1e-08,
-        clear_cache_between_blocks=False,
-        is_multimer=False,
-    ).eval().cuda()
+    model = (
+        EvoformerStack(
+            c_m=256,
+            c_z=128,
+            c_hidden_msa_att=32,
+            c_hidden_opm=32,
+            c_hidden_mul=128,
+            c_hidden_pair_att=32,
+            c_s=384,
+            no_heads_msa=8,
+            no_heads_pair=4,
+            no_blocks=2,  # 48
+            transition_n=4,
+            msa_dropout=0.15,
+            pair_dropout=0.25,
+            blocks_per_ckpt=None,
+            inf=1000000000.0,
+            eps=1e-08,
+            clear_cache_between_blocks=False,
+            is_multimer=False,
+        )
+        .eval()
+        .cuda()
+    )
     return model
@@ -62,7 +67,7 @@ def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]:
 )
 @clear_cache_before_run()
 @parameterize("max_memory", [None, 20, 24])
-@parameterize("data_args", [(32, 64)])    # (msa_len, pair_len)
+@parameterize("data_args", [(32, 64)])  # (msa_len, pair_len)
 def test_evoformer_stack(data_args, max_memory):
     spawn(
         run_test,
View File

@@ -1,4 +1,4 @@
-from typing import Dict, List, Tuple
+from typing import List, Tuple

 import pytest
 import torch
@@ -6,6 +6,7 @@ import torch.fx
 try:
     from fastfold.model.nn.evoformer import ExtraMSABlock
+
     HAS_REPO = True
 except:
     HAS_REPO = False
@@ -16,23 +17,27 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn
 def get_model():
-    model = ExtraMSABlock(
-        c_m=256,
-        c_z=128,
-        c_hidden_msa_att=32,
-        c_hidden_opm=32,
-        c_hidden_mul=128,
-        c_hidden_pair_att=32,
-        no_heads_msa=8,
-        no_heads_pair=4,
-        transition_n=4,
-        msa_dropout=0.15,
-        pair_dropout=0.15,
-        inf=1e4,
-        eps=1e-4,
-        ckpt=False,
-        is_multimer=False,
-    ).eval().cuda()
+    model = (
+        ExtraMSABlock(
+            c_m=256,
+            c_z=128,
+            c_hidden_msa_att=32,
+            c_hidden_opm=32,
+            c_hidden_mul=128,
+            c_hidden_pair_att=32,
+            no_heads_msa=8,
+            no_heads_pair=4,
+            transition_n=4,
+            msa_dropout=0.15,
+            pair_dropout=0.15,
+            inf=1e4,
+            eps=1e-4,
+            ckpt=False,
+            is_multimer=False,
+        )
+        .eval()
+        .cuda()
+    )
     return model
@@ -58,7 +63,7 @@ def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]:
 )
 @clear_cache_before_run()
 @parameterize("max_memory", [None, 20, 24])
-@parameterize("data_args", [(32, 64)])    # (msa_len, pair_len)
+@parameterize("data_args", [(32, 64)])  # (msa_len, pair_len)
 def test_extramsa_block(data_args, max_memory):
     spawn(
         run_test,