mirror of https://github.com/hpcaitech/ColossalAI.git
synced 2025-09-11 22:10:37 +00:00
[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
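The updated .pre-commit-config.yaml itself is not part of this excerpt. As a rough, hypothetical sketch of the kind of configuration the commit message describes (the hook repos, pinned versions, and the exact way CUDA sources are kept away from clang-format are assumptions, not the repository's actual file):

repos:
  - repo: https://github.com/PyCQA/isort
    rev: 5.12.0  # assumed version
    hooks:
      - id: isort
  - repo: https://github.com/psf/black
    rev: 23.9.1  # assumed version
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v13.0.1  # assumed version
    hooks:
      - id: clang-format
        types_or: [c++, c]  # restrict to C/C++ sources, i.e. do not touch .cu CUDA files
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0  # assumed version
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer

Most of the hunks below are the mechanical result of running such formatters over the test files: black-style call wrapping, trailing commas, and double-quoted strings.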
@@ -1,5 +1,5 @@
import time
-from typing import Any, Dict, List
+from typing import Any

import torch
import torch.fx

@@ -111,13 +111,14 @@ def _benchmark_speed(model, inputs, loop=5):

def benchmark_evoformer_stack(data_args):
    from test_autochunk_evoformer_stack import get_data, get_model
+
    print("\nmsa len: %d, pair len: %d" % (data_args[0], data_args[1]))
    max_mem = _benchmark_evoformer_stack_origin(data_args, get_model, get_data)
    for ratio in [0.5, 0.4, 0.3, 0.2, 0.1]:
        try:
            _benchmark_evoformer_stack_gm(data_args, max_mem * ratio, get_model, get_data)
        except RuntimeError as e:
-            if e.args[0] == 'Search failed. Try a larger memory threshold.':
+            if e.args[0] == "Search failed. Try a larger memory threshold.":
                break
        except Exception as e:
            raise e

@@ -8,7 +8,6 @@ from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
from colossalai.autochunk.utils import flat_list
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import free_port

if AUTOCHUNK_AVAILABLE:

@@ -80,9 +79,9 @@ def assert_codegen_run(
    out_gm = flat_list(out_gm)
    out_model = flat_list(out_model)
    for out_gm_i, out_model_i in zip(out_gm, out_model):
-        assert torch.allclose(out_gm_i, out_model_i,
-                              atol=1e-4), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(
-                                  torch.abs(out_gm_i - out_model_i))
+        assert torch.allclose(
+            out_gm_i, out_model_i, atol=1e-4
+        ), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(torch.abs(out_gm_i - out_model_i))

    return chunks

@@ -6,6 +6,7 @@ import torch.fx

try:
    from fastfold.model.nn.evoformer import EvoformerBlock
+
    HAS_REPO = True
except:
    HAS_REPO = False

@@ -17,22 +18,26 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn


def get_model():
-    model = EvoformerBlock(
-        c_m=256,
-        c_z=128,
-        c_hidden_msa_att=32,
-        c_hidden_opm=32,
-        c_hidden_mul=128,
-        c_hidden_pair_att=32,
-        no_heads_msa=8,
-        no_heads_pair=4,
-        transition_n=4,
-        msa_dropout=0.15,
-        pair_dropout=0.15,
-        inf=1e4,
-        eps=1e-4,
-        is_multimer=False,
-    ).eval().cuda()
+    model = (
+        EvoformerBlock(
+            c_m=256,
+            c_z=128,
+            c_hidden_msa_att=32,
+            c_hidden_opm=32,
+            c_hidden_mul=128,
+            c_hidden_pair_att=32,
+            no_heads_msa=8,
+            no_heads_pair=4,
+            transition_n=4,
+            msa_dropout=0.15,
+            pair_dropout=0.15,
+            inf=1e4,
+            eps=1e-4,
+            is_multimer=False,
+        )
+        .eval()
+        .cuda()
+    )
    return model


@@ -54,8 +59,20 @@ def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]:

def get_chunk_target() -> Dict:
    return {
-        None: [(120, 126), (225, 244), (270, 289), (306, 311), (70, 106), (23, 46), (146, 152), (187, 193), (181, 184),
-               (140, 145), (162, 163), (203, 204)],
+        None: [
+            (120, 126),
+            (225, 244),
+            (270, 289),
+            (306, 311),
+            (70, 106),
+            (23, 46),
+            (146, 152),
+            (187, 193),
+            (181, 184),
+            (140, 145),
+            (162, 163),
+            (203, 204),
+        ],
        20: [(120, 123), (232, 237), (277, 282), (305, 306)],
        24: [(122, 123)],
    }

@@ -6,6 +6,7 @@ import torch.fx

try:
    from fastfold.model.nn.evoformer import EvoformerStack
+
    HAS_REPO = True
except:
    HAS_REPO = False

@@ -17,26 +18,30 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn


def get_model():
-    model = EvoformerStack(
-        c_m=256,
-        c_z=128,
-        c_hidden_msa_att=32,
-        c_hidden_opm=32,
-        c_hidden_mul=128,
-        c_hidden_pair_att=32,
-        c_s=384,
-        no_heads_msa=8,
-        no_heads_pair=4,
-        no_blocks=2,  # 48
-        transition_n=4,
-        msa_dropout=0.15,
-        pair_dropout=0.25,
-        blocks_per_ckpt=None,
-        inf=1000000000.0,
-        eps=1e-08,
-        clear_cache_between_blocks=False,
-        is_multimer=False,
-    ).eval().cuda()
+    model = (
+        EvoformerStack(
+            c_m=256,
+            c_z=128,
+            c_hidden_msa_att=32,
+            c_hidden_opm=32,
+            c_hidden_mul=128,
+            c_hidden_pair_att=32,
+            c_s=384,
+            no_heads_msa=8,
+            no_heads_pair=4,
+            no_blocks=2,  # 48
+            transition_n=4,
+            msa_dropout=0.15,
+            pair_dropout=0.25,
+            blocks_per_ckpt=None,
+            inf=1000000000.0,
+            eps=1e-08,
+            clear_cache_between_blocks=False,
+            is_multimer=False,
+        )
+        .eval()
+        .cuda()
+    )
    return model


@@ -62,7 +67,7 @@ def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]:
)
@clear_cache_before_run()
@parameterize("max_memory", [None, 20, 24])
@parameterize("data_args", [(32, 64)])  # (msa_len, pair_len)
def test_evoformer_stack(data_args, max_memory):
    spawn(
        run_test,

@@ -1,4 +1,4 @@
-from typing import Dict, List, Tuple
+from typing import List, Tuple

import pytest
import torch

@@ -6,6 +6,7 @@ import torch.fx

try:
    from fastfold.model.nn.evoformer import ExtraMSABlock
+
    HAS_REPO = True
except:
    HAS_REPO = False

@@ -16,23 +17,27 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn


def get_model():
-    model = ExtraMSABlock(
-        c_m=256,
-        c_z=128,
-        c_hidden_msa_att=32,
-        c_hidden_opm=32,
-        c_hidden_mul=128,
-        c_hidden_pair_att=32,
-        no_heads_msa=8,
-        no_heads_pair=4,
-        transition_n=4,
-        msa_dropout=0.15,
-        pair_dropout=0.15,
-        inf=1e4,
-        eps=1e-4,
-        ckpt=False,
-        is_multimer=False,
-    ).eval().cuda()
+    model = (
+        ExtraMSABlock(
+            c_m=256,
+            c_z=128,
+            c_hidden_msa_att=32,
+            c_hidden_opm=32,
+            c_hidden_mul=128,
+            c_hidden_pair_att=32,
+            no_heads_msa=8,
+            no_heads_pair=4,
+            transition_n=4,
+            msa_dropout=0.15,
+            pair_dropout=0.15,
+            inf=1e4,
+            eps=1e-4,
+            ckpt=False,
+            is_multimer=False,
+        )
+        .eval()
+        .cuda()
+    )
    return model


@@ -58,7 +63,7 @@ def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]:
)
@clear_cache_before_run()
@parameterize("max_memory", [None, 20, 24])
@parameterize("data_args", [(32, 64)])  # (msa_len, pair_len)
def test_extramsa_block(data_args, max_memory):
    spawn(
        run_test,

@@ -1,5 +1,5 @@
import time
-from typing import Any, Dict, List
+from typing import Any

import torch
import torch.fx

@@ -64,8 +64,10 @@ def _benchmark_autochunk_unet_gm(
    para_mem = float(parameter_size(model)) / 1024**2
    act_mem = _benchmark_memory(gm, inputs)
    speed = _benchmark_speed(gm, inputs)
-    print("unet autochunk, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB" %
-          (speed, act_mem, para_mem, act_mem + para_mem))
+    print(
+        "unet autochunk, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB"
+        % (speed, act_mem, para_mem, act_mem + para_mem)
+    )


def _benchmark_autochunk_unet_origin(

@@ -86,8 +88,10 @@ def _benchmark_autochunk_unet_origin(
    para_mem = float(parameter_size(model)) / 1024**2
    act_mem = _benchmark_memory(model, inputs)
    speed = _benchmark_speed(model, inputs)
-    print("unet origin, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB" %
-          (speed, act_mem, para_mem, act_mem + para_mem))
+    print(
+        "unet origin, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB"
+        % (speed, act_mem, para_mem, act_mem + para_mem)
+    )
    return act_mem


@@ -115,6 +119,7 @@ def _benchmark_speed(model, inputs, loop=5):

def benchmark_autochunk_unet(batch=1, height=448, width=448):
    from test_autochunk_unet import UNet2DModel, get_data
+
    model = UNet2DModel()
    latent_shape = (batch, 3, height // 7, width // 7)

@@ -124,7 +129,7 @@ def benchmark_autochunk_unet(batch=1, height=448, width=448):
        try:
            _benchmark_autochunk_unet_gm(model, get_data(latent_shape), max_mem * ratio)
        except RuntimeError as e:
-            if e.args[0] == 'Search failed. Try a larger memory threshold.':
+            if e.args[0] == "Search failed. Try a larger memory threshold.":
                break
        except Exception as e:
            raise e

@@ -83,9 +83,11 @@ def assert_codegen_run(
    max_mem_ori = torch.cuda.max_memory_allocated() / 1024**2
    print("origin mem: %.2fMB, autochunk mem: %.2fMB" % (max_mem_ori - now_mem_ori, max_mem_gm - now_mem_gm))

-    assert torch.allclose(out_gm["sample"], out_model["sample"],
-                          atol=1e-3), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(
-                              torch.abs(out_gm["sample"] - out_model["sample"]))
+    assert torch.allclose(
+        out_gm["sample"], out_model["sample"], atol=1e-3
+    ), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(
+        torch.abs(out_gm["sample"] - out_model["sample"])
+    )

    return chunks

@@ -129,7 +131,7 @@ def run_test(
    if get_chunk_target is not None:
        chunk_found = [i["region"] for i in chunks]
        chunk_target = get_chunk_target()[max_memory]
-        assert (chunk_found == chunk_target), "found regions %s doesn't equal target regions %s" % (
+        assert chunk_found == chunk_target, "found regions %s doesn't equal target regions %s" % (
            str(chunk_found),
            str(chunk_target),
        )

@@ -5,9 +5,11 @@ import torch

try:
    import diffusers
+
    MODELS = [diffusers.UNet2DModel]
    HAS_REPO = True
    from packaging import version
+
    SKIP_UNET_TEST = version.parse(diffusers.__version__) > version.parse("0.10.2")
except:
    MODELS = []

@@ -1,5 +1,5 @@
import time
-from typing import Any, Dict, List
+from typing import Any

import torch
import torch.fx

@@ -64,8 +64,10 @@ def _benchmark_autochunk_gpt_gm(
    para_mem = float(parameter_size(model)) / 1024**2 * 6
    act_mem = _benchmark_memory(gm, inputs)
    speed = _benchmark_speed(gm, inputs)
-    print("gpt autochunk, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB" %
-          (speed, act_mem, para_mem, act_mem + para_mem))
+    print(
+        "gpt autochunk, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB"
+        % (speed, act_mem, para_mem, act_mem + para_mem)
+    )


def _benchmark_autochunk_gpt_origin(

@@ -86,8 +88,10 @@ def _benchmark_autochunk_gpt_origin(
    para_mem = float(parameter_size(model)) / 1024**2 * 6
    act_mem = _benchmark_memory(model, inputs)
    speed = _benchmark_speed(model, inputs)
-    print("gpt origin, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB" %
-          (speed, act_mem, para_mem, act_mem + para_mem))
+    print(
+        "gpt origin, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB"
+        % (speed, act_mem, para_mem, act_mem + para_mem)
+    )
    return act_mem


@@ -115,6 +119,7 @@ def _benchmark_speed(model, inputs, loop=5):

def benchmark_autochunk_gpt(batch=1, seq=512, n_embd=768, n_head=12):
    from test_autochunk_gpt import GPT2Config, GPT2Model, get_data
+
    model = GPT2Model
    config = GPT2Config(n_embd=n_embd, n_positions=seq, n_layer=2, n_head=n_head)
    model = model(config=config)

@@ -125,7 +130,7 @@ def benchmark_autochunk_gpt(batch=1, seq=512, n_embd=768, n_head=12):
        try:
            _benchmark_autochunk_gpt_gm(model, get_data(shape), max_mem * ratio)
        except RuntimeError as e:
-            if e.args[0] == 'Search failed. Try a larger memory threshold.':
+            if e.args[0] == "Search failed. Try a larger memory threshold.":
                break
        except Exception as e:
            raise e

@@ -5,6 +5,7 @@ import torch

try:
    from transformers import GPT2Config, GPT2Model
+
    MODELS = [GPT2Model]
    HAS_REPO = True
except:

@@ -52,13 +53,15 @@ def test_autochunk_gpt(model, shape, max_memory):


if __name__ == "__main__":
-    run_test(rank=0,
-             data=get_data((BATCH_SIZE, SEQ_LENGTH)),
-             max_memory=None,
-             model=GPT2Model,
-             config=GPT2Config(n_embd=96, n_position=SEQ_LENGTH, n_layer=2, n_head=4),
-             print_code=False,
-             print_est_mem=False,
-             print_mem=False,
-             print_progress=False,
-             eval_mem=False)
+    run_test(
+        rank=0,
+        data=get_data((BATCH_SIZE, SEQ_LENGTH)),
+        max_memory=None,
+        model=GPT2Model,
+        config=GPT2Config(n_embd=96, n_position=SEQ_LENGTH, n_layer=2, n_head=4),
+        print_code=False,
+        print_est_mem=False,
+        print_mem=False,
+        print_progress=False,
+        eval_mem=False,
+    )

@@ -38,11 +38,9 @@ def assert_codegen_run(
    meta_tensors = [meta_args[i] if i in meta_args else concrete_args[i] for i in sequence]
    meta_tensors = [MetaTensor(i, fake_device="cuda:0") if isinstance(i, torch.Tensor) else i for i in meta_tensors]
    interp.propagate(*meta_tensors)
-    codegen = AutoChunkCodeGen(meta_graph,
-                               max_memory=max_memory,
-                               print_mem=print_est_mem,
-                               print_progress=print_progress,
-                               eval_mem=eval_mem)
+    codegen = AutoChunkCodeGen(
+        meta_graph, max_memory=max_memory, print_mem=print_est_mem, print_progress=print_progress, eval_mem=eval_mem
+    )
    chunks = codegen.chunk_infos

    # trace and recompile

@@ -85,9 +83,9 @@ def assert_allclose(out_model: Any, out_gm: Any) -> None:
    assert allclose for out
    """
    if isinstance(out_model, torch.Tensor):
-        assert torch.allclose(out_model, out_gm,
-                              atol=1e-4), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(
-                                  torch.abs(out_model - out_gm))
+        assert torch.allclose(
+            out_model, out_gm, atol=1e-4
+        ), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(torch.abs(out_model - out_gm))
    elif isinstance(out_model, dict):
        for k in out_model.keys():
            assert_allclose(out_model[k], out_gm[k])

@@ -123,19 +121,21 @@ def run_test(
    )

    # build model and input
-    chunks = assert_codegen_run(model,
-                                data=data,
-                                max_memory=max_memory,
-                                print_code=print_code,
-                                print_est_mem=print_est_mem,
-                                print_mem=print_mem,
-                                print_progress=print_progress,
-                                eval_mem=eval_mem)
+    chunks = assert_codegen_run(
+        model,
+        data=data,
+        max_memory=max_memory,
+        print_code=print_code,
+        print_est_mem=print_est_mem,
+        print_mem=print_mem,
+        print_progress=print_progress,
+        eval_mem=eval_mem,
+    )

    if get_chunk_target is not None:
        chunk_found = [i["region"] for i in chunks]
        chunk_target = get_chunk_target()[max_memory]
-        assert (chunk_found == chunk_target), "found regions %s doesn't equal target regions %s" % (
+        assert chunk_found == chunk_target, "found regions %s doesn't equal target regions %s" % (
            str(chunk_found),
            str(chunk_target),
        )

@@ -5,6 +5,7 @@ import torch

try:
    from timm.models.vision_transformer import vit_large_patch16_384 as vit
+
    MODELS = [vit]
    HAS_REPO = True
except:

@@ -19,7 +20,7 @@ from colossalai.testing import clear_cache_before_run, parameterize, spawn

def get_data() -> Tuple[List, List]:
    data = torch.rand(1, 3, 384, 384)
-    meta_args = {'x': data}
+    meta_args = {"x": data}
    return data, meta_args


@@ -75,9 +75,9 @@ def assert_codegen_run(
    max_mem_ori = torch.cuda.max_memory_allocated() / 1024**2
    print("origin mem: %.2fMB, autochunk mem: %.2fMB" % (max_mem_ori - now_mem_ori, max_mem_gm - now_mem_gm))

-    assert torch.allclose(out_gm, out_model,
-                          atol=1e-3), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(
-                              torch.abs(out_gm - out_model))
+    assert torch.allclose(
+        out_gm, out_model, atol=1e-3
+    ), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean(torch.abs(out_gm - out_model))

    return chunks

@@ -121,7 +121,7 @@ def run_test(
    if get_chunk_target is not None:
        chunk_found = [i["region"] for i in chunks]
        chunk_target = get_chunk_target()[max_memory]
-        assert (chunk_found == chunk_target), "found regions %s doesn't equal target regions %s" % (
+        assert chunk_found == chunk_target, "found regions %s doesn't equal target regions %s" % (
            str(chunk_found),
            str(chunk_target),
        )