Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-08 04:24:47 +00:00)
[fx/profiler] tuned the calculation of memory estimation (#1619)
* [fx] tuned the meta info and rotor solver.
* [fx] remove import.
* [fx] remove import.
* [fx] remove import.
* [fx] tune the meta calculations.
* [fx] polish comments.
* [fx] remove assertions.
* [fx] modify test cases.
* [fx] modify test cases.
* [fx] optimize import.
* [fx
@@ -1,12 +1,8 @@
import torch
import torch.nn as nn
import colossalai
import colossalai.nn as col_nn
from torch.fx import symbolic_trace
from colossalai import META_COMPATIBILITY
from colossalai.fx.passes.meta_info_prop import MetaInfoProp, TensorMetadata

import pytest

BATCH_SIZE = 2
DIM_IN = 4
DIM_OUT = 16
@@ -22,6 +18,9 @@ def meta_check(meta_info_spec: TensorMetadata, orig_tensor: torch.Tensor):

def test_meta_info_prop():
    model = torch.nn.Linear(DIM_IN, DIM_OUT)
    input_sample = torch.rand(BATCH_SIZE, DIM_IN, device='meta')
    if META_COMPATIBILITY:
        from colossalai.fx.profiler import MetaTensor
        input_sample = MetaTensor(input_sample, fake_device='cpu')
    orig_output = model(input_sample)
    gm = symbolic_trace(model)
    MetaInfoProp(gm).run(input_sample)
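For readers skimming the diff, here is a minimal, self-contained sketch of the usage pattern the updated test exercises (assuming a ColossalAI build where META_COMPATIBILITY is set): trace the module with torch.fx, wrap the meta-device input in MetaTensor, and run MetaInfoProp over the traced graph so each node is annotated with the tensor metadata the profiler uses for memory estimation. The concrete sizes below are illustrative, not part of the commit.

# Minimal sketch (not part of the commit): propagate meta info through a traced Linear layer.
import torch
from torch.fx import symbolic_trace

from colossalai import META_COMPATIBILITY
from colossalai.fx.passes.meta_info_prop import MetaInfoProp

model = torch.nn.Linear(4, 16)
# Allocate the sample input on the 'meta' device so no real memory is touched.
input_sample = torch.rand(2, 4, device='meta')
if META_COMPATIBILITY:
    # On compatible torch versions, MetaTensor carries a fake device for the profiler.
    from colossalai.fx.profiler import MetaTensor
    input_sample = MetaTensor(input_sample, fake_device='cpu')

gm = symbolic_trace(model)          # trace the module into a torch.fx GraphModule
MetaInfoProp(gm).run(input_sample)  # annotate each graph node with tensor metadata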