Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-23 10:30:03 +00:00
[fx] support module with bias addition (#1780)
* [autoparallel] refactor tracer to fix bias addition issue
* [fx] support module with bias addition
* create bias_addition_module
* refactor file structure
* polish code
* fix unit test
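The hunks below mostly show the import reshuffle that came with the file-structure refactor. As a rough sketch of what tracing a "module with bias addition" involves, assuming the usual decomposition of nn.Linear into an explicit matmul plus bias add (illustrative code, not the ColossalAI implementation):

```python
import torch
import torch.nn.functional as F

# Hypothetical illustration: a Linear module with bias can be decomposed so the
# bias addition shows up as an explicit node in the traced graph instead of
# being hidden inside the module call.
def decomposed_linear(x, weight, bias):
    out = torch.matmul(x, weight.t())   # explicit matmul node
    out = out + bias                    # explicit bias-addition node
    return out

x = torch.randn(4, 8)
linear = torch.nn.Linear(8, 16)
ref = F.linear(x, linear.weight, linear.bias)
assert torch.allclose(decomposed_linear(x, linear.weight, linear.bias), ref, atol=1e-6)
```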
@@ -1,7 +1,6 @@
 from .activation_function import *
 from .arithmetic import *
-from .convolution import *
 from .embedding import *
 from .normalization import *
 from .python_ops import *
 from .torch_ops import *
+from .convolution import *
@@ -1,7 +1,8 @@
 import torch
-from ..registry import meta_patched_function
+
+from ...registry import meta_patched_function


 @meta_patched_function.register(torch.nn.functional.relu)
 def torch_nn_func_relu(input, inplace=False):
     return torch.empty(input.shape, device='meta')
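The recurring change in these patched-function files is the registry import path: the modules moved one package level deeper, so `from ..registry` becomes `from ...registry`. For context, a minimal sketch of how such a decorator-based registry can work (the class name and structure here are assumptions, not the exact ColossalAI code):

```python
import torch


class PatchRegistry:
    """Sketch: map a torch target to the function that fakes its output on the meta device."""

    def __init__(self):
        self._patches = {}

    def register(self, target):
        def wrapper(func):
            self._patches[target] = func
            return func
        return wrapper

    def get(self, target):
        return self._patches.get(target)


meta_patched_function = PatchRegistry()


@meta_patched_function.register(torch.nn.functional.relu)
def torch_nn_func_relu(input, inplace=False):
    # Only the output shape matters during tracing, so allocate on the meta device.
    return torch.empty(input.shape, device='meta')
```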
@@ -1,6 +1,6 @@
 import torch

-from ..registry import meta_patched_function
+from ...registry import meta_patched_function


 @meta_patched_function.register(torch.matmul)
@@ -57,6 +57,16 @@ def torch_bmm(input, mat2, *, out=None):
     return torch.empty(batch_size, n, p, device="meta")


+@meta_patched_function.register(torch.nn.functional.linear)
+def torch_linear(input, mat2, *, out=None):
+    if out is not None:
+        raise ValueError("Don't support in-place abs for MetaTensor analysis")
+    output_shape = list(input.shape)
+    output_feature = list(mat2.shape)[0]
+    output_shape[-1] = output_feature
+    return torch.empty(*output_shape, device="meta")
+
+
 @meta_patched_function.register(torch.addbmm)
 @meta_patched_function.register(torch.Tensor.addbmm)
 def torch_addbmm(input, mat1, mat2, *, beta=1, alpha=1, out=None):
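The newly added torch_linear patch only infers the output shape: the last dimension of the input is replaced by the weight's leading (output-feature) dimension, and an empty tensor is allocated on the meta device so no real computation happens during tracing. A small standalone check of that shape rule (illustrative only, not part of the commit):

```python
import torch

# Shape rule used by the patch: out.shape = input.shape[:-1] + (weight.shape[0],)
weight = torch.empty(16, 8, device="meta")    # (out_features, in_features)
inp = torch.empty(4, 10, 8, device="meta")    # (..., in_features)

out_shape = list(inp.shape)
out_shape[-1] = weight.shape[0]
out = torch.empty(*out_shape, device="meta")

assert out.shape == torch.Size([4, 10, 16])
```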
@@ -1,8 +1,10 @@
-import torch
 import collections
-from itertools import repeat
-from ..registry import meta_patched_function
 import math
+from itertools import repeat
+
+import torch
+
+from ...registry import meta_patched_function


 def _ntuple(n, name="parse"):
@@ -1,5 +1,6 @@
 import torch
-from ..registry import meta_patched_function
+
+from ...registry import meta_patched_function


 @meta_patched_function.register(torch.nn.functional.embedding)
@@ -10,4 +11,4 @@ def torch_nn_functional_embedding(input,
                                   norm_type=2.0,
                                   scale_grad_by_freq=False,
                                   sparse=False):
     return torch.empty(*input.shape, weight.shape[-1], device="meta")
@@ -1,5 +1,6 @@
 import torch
-from ..registry import meta_patched_function
+
+from ...registry import meta_patched_function


 @meta_patched_function.register(torch.nn.functional.layer_norm)
@@ -16,4 +17,4 @@ def torch_nn_func_batchnorm(input,
                             training=False,
                             momentum=0.1,
                             eps=1e-05):
     return torch.empty(input.shape, device='meta')
@@ -1,8 +1,11 @@
 import operator
+
 import torch
-from ..registry import meta_patched_function
+
 from colossalai.fx.proxy import ColoProxy
+
+from ...registry import meta_patched_function


 @meta_patched_function.register(operator.getitem)
 def operator_getitem(a, b):
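The python_ops patch registers `operator.getitem`, which torch.fx emits for indexing and slicing; during shape propagation it only has to produce a result with the right shape. A tiny illustration of the idea (not the actual implementation):

```python
import operator

import torch

x = torch.empty(4, 10, 16, device="meta")

# Indexing a meta tensor already yields a meta tensor with the sliced shape,
# which is all the tracer needs for shape propagation.
y = operator.getitem(x, (slice(None), 0))
assert y.shape == torch.Size([4, 16]) and y.is_meta
```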
@@ -1,5 +1,6 @@
 import torch
-from ..registry import meta_patched_function
+
+from ...registry import meta_patched_function


 @meta_patched_function.register(torch.arange)