Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-02 17:46:42 +00:00
[autoparallel] Add F.conv metainfo (#2069)
* [fx] metainfo class for auto parallel
* [fx] add unit test for linear metainfo
* [fx] fix bwd param for linear
* [fx] modify unit test
* [fx] modify unit test
* [fx] modify import
* [fx] modify import
* [fx] modify import
* [fx] move meta profiler to auto parallel
* [fx] add conv metainfo class
* [fx] restore profiler
* [fx] restore meta profiler
* [autoparallel] modify unit test
* [fx] modify unit test
* [autoparallel] add batchnorm metainfo class
* [autoparallel] fix batchnorm unit test function declaration
* [fx] restore profiler
* [fx] add relu metainfo class
* [fx] restore profiler
* [autoparallel] modify metainfo input
* [autoparallel] add pooling metainfo
* [autoparallel] add F.linear metainfo generator
* [autoparallel] add binary elementwise metainfo
* [fx] recover profiler
* [autoparallel] fix forward memory calculation
* [autoparallel] modify constants.py
* [autoparallel] remove redundant print
* [autoparallel] add F.conv metainfo
* [autoparallel] linear fix
@@ -22,6 +22,9 @@ __all__ = ['convnd_meta_info']
 @meta_register.register(torch.nn.Conv1d)
 @meta_register.register(torch.nn.Conv2d)
 @meta_register.register(torch.nn.Conv3d)
+@meta_register.register(torch.nn.functional.conv1d)
+@meta_register.register(torch.nn.functional.conv2d)
+@meta_register.register(torch.nn.functional.conv3d)
 def convnd_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:
     """torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d meta info generator
     The atens graph of torch.nn.Convnd with bias is
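With the functional variants registered alongside the module classes, the same convnd_meta_info generator is looked up whether a graph node calls nn.Conv2d or torch.nn.functional.conv2d. The sketch below illustrates the registry-decorator pattern this relies on; the MetaRegistry class, its fields, and the usage comment are assumptions for illustration, not ColossalAI's actual meta_register implementation.

# Illustrative sketch of a registry keyed by callables (hypothetical class,
# NOT the actual ColossalAI meta_register implementation).
from typing import Callable, Dict

class MetaRegistry:
    def __init__(self):
        self._generators: Dict[Callable, Callable] = {}

    def register(self, target: Callable):
        """Decorator: map `target` (an nn.Module class or a functional op)
        to a meta-info generator function."""
        def wrapper(fn: Callable) -> Callable:
            self._generators[target] = fn
            return fn
        return wrapper

    def get(self, target: Callable) -> Callable:
        return self._generators[target]

meta_register = MetaRegistry()

# Registering both the module classes and the functional variants lets a
# solver resolve the same generator for nn.Conv2d and F.conv2d nodes alike.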
@@ -57,12 +60,19 @@ def convnd_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:
     has_bias: bool = False
     input_tensor = next(filter(lambda x: x.type == OperationDataType.ARG, args)).data
     output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data
-    weight_tensor = next(filter(lambda x: x.name == 'weight', args)).data
+    weight_tensors = [x.data for x in args if x.type == OperationDataType.PARAM]
 
     # check if conv has bias
-    if len(args) == 4:
-        bias_tensor = next(filter(lambda x: x.name == 'bias', args)).data
+    if len(weight_tensors) > 1:
         has_bias = True
+        # bias tensor's shape only has one dimension
+        if len(weight_tensors[0].shape) == 1:
+            bias_tensor, weight_tensor = weight_tensors
+        else:
+            weight_tensor, bias_tensor = weight_tensors
+
+    else:
+        weight_tensor = weight_tensors[0]
 
     # construct input args for forward
     fwd_args = [None] * 9
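Because the functional form passes weight and (optionally) bias as PARAM operands rather than as named module attributes, the generator no longer infers the bias from the argument count; it collects all PARAM tensors and tells weight from bias by dimensionality (a conv bias is 1-D, a conv weight has at least 3 dims). A minimal self-contained sketch of that disambiguation follows; the helper name and shapes are hypothetical.

# Minimal sketch of the weight/bias disambiguation idea on plain tensors;
# names here are illustrative, not the ColossalAI data structures.
import torch

def split_weight_and_bias(param_tensors):
    """Given the PARAM operands of a conv node (1 or 2 tensors, in any order),
    return (weight, bias) where bias may be None."""
    if len(param_tensors) > 1:
        # a conv bias is always 1-D, while a conv weight has >= 3 dims
        if len(param_tensors[0].shape) == 1:
            bias, weight = param_tensors
        else:
            weight, bias = param_tensors
        return weight, bias
    return param_tensors[0], None

weight = torch.empty(16, 3, 3, 3)   # out_channels, in_channels, kH, kW
bias = torch.empty(16)
print(split_weight_and_bias([bias, weight])[0].shape)  # torch.Size([16, 3, 3, 3])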
@@ -143,7 +143,7 @@ def linear_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:
     # NOTE: Linear don't have buffer and temp in forward and backward phase
     # the forward activation cost is the size of output_tensor, parameter cost is the size of weight_tensor
     # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward
-    fwd_memory_cost = MemoryCost(activation=activation_size(output_tensor),
+    fwd_memory_cost = MemoryCost(activation=activation_size([input_tensor, output_tensor]),
                                  parameter=activation_size(weight_tensor),
                                  temp=0,
                                  buffer=0)
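The "linear fix" in this hunk widens the forward activation accounting of the linear meta-info generator from the output tensor alone to the input plus the output, since the input must be kept for the backward pass. Below is a rough sketch of the kind of byte counting involved, assuming an activation_size-style helper that sums numel * element_size; the tensor_bytes name and the shapes are made up for illustration and are not the ColossalAI helper.

# Rough sketch of activation byte accounting (hypothetical helper).
import torch

def tensor_bytes(tensors):
    # accept a single tensor or a list, mirroring an activation_size-style API
    if isinstance(tensors, torch.Tensor):
        tensors = [tensors]
    return sum(t.numel() * t.element_size() for t in tensors)

inp = torch.empty(32, 1024)      # fp32 input, saved for backward
out = torch.empty(32, 2048)      # fp32 output of the linear layer
# counting only the output under-reports forward activation memory:
print(tensor_bytes(out))          # 262144 bytes
print(tensor_bytes([inp, out]))   # 393216 bytes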