Mirror of https://github.com/hpcaitech/ColossalAI.git
[Tensor] add module check and bert test (#1031)

* add Embedding
* Add bert test
* polish
* add check module test
* polish
* polish
* polish
* polish
colossalai/tensor/__init__.py
@@ -9,11 +9,11 @@ from .optim.colo_optimizer import ColoOptimizer
 from . import distspec
 from .dist_spec_mgr import DistSpecManager
 from .module_utils import register_colo_module, is_colo_module, get_colo_module, init_colo_module, check_colo_module
-from .modules import ColoLinear
+from .modules import ColoLinear, ColoEmbedding

 __all__ = [
     'ColoTensor', 'convert_parameter', 'colo_op_impl', 'ComputePattern', 'TensorSpec', 'ParallelAction',
     'named_params_with_colotensor', 'ColoOptimizer', 'ColoParameter', 'distspec', 'DistSpecManager',
     'register_colo_module', 'is_colo_module', 'get_colo_module', 'init_colo_module', 'check_colo_module',
-    'ColoLinear'
+    'ColoLinear', 'ColoEmbedding'
 ]
colossalai/tensor/colo_parameter.py
@@ -26,6 +26,13 @@ class ColoParameter(ColoTensor):
         self._type = TensorType.MODEL
         self._graph_node = None
+
+        # a list of modules sharing this ColoParameter with others
+        self._shared_param_modules = []
+
+    @property
+    def shared_param_modules(self):
+        return self._shared_param_modules

     @staticmethod
     def from_torch_tensor(tensor: torch.Tensor,
                           requires_grad: bool = True,
@@ -36,3 +43,4 @@ class ColoParameter(ColoTensor):

     def __repr__(self):
         return f'ColoParameter: {torch.Tensor.__repr__(self)}'
+
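The `shared_param_modules` list exists to make weight tying visible to spec checks. A minimal sketch of the idea, assuming the tied tensor is already a ColoParameter; the append below is illustrative, the real bookkeeping happens inside ColossalAI's module utilities:

    import torch
    from colossalai.tensor import ColoParameter

    # weight tying as in BERT: word embedding and output projection share one weight
    embed = torch.nn.Embedding(30522, 768)
    decoder = torch.nn.Linear(768, 30522, bias=False)
    decoder.weight = embed.weight

    if isinstance(embed.weight, ColoParameter):
        # record every module holding this parameter so re-sharding one of them
        # can trigger a spec re-check on all of them (illustrative only)
        embed.weight.shared_param_modules.extend([embed, decoder])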
colossalai/tensor/distspec.py
@@ -30,6 +30,12 @@ class _DistSpec:
                 return False
         return True

+    def __repr__(self) -> str:
+        res = "\nDistSpec:\n\t"
+        for attr in dir(self):
+            if not attr.startswith('__'):
+                res += f'{attr}: {str(getattr(self, attr))}\n\t'
+        return res

 def replicate(process_group: Optional[ProcessGroup] = None) -> _DistSpec:
     # process_group=None means global process group
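With `__repr__` added, a dist spec can simply be printed while debugging. A small sketch; the exact attributes shown depend on the fields `_DistSpec` carries:

    from colossalai.tensor import distspec

    spec = distspec.replicate()  # None -> global process group
    print(spec)                  # "DistSpec:" followed by its non-dunder attributes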
colossalai/tensor/module_utils.py
@@ -18,7 +18,6 @@ def get_colo_module(module: torch.nn.Module):
     global _COLOSSAL_MODULES
     if is_colo_module(module):
         colo_module = _COLOSSAL_MODULES[type(module)]
-        colo_module.register()
         return colo_module
     else:
         return None
@@ -43,6 +42,7 @@ def check_colo_module(module: torch.nn.Module, recursive=True):
             continue

         if compute_pattern is not None:
+            colo_module.register(compute_pattern)
             if not colo_module.has_compute_pattern(compute_pattern):
                 raise Exception(f'Invalid ColoParameter spec: ComputePattern {compute_pattern} in {module} is not allowed.')

@@ -65,28 +65,34 @@ def check_colo_module(module: torch.nn.Module, recursive=True):
                     break
         if match_specs == False:
             raise Exception(f'Invalid ColoParameter spec: Params in {module} are incorrectly sharded.')

     if recursive == True:
         for submodule in module.children():
             check_colo_module(submodule, recursive=True)

-def init_colo_module(module: torch.nn.Module, parallel_action: ParallelAction, recursive=True, label='default'):
+def init_colo_module(module: torch.nn.Module, parallel_action: ParallelAction, recursive=True, mode='default'):
     compute_pattern = parallel_action.compute_pattern
     if is_colo_module(module):
         # for each param
         # set DistSpec and ParallelAction
         colo_module = get_colo_module(module)
-        if not colo_module.has_compute_pattern_with_label(compute_pattern, label=label):
+        colo_module.register(compute_pattern)
+        if not colo_module.has_compute_pattern_with_mode(compute_pattern, mode=mode):
             raise NotImplementedError
-        for param_name, dist_spec in colo_module.get_dist_specs_with_label(compute_pattern, label=label).items():
+        # a set of modules that update at least one param during init;
+        # they need to be re-checked so all params still match one of the valid compute patterns
+        modules_update_param = {module}
+        for param_name, dist_spec in colo_module.get_dist_specs_with_mode(compute_pattern, mode=mode).items():
             if dist_spec is None:
                 continue
             param = module.get_parameter(param_name)
             if isinstance(param, ColoParameter):
                 spec = TensorSpec(dist_spec, parallel_action)
                 param.set_spec(spec)
-                check_colo_module(module, recursive=False)
-
+                for mod in param.shared_param_modules:
+                    modules_update_param.add(mod)
+        for mod in modules_update_param:
+            check_colo_module(mod, recursive=False)
     if recursive == True:
         for submodule in module.children():
-            init_colo_module(submodule, parallel_action, recursive=True, label=label)
+            init_colo_module(submodule, parallel_action, recursive=True, mode=mode)
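A usage sketch of the renamed entry point. Assumptions: a 1D tensor-parallel environment is already launched, the model's parameters are ColoParameters, and `ParallelAction` accepts the compute pattern as shown (its constructor is not part of this diff):

    import torch
    from colossalai.tensor import (ComputePattern, ParallelAction,
                                   init_colo_module, check_colo_module)

    model = torch.nn.Linear(64, 64)
    parallel_action = ParallelAction(ComputePattern.TP1D)  # constructor args assumed
    # shard the registered params with the 'row' variant of TP1D
    init_colo_module(model, parallel_action, recursive=True, mode='row')
    # verify every touched module still matches an allowed pattern
    check_colo_module(model, recursive=True)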
colossalai/tensor/modules/__init__.py
@@ -1,2 +1,3 @@
 from .colo_module import ColoModule
 from .linear import ColoLinear
+from .embedding import ColoEmbedding
colossalai/tensor/modules/colo_module.py
@@ -21,14 +21,14 @@ class ColoModule(object):
     def _register_shard_params(self, params: List[str]):
         self._shard_params = params

-    def _register_allowed_patterns(self, compute_pattern: ComputePattern, dist_specs: Dict[str, _DistSpec], label='default'):
+    def _register_allowed_patterns(self, compute_pattern: ComputePattern, dist_specs: Dict[str, _DistSpec], mode='default'):
         assert list(dist_specs.keys()).sort() == self._shard_params.sort(), 'Every registered param should have dist_spec.'
         if not compute_pattern in self._allowed_patterns:
             self._allowed_patterns[compute_pattern] = {}
-        self._allowed_patterns[compute_pattern][label] = dist_specs
+        self._allowed_patterns[compute_pattern][mode] = dist_specs

-    def _set_default(self, compute_pattern: ComputePattern, target_label):
-        self._allowed_patterns[compute_pattern]['default'] = self._allowed_patterns[compute_pattern][target_label]
+    def _set_default(self, compute_pattern: ComputePattern, target_mode):
+        self._allowed_patterns[compute_pattern]['default'] = self._allowed_patterns[compute_pattern][target_mode]

     def has_compute_pattern(self, compute_pattern: ComputePattern):
         return compute_pattern in self._allowed_patterns
@@ -37,15 +37,15 @@ class ColoModule(object):
         assert self.has_compute_pattern(compute_pattern)
         return self._allowed_patterns[compute_pattern]

-    def has_compute_pattern_with_label(self, compute_pattern: ComputePattern, label='default'):
-        return compute_pattern in self._allowed_patterns and label in self._allowed_patterns[compute_pattern]
+    def has_compute_pattern_with_mode(self, compute_pattern: ComputePattern, mode='default'):
+        return compute_pattern in self._allowed_patterns and mode in self._allowed_patterns[compute_pattern]

-    def get_dist_specs_with_label(self, compute_pattern: ComputePattern, label='default'):
-        assert self.has_compute_pattern_with_label(compute_pattern, label)
-        return self._allowed_patterns[compute_pattern][label]
+    def get_dist_specs_with_mode(self, compute_pattern: ComputePattern, mode='default'):
+        assert self.has_compute_pattern_with_mode(compute_pattern, mode)
+        return self._allowed_patterns[compute_pattern][mode]

     def get_param_names(self):
         return self._shard_params

-    def register(self):
+    def register(self, compute_pattern):
         raise NotImplementedError
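One thing worth noting in `_register_allowed_patterns` above: `list.sort()` sorts in place and returns None, so the assert compares None == None and always passes. A check that actually enforces the message would be:

    assert sorted(dist_specs.keys()) == sorted(self._shard_params), 'Every registered param should have dist_spec.'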
colossalai/tensor/modules/embedding.py (new file, 36 lines)
@@ -0,0 +1,36 @@
+from .colo_module import ColoModule
+from colossalai.tensor import ComputePattern, distspec
+from colossalai.core import global_context as gpc
+from colossalai.context.parallel_mode import ParallelMode
+
+class ColoEmbedding(ColoModule):
+    def __init__(self):
+        super(ColoEmbedding, self).__init__()
+        self._register_shard_params(['weight'])
+
+    def register(self, compute_pattern):
+        if not compute_pattern in self._allowed_patterns:
+            if ComputePattern.TP1D == compute_pattern:
+                self._set_TP1D()
+
+    def _set_TP1D(self):
+        # TP1D Row Embedding
+        _compute_pattern = ComputePattern.TP1D
+        self._register_allowed_patterns(
+            compute_pattern=_compute_pattern,
+            dist_specs={
+                'weight': distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
+            },
+            mode='row',
+        )
+
+        # TP1D Col Embedding
+        self._register_allowed_patterns(
+            compute_pattern=_compute_pattern,
+            dist_specs={
+                'weight': distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
+            },
+            mode='col',
+        )
+
+        self._set_default(compute_pattern=_compute_pattern, target_mode='row')
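ColoEmbedding mirrors ColoLinear but shards only 'weight' (an nn.Embedding has no bias). For it to take effect, `torch.nn.Embedding` presumably gets mapped to it via the exported `register_colo_module` helper; the call below is an assumption based on that helper's name, as the mapping itself is not part of this hunk:

    import torch
    from colossalai.tensor import register_colo_module
    from colossalai.tensor.modules import ColoEmbedding

    # assumed registration: associate the torch module type with its ColoModule
    register_colo_module(torch.nn.Embedding, ColoEmbedding())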
colossalai/tensor/modules/linear.py
@@ -7,12 +7,11 @@ class ColoLinear(ColoModule):
     def __init__(self):
         super(ColoLinear, self).__init__()
         self._register_shard_params(['weight', 'bias'])
-        self._register = False

-    def register(self):
-        if self._register == False:
-            self._set_TP1D()
-            self._register = True
+    def register(self, compute_pattern):
+        if not compute_pattern in self._allowed_patterns:
+            if ComputePattern.TP1D == compute_pattern:
+                self._set_TP1D()

     def _set_TP1D(self):
         # TP1D Row Linear
@@ -23,7 +22,7 @@ class ColoLinear(ColoModule):
                 'weight': distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
                 'bias': None
             },
-            label='row',
+            mode='row',
         )

         # TP1D Col Linear
@@ -33,7 +32,7 @@ class ColoLinear(ColoModule):
                 'weight': distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
                 'bias': distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)])
             },
-            label='col',
+            mode='col',
         )

-        self._set_default(compute_pattern=_compute_pattern, target_label='row')
+        self._set_default(compute_pattern=_compute_pattern, target_mode='row')