Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-08 20:40:34 +00:00
[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
@@ -8,16 +8,16 @@ from colossalai.device.device_mesh import DeviceMesh
 from .utils import merge_same_dim_mesh_list
 
-__all__ = ['_DimSpec', 'ShardingException', 'ShardingSpec']
+__all__ = ["_DimSpec", "ShardingException", "ShardingSpec"]
 
 ALLGATHER_COST = 20
 SHARD_COST = 5
 STEP_PENALTY = 6
-NAN = 'nan'
+NAN = "nan"
 
 
 class _DimSpec:
-    '''
+    """
     Sharding spec for single dimension of the sharded tensor describe the sharding dimension of
     logical device mesh and give a method to compute the difference between them.
 
     This class is used internally in ShardingSpec.
@@ -25,7 +25,7 @@ class _DimSpec:
     Argument:
         shard_list(List[int]): if shard_list is None, the dim spec will be 'R' type.
             Otherwise, the element in shard_list means the data will be sharded in that dimension.
-    '''
+    """
 
     def __init__(self, shard_list):
         self.is_replica = len(shard_list) == 0
@@ -37,41 +37,40 @@ class _DimSpec:
 
     def __repr__(self):
         if self.is_replica:
-            return 'R'
-        target = 'S'
+            return "R"
+        target = "S"
         for dim in self.shard_list:
             target += str(dim)
         return target
 
     def _convert_str_to_shard_list(self, str_spec):
-        '''
+        """
         Convert str_spec into shard_list.
 
         Argument:
             str_spec(str): dim spec in str type.
-        '''
+        """
 
-        if str_spec == 'R':
+        if str_spec == "R":
             return []
-        if str_spec == 'S0':
+        if str_spec == "S0":
             return [0]
-        if str_spec == 'S1':
+        if str_spec == "S1":
             return [1]
-        if str_spec == 'S01':
+        if str_spec == "S01":
             return [0, 1]
 
     def build_difference_2d_dict(self):
-        '''
+        """
         Build a difference mapping for 2D device mesh case. It will be used to
         compute the difference between DimSpec pairs.
-        '''
+        """
 
-        source_spec_list = ['R', 'S0', 'S1', 'S01']
-        target_spec_list = ['R', 'S0', 'S1', 'S01']
+        source_spec_list = ["R", "S0", "S1", "S01"]
+        target_spec_list = ["R", "S0", "S1", "S01"]
         difference_dict = {}
         for source_spec in source_spec_list:
             for target_spec in target_spec_list:
                 legal_sharding_dims = []
                 spec_pair = (deepcopy(source_spec), deepcopy(target_spec))
                 source_shard_list = self._convert_str_to_shard_list(source_spec)
                 target_shard_list = self._convert_str_to_shard_list(target_spec)
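As an aside for readers of this hunk: the dim-spec strings map to lists of logical mesh axes. The following standalone sketch (separate from the patch) mirrors _convert_str_to_shard_list and __repr__ for the 2D-mesh case; all names in it are local to the sketch.

# Standalone sketch (not part of this patch): the dim-spec notation used above,
# for a 2D logical device mesh.
SPEC_TO_SHARD_LIST = {
    "R": [],        # replicated on every device
    "S0": [0],      # sharded along logical mesh axis 0
    "S1": [1],      # sharded along logical mesh axis 1
    "S01": [0, 1],  # sharded along both mesh axes
}

def shard_list_to_spec(shard_list):
    # Inverse mapping, mirroring _DimSpec.__repr__: an empty list means "R",
    # otherwise "S" followed by the mesh axes.
    return "R" if not shard_list else "S" + "".join(str(axis) for axis in shard_list)

assert shard_list_to_spec(SPEC_TO_SHARD_LIST["S01"]) == "S01"
assert shard_list_to_spec(SPEC_TO_SHARD_LIST["R"]) == "R"
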
@@ -81,14 +80,17 @@ class _DimSpec:
                     difference = 0
 
                 # all_gather(source) -> target
-                elif len(source_shard_list
-                        ) == len(target_shard_list) + 1 and source_shard_list[:-1] == target_shard_list:
+                elif (
+                    len(source_shard_list) == len(target_shard_list) + 1 and source_shard_list[:-1] == target_shard_list
+                ):
                     difference = ALLGATHER_COST
 
                 # shard(source) -> target
-                elif len(source_shard_list) == len(
-                        target_shard_list) - 1 and source_shard_list == target_shard_list[:-1] and target_shard_list[
-                            -1] not in source_shard_list:
+                elif (
+                    len(source_shard_list) == len(target_shard_list) - 1
+                    and source_shard_list == target_shard_list[:-1]
+                    and target_shard_list[-1] not in source_shard_list
+                ):
                     difference = SHARD_COST
 
                 # S1 -> S0 or S0 -> S1
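The two reformatted branches above encode transition costs between dim specs. Below is a standalone sketch of just these two rules; the remaining branches (the `# S1 -> S0 or S0 -> S1` case, presumably involving STEP_PENALTY) are outside this hunk and are not reproduced. This is an illustration, not the method itself.

# Standalone sketch of the two cost rules visible in this hunk.
ALLGATHER_COST = 20
SHARD_COST = 5

def pairwise_cost(source_shard_list, target_shard_list):
    if source_shard_list == target_shard_list:
        return 0  # presumably the "difference = 0" case guarded above this hunk
    if (len(source_shard_list) == len(target_shard_list) + 1
            and source_shard_list[:-1] == target_shard_list):
        # all_gather(source) -> target, e.g. [0, 1] -> [0]
        return ALLGATHER_COST
    if (len(source_shard_list) == len(target_shard_list) - 1
            and source_shard_list == target_shard_list[:-1]
            and target_shard_list[-1] not in source_shard_list):
        # shard(source) -> target, e.g. [0] -> [0, 1]
        return SHARD_COST
    raise NotImplementedError("remaining transitions are outside this sketch")

assert pairwise_cost([0, 1], [0]) == ALLGATHER_COST  # "S01" -> "S0"
assert pairwise_cost([0], [0, 1]) == SHARD_COST      # "S0" -> "S01"
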
@@ -119,7 +121,7 @@ class _DimSpec:
         self.difference_dict = difference_dict
 
     def difference(self, other):
-        '''
+        """
         The difference between two _DimSpec.
 
         Argument:
@@ -135,7 +137,7 @@ class _DimSpec:
 
         Output:
             5
-        '''
+        """
         difference = self.difference_dict[(str(self), str(other))]
         return difference
 
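A hedged usage sketch of _DimSpec.difference follows. It assumes the module is importable as colossalai.tensor.sharding_spec and that difference_dict must be populated by an explicit build_difference_2d_dict() call; neither detail is confirmed by this excerpt.

# Hedged usage sketch; the module path and the explicit build_difference_2d_dict()
# call are assumptions, since neither is confirmed by this excerpt.
from colossalai.tensor.sharding_spec import _DimSpec

src = _DimSpec([0, 1])   # repr "S01": sharded along mesh axes 0 and 1
dst = _DimSpec([0])      # repr "S0":  sharded along mesh axis 0 only
src.build_difference_2d_dict()  # populate the (source, target) -> cost table

# "S01" -> "S0" drops the trailing mesh axis, i.e. an all-gather: ALLGATHER_COST (20)
print(src.difference(dst))
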
@@ -157,7 +159,7 @@ class ShardingNotDivisibleError(ShardingSpecException):
 
 
 class ShardingSpec:
-    '''
+    """
     Sharding spec for a tensor, it contains info of the logical device mesh this tensor belong
     to, the entire shape of the tensor before sharded, and the sharding sequence looks like
     [R, R, S0, S1].
@@ -168,13 +170,11 @@ class ShardingSpec:
         dim_partition_dict(Dict[int, List[int]], optional): The key is the dimension of tensor to be sharded,
             and the value of the key describe which logical axis will be sharded in that dimension.
         sharding_sequence(List[_DimSpec], optional): A straight view of ShardingSpec looks like [R, R, S0, S1].
-    '''
+    """
 
-    def __init__(self,
-                 device_mesh: DeviceMesh,
-                 entire_shape: torch.Size,
-                 dim_partition_dict=None,
-                 sharding_sequence=None):
+    def __init__(
+        self, device_mesh: DeviceMesh, entire_shape: torch.Size, dim_partition_dict=None, sharding_sequence=None
+    ):
         self.device_mesh = device_mesh
 
         if isinstance(entire_shape, (list, tuple)):
@@ -183,12 +183,17 @@ class ShardingSpec:
         self.dim_partition_dict = dim_partition_dict
         self.sharding_sequence = sharding_sequence
         if self.sharding_sequence is None:
-            assert self.dim_partition_dict is not None, f'dim_partition_dict should not be None, if sharding_sequence is NoneType object.'
-            self.dim_partition_dict = merge_same_dim_mesh_list(dim_size=len(entire_shape),
-                                                               dim_partition_dict=self.dim_partition_dict)
+            assert (
+                self.dim_partition_dict is not None
+            ), f"dim_partition_dict should not be None, if sharding_sequence is NoneType object."
+            self.dim_partition_dict = merge_same_dim_mesh_list(
+                dim_size=len(entire_shape), dim_partition_dict=self.dim_partition_dict
+            )
             self.convert_dict_to_shard_sequence()
         elif self.dim_partition_dict is None:
-            assert self.sharding_sequence is not None, f'sharding_sequence should not be None, if dim_partition_dict is NoneType object.'
+            assert (
+                self.sharding_sequence is not None
+            ), f"sharding_sequence should not be None, if dim_partition_dict is NoneType object."
             self.convert_shard_sequence_to_dict()
         self._sanity_check()
 
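For orientation, a construction sketch for ShardingSpec is given below. The DeviceMesh arguments (physical_mesh_id, mesh_shape) and the module paths are assumptions based on typical ColossalAI usage; only the ShardingSpec signature itself is visible in this diff.

# Hedged construction sketch: DeviceMesh arguments and module paths are assumptions.
import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_spec import ShardingSpec

physical_mesh_id = torch.arange(4)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape=(2, 2))

# Shard dim 2 along mesh axis 0 and dim 3 along mesh axis 1, i.e. the
# [R, R, S0, S1] sequence from the class docstring above.
spec = ShardingSpec(device_mesh, torch.Size((4, 8, 16, 32)), dim_partition_dict={2: [0], 3: [1]})
print(spec.sharding_sequence)  # expected to read as [R, R, S0, S1]
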
@@ -196,7 +201,7 @@ class ShardingSpec:
         res_list = ["DistSpec:"]
         res_list.append(f"\n\tshard_sequence: " + ",".join(str(dimspec) for dimspec in self.sharding_sequence))
         res_list.append(f"\n\tdevice_mesh_shape: {self.device_mesh.shape}")
-        return ' '.join(res_list)
+        return " ".join(res_list)
 
     def _sanity_check(self):
         # make sure all axes in logical device mesh only be used once
@@ -207,7 +212,8 @@ class ShardingSpec:
                     dim_check_list.remove(element)
                 else:
                     raise DuplicatedShardingDimensionError(
-                        f"find an invalid sharding axis {element} in dim_partition_dict in tensor dimension {dim}.")
+                        f"find an invalid sharding axis {element} in dim_partition_dict in tensor dimension {dim}."
+                    )
 
         # make sure that the dimension is not out of index
         for dim in self.dim_partition_dict.keys():
@@ -226,22 +232,22 @@ class ShardingSpec:
 
             if tensor_dim_size % num_devices != 0:
                 raise ShardingNotDivisibleError(
-                    f'The size of dimension at index {dim} is {tensor_dim_size}, it cannot be sharded over {num_devices} devices.'
+                    f"The size of dimension at index {dim} is {tensor_dim_size}, it cannot be sharded over {num_devices} devices."
                 )
 
     def convert_dict_to_shard_sequence(self):
-        '''
+        """
         Convert dim_partition_dict into list of _DimSpec, and assign it to sharding_sequence.
-        '''
+        """
         sharding_sequence = [_DimSpec([])] * len(self.entire_shape)
         for dim, shard_list in self.dim_partition_dict.items():
             sharding_sequence[dim] = _DimSpec(shard_list)
         self.sharding_sequence = sharding_sequence
 
     def convert_shard_sequence_to_dict(self):
-        '''
+        """
         Convert sharding_sequence into dim_partition_dict.
-        '''
+        """
         new_dim_partition_dict = {}
         for index, dim_spec in enumerate(self.sharding_sequence):
             if not dim_spec.is_replica:
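The two conversion methods above are the bridge between the dict form and the sequence form of a spec. A standalone sketch of the same round trip, using plain strings in place of _DimSpec objects, follows.

# Standalone sketch (not part of the patch) of the dict <-> sequence round trip
# performed by convert_dict_to_shard_sequence / convert_shard_sequence_to_dict.
def dict_to_sequence(num_dims, dim_partition_dict):
    sequence = ["R"] * num_dims
    for dim, shard_list in dim_partition_dict.items():
        sequence[dim] = "S" + "".join(str(axis) for axis in shard_list)
    return sequence

def sequence_to_dict(sequence):
    return {dim: [int(ch) for ch in spec[1:]] for dim, spec in enumerate(sequence) if spec != "R"}

seq = dict_to_sequence(4, {2: [0], 3: [1]})
assert seq == ["R", "R", "S0", "S1"]
assert sequence_to_dict(seq) == {2: [0], 3: [1]}
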
@@ -251,7 +257,7 @@ class ShardingSpec:
         self.dim_partition_dict = new_dim_partition_dict
 
     def sharding_sequence_difference(self, other):
-        '''
+        """
         This function is a naive version of difference computation. It just simply accumulates difference every dimension between the
         pair of sharding sequence.
 
@@ -276,21 +282,22 @@ class ShardingSpec:
 
         Return:
             difference(int): Difference between two ShardingSpec.
-        '''
+        """
         assert len(self.sharding_sequence) == len(
-            other.sharding_sequence), f'Cannot compare difference for two sharding specs with different length.'
+            other.sharding_sequence
+        ), f"Cannot compare difference for two sharding specs with different length."
         difference = 0
         for orig_dim_spec, other_dim_spec in zip(self.sharding_sequence, other.sharding_sequence):
             difference += orig_dim_spec.difference(other_dim_spec)
         return difference
 
     def get_sharded_shape_per_device(self):
-
         sharded_shape = list(self.entire_shape)
         for dim, shard_list in self.dim_partition_dict.items():
             mesh_list = [self.device_mesh.shape[mesh_dim] for mesh_dim in shard_list]
             shard_partitions = reduce(operator.mul, mesh_list, 1)
-            assert sharded_shape[
-                dim] % shard_partitions == 0, f'Cannot shard dimension {dim} into {shard_partitions} partitions.'
+            assert (
+                sharded_shape[dim] % shard_partitions == 0
+            ), f"Cannot shard dimension {dim} into {shard_partitions} partitions."
             sharded_shape[dim] //= shard_partitions
         return torch.Size(sharded_shape)
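To make the divisibility check in get_sharded_shape_per_device concrete, here is a standalone sketch of the same arithmetic with made-up shapes; all values are illustrative.

# Standalone sketch of the per-device shape arithmetic above: each sharded tensor
# dimension is divided by the product of the mesh extents it is sharded over.
import operator
from functools import reduce

entire_shape = [4, 8, 16, 32]          # global tensor shape (illustrative)
device_mesh_shape = (2, 2)             # logical 2 x 2 mesh (illustrative)
dim_partition_dict = {2: [0], 3: [1]}  # dim 2 on mesh axis 0, dim 3 on mesh axis 1

sharded_shape = list(entire_shape)
for dim, shard_list in dim_partition_dict.items():
    mesh_extents = [device_mesh_shape[axis] for axis in shard_list]
    partitions = reduce(operator.mul, mesh_extents, 1)
    assert sharded_shape[dim] % partitions == 0, "dimension must divide evenly"
    sharded_shape[dim] //= partitions

print(sharded_shape)  # [4, 8, 8, 16]: dims 2 and 3 are each halved across the mesh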