[CI] fix some spelling errors (#3707)
* fix spelling errors in examples/community/
* fix spelling errors in tests/
* fix some spelling errors in tests/, colossalai/, etc.
@@ -217,7 +217,7 @@ def recv_backward(output_grad_shape,
         next_rank (int, optional): The rank of the source of the tensor.

     Returns:
-        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradident tensor list.
+        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradient tensor list.
     """
     if gpc.is_pipeline_last_stage():
         output_tensor_grad = None
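For context, the docstring above describes a point-to-point receive of the gradient tensor from the next pipeline stage, with the last stage returning None. A minimal sketch of that idea using plain torch.distributed primitives, not ColossalAI's actual implementation; the dtype, the mapping of "last stage" to the highest rank, and the function name are assumptions:

import torch
import torch.distributed as dist

def recv_backward_sketch(output_grad_shape, next_rank, dtype=torch.float32):
    # The last pipeline stage has no successor, so there is no gradient to
    # receive (ColossalAI checks gpc.is_pipeline_last_stage(); here we simply
    # assume the last stage is the highest rank).
    if dist.get_rank() == dist.get_world_size() - 1:
        return None
    # Allocate a buffer of the expected shape and receive it from the next stage.
    output_tensor_grad = torch.empty(output_grad_shape, dtype=dtype)
    dist.recv(output_tensor_grad, src=next_rank)
    return output_tensor_grad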
@@ -19,7 +19,7 @@ _unpickler = pickle.Unpickler


 def init_process_group():
-    """intialise process group by dist.new_group in the adjacent stages
+    """initialise process group by dist.new_group in the adjacent stages

     Args:
         None
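The docstring above refers to building process groups over adjacent pipeline stages with dist.new_group. A hedged sketch of that pattern, with the group layout and function name assumed rather than taken from ColossalAI's code:

import torch.distributed as dist

def init_adjacent_groups_sketch(world_size):
    # Create one two-rank group per pair of adjacent stages.
    # Note: dist.new_group must be called by every rank, in the same order,
    # even for groups that a given rank does not belong to.
    groups = []
    for stage in range(world_size - 1):
        groups.append(dist.new_group(ranks=[stage, stage + 1]))
    return groups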
@@ -91,11 +91,11 @@ class Initializer_Sequence(ProcessGroupInitializer):

         parallel_setting = []

-        local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode = \
+        local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode = \
             self._sequence_initializer.init_dist_group()
         # change mode to sequence
         mode = ParallelMode.SEQUENCE

-        parallel_setting.append((local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode))
+        parallel_setting.append((local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode))
         parallel_setting.append(self._sequence_dp_initializer.init_dist_group())
         return parallel_setting