Mirror of https://github.com/hpcaitech/ColossalAI.git
[gemini] async grad chunk reduce (all-reduce&reduce-scatter) (#5713)
* [gemini] async grad chunk reduce (all-reduce & reduce-scatter)
* [pre-commit.ci] auto fixes from pre-commit.com hooks (see https://pre-commit.ci)
* [gemini] add test
* [gemini] rename func
* [gemini] update llama benchmark
* [pre-commit.ci] auto fixes from pre-commit.com hooks (see https://pre-commit.ci)
* [gemini] use tensor counter
* [gemini] change default config in GeminiPlugin and GeminiDDP
* [chore] typo
* [gemini] fix sync issue & add test cases
* [pre-commit.ci] auto fixes from pre-commit.com hooks (see https://pre-commit.ci)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
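The core primitive behind this change is PyTorch's asynchronous collectives: launching the gradient reduction with `async_op=True` returns a work handle immediately, so the reduce (all-reduce when the chunk stays gathered, reduce-scatter when it is sharded) can overlap with other work and be waited on later. Below is a minimal sketch of that pattern using plain `torch.distributed`; it is not Gemini's actual `Chunk` code, and `async_grad_reduce` is a hypothetical helper name.

```python
import torch
import torch.distributed as dist

def async_grad_reduce(grad: torch.Tensor, keep_gathered: bool):
    """Launch a non-blocking gradient reduction and return (result, work handle).

    Sketch only, not Gemini's implementation. Assumes the process group is
    initialized and `grad` is a flat 1-D tensor whose numel is divisible by
    the world size.
    """
    if keep_gathered:
        # All-reduce: every rank keeps a full, summed replica of the gradient.
        work = dist.all_reduce(grad, op=dist.ReduceOp.SUM, async_op=True)
        out = grad
    else:
        # Reduce-scatter: each rank ends up with its 1/world_size shard.
        world_size = dist.get_world_size()
        out = torch.empty(grad.numel() // world_size, dtype=grad.dtype, device=grad.device)
        work = dist.reduce_scatter_tensor(out, grad, op=dist.ReduceOp.SUM, async_op=True)
    return out, work

# Later, before the reduced gradient is consumed:
#   work.wait()
```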
```diff
@@ -34,7 +34,8 @@ def check_equal(param, param_cp):
 @parameterize("init_device", [None, torch.device("cpu")])
 @parameterize("keep_gathered", [True, False])
 @parameterize("pin_memory", [True, False])
-def exam_chunk_basic(init_device, keep_gathered, pin_memory):
+@parameterize("async_op", [True, False])
+def exam_chunk_basic(init_device, keep_gathered, pin_memory, async_op):
     world_size = torch.distributed.get_world_size()
     pg = _get_default_group()
     my_chunk = Chunk(
```
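`parameterize` here is ColossalAI's testing decorator; stacking several of them runs the test body over the Cartesian product of all listed values, so the new `async_op` decorator doubles the test matrix. A simplified stand-in (an assumption about the helper's behavior, not its actual implementation) shows the expansion:

```python
def parameterize(name, values):
    # Simplified stand-in for colossalai.testing.parameterize (assumed behavior):
    # call the wrapped function once per value, accumulating keyword arguments.
    def decorator(func):
        def wrapper(**kwargs):
            for value in values:
                func(**kwargs, **{name: value})
        return wrapper
    return decorator

@parameterize("pin_memory", [True, False])
@parameterize("async_op", [True, False])
def exam(pin_memory, async_op):
    print(pin_memory, async_op)

exam()  # four runs: (True, True), (True, False), (False, True), (False, False)
```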
```diff
@@ -94,9 +95,12 @@ def exam_chunk_basic(init_device, keep_gathered, pin_memory):
 
     assert my_chunk.tensor_state_cnter[TensorState.READY_FOR_REDUCE] == 4
     assert my_chunk.can_reduce
-    my_chunk.reduce()
+    my_chunk.reduce(async_op)
     assert my_chunk.tensor_state_cnter[TensorState.HOLD] == 4
 
+    if async_op:
+        my_chunk.wait_async_reduce()
+
     if keep_gathered is False:
         assert my_chunk.cuda_shard.size(0) == 1024 // world_size
         assert my_chunk.device_type == "cuda"
```
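Putting the two hunks together, the test now exercises an API where `reduce(async_op)` may return before the collective completes and `wait_async_reduce()` blocks until it has. The following hypothetical minimal shape of such a chunk matches the test's flow; the method names come from the diff, but the internals are an assumption, not ColossalAI's implementation:

```python
import torch
import torch.distributed as dist

class AsyncReduceChunk:
    """Hypothetical sketch of a chunk supporting asynchronous reduction."""

    def __init__(self, payload: torch.Tensor):
        self.payload = payload
        self._work = None  # pending collective handle, if any

    def reduce(self, async_op: bool = False):
        # With async_op=True, all_reduce returns a Work handle instead of blocking.
        work = dist.all_reduce(self.payload, op=dist.ReduceOp.SUM, async_op=async_op)
        if async_op:
            self._work = work  # remember the handle; caller must wait later

    def wait_async_reduce(self):
        # Block until the in-flight collective (if any) has finished.
        if self._work is not None:
            self._work.wait()
            self._work = None
```

This mirrors the test: `reduce(async_op)` is issued, `wait_async_reduce()` is called only on the async path, and only then are the post-reduce assertions (tensors back in the `HOLD` state, shard sizes) checked.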