Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-07 03:52:01 +00:00
[gemini] async grad chunk reduce (all-reduce&reduce-scatter) (#5713)
* [gemini] async grad chunk reduce (all-reduce & reduce-scatter)
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* [gemini] add test
* [gemini] rename func
* [gemini] update llama benchmark
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* [gemini] use tensor counter
* [gemini] change default config in GeminiPlugin and GeminiDDP
* [chore] typo
* [gemini] fix sync issue & add test cases
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
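The feature these tests exercise launches each chunk's gradient reduction as a non-blocking collective, so the all-reduce or reduce-scatter can overlap with the rest of the backward pass and is only waited on when the reduced result is actually needed. A minimal sketch of that pattern with plain torch.distributed (the helper name and handle bookkeeping are illustrative, not ColossalAI's actual internals):

import torch
import torch.distributed as dist

def launch_async_grad_reduce(grad_chunks, use_reduce_scatter: bool = False):
    """Launch one non-blocking collective per flat gradient chunk and return the work handles."""
    handles = []
    for chunk in grad_chunks:
        if use_reduce_scatter:
            # each rank keeps only its shard of the reduced gradient
            # (assumes chunk.numel() is divisible by the world size)
            shard = chunk.new_empty(chunk.numel() // dist.get_world_size())
            work = dist.reduce_scatter_tensor(shard, chunk, op=dist.ReduceOp.SUM, async_op=True)
        else:
            work = dist.all_reduce(chunk, op=dist.ReduceOp.SUM, async_op=True)
        handles.append(work)
    return handles

# ...backward computation continues while the collectives run...
# for work in handles:
#     work.wait()  # synchronize only when the reduced gradients are actually needed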
@@ -34,7 +34,8 @@ def check_equal(param, param_cp):
 @parameterize("init_device", [None, torch.device("cpu")])
 @parameterize("keep_gathered", [True, False])
 @parameterize("pin_memory", [True, False])
-def exam_chunk_basic(init_device, keep_gathered, pin_memory):
+@parameterize("async_op", [True, False])
+def exam_chunk_basic(init_device, keep_gathered, pin_memory, async_op):
     world_size = torch.distributed.get_world_size()
     pg = _get_default_group()
     my_chunk = Chunk(
@@ -94,9 +95,12 @@ def exam_chunk_basic(init_device, keep_gathered, pin_memory):

     assert my_chunk.tensor_state_cnter[TensorState.READY_FOR_REDUCE] == 4
     assert my_chunk.can_reduce
-    my_chunk.reduce()
+    my_chunk.reduce(async_op)
     assert my_chunk.tensor_state_cnter[TensorState.HOLD] == 4

+    if async_op:
+        my_chunk.wait_async_reduce()
+
     if keep_gathered is False:
         assert my_chunk.cuda_shard.size(0) == 1024 // world_size
         assert my_chunk.device_type == "cuda"
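The chunk test above shows the new call sequence: reduce() takes an async_op flag, and when it is set the caller must invoke wait_async_reduce() before touching the reduced shard. A rough sketch of the bookkeeping such a chunk could keep, assuming it holds one outstanding work handle (attribute and buffer names here are assumptions, not the real Chunk implementation):

import torch.distributed as dist

class AsyncReducibleChunk:
    """Illustrative chunk that reduces its flat gradient buffer synchronously or asynchronously."""

    def __init__(self, flat_buffer, process_group=None):
        self.flat_buffer = flat_buffer       # hypothetical stand-in for the chunk's payload
        self.process_group = process_group
        self.grad_reduce_work = None         # assumed name for the pending work handle

    def reduce(self, async_op: bool = False):
        work = dist.all_reduce(self.flat_buffer, group=self.process_group, async_op=async_op)
        # keep the handle only for the asynchronous case; the blocking call returns after completion
        self.grad_reduce_work = work if async_op else None

    def wait_async_reduce(self):
        if self.grad_reduce_work is not None:
            self.grad_reduce_work.wait()
            self.grad_reduce_work = None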
@@ -40,12 +40,14 @@ def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
 @parameterize("model_name", ["transformers_gpt_lm"])
 @parameterize("use_grad_checkpoint", [False, True])
 @parameterize("master_weights", [False, True])
+@parameterize("enable_async_reduce", [False, True])
 def exam_gpt_fwd_bwd(
     placement_config,
     keep_gather,
     model_name: str,
     use_grad_checkpoint: bool = False,
     master_weights: bool = True,
+    enable_async_reduce=True,
 ):
     init_device = get_accelerator().get_current_device()
     model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
@@ -69,7 +71,13 @@ def exam_gpt_fwd_bwd(
     config_dict[world_size]["chunk_size"] = 5000
     config_dict[world_size]["keep_gathered"] = keep_gather
     model = GeminiDDP(
-        model, config_dict, init_device, pin_memory=True, **placement_config, master_weights=master_weights
+        model,
+        config_dict,
+        init_device,
+        pin_memory=True,
+        **placement_config,
+        master_weights=master_weights,
+        enable_async_reduce=enable_async_reduce,
     )
     optimizer = HybridAdam(model.parameters(), lr=1e-3)
     zero_optim = GeminiOptimizer(optimizer, model, initial_scale=1)
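Outside the test suite the switch is a single constructor argument; the commit message also notes that the default is now set in GeminiPlugin and GeminiDDP. A hedged usage sketch through the booster API, assuming the plugin exposes the same enable_async_reduce flag as the tests above (the model, optimizer, and criterion are placeholders):

import torch
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

# distributed initialization (e.g. colossalai.launch_from_torch) is assumed to have run already
model = torch.nn.Linear(1024, 1024)
optimizer = HybridAdam(model.parameters(), lr=1e-3)
criterion = torch.nn.MSELoss()

# enable_async_reduce lets gradient all-reduce/reduce-scatter overlap with backward computation
plugin = GeminiPlugin(precision="bf16", placement_policy="static", enable_async_reduce=True)
booster = Booster(plugin=plugin)
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)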
@@ -50,8 +50,14 @@ def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
 @parameterize("model_name", ["transformers_gpt_lm"])
 @parameterize("master_weights", [False, True])
 @parameterize("use_grad_checkpoint", [False, True])
+@parameterize("enable_async_reduce", [False, True])
 def exam_gemini_grad_acc(
-    placement_config, keep_gathered: bool, model_name: str, master_weights: bool, use_grad_checkpoint: bool
+    placement_config,
+    keep_gathered: bool,
+    model_name: str,
+    master_weights: bool,
+    use_grad_checkpoint: bool,
+    enable_async_reduce: bool,
 ):
     init_device = get_accelerator().get_current_device()
     model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
@@ -81,10 +87,13 @@ def exam_gemini_grad_acc(
         pin_memory=True,
         enable_gradient_accumulation=True,
         master_weights=master_weights,
+        enable_async_reduce=enable_async_reduce,
         **placement_config,
     )
     optimizer = HybridAdam(gemini_model.parameters(), lr=1e-3)
-    gemini_optim = GeminiOptimizer(optimizer, gemini_model, initial_scale=1, max_norm=1.0)
+    gemini_optim = GeminiOptimizer(
+        optimizer, gemini_model, initial_scale=1, max_norm=1.0, enable_async_reduce=enable_async_reduce
+    )

     rank = dist.get_rank()

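The gradient-accumulation test combines the flag with enable_gradient_accumulation, which makes the ordering constraint explicit: any reduction launched asynchronously during backward has to finish before gradients are clipped or consumed by the optimizer. A generic illustration of that ordering with plain torch (the collectives are launched right before the step only to keep the sketch short; a real overlap scheme would launch them from backward hooks):

import torch
import torch.distributed as dist

def accumulated_step(model, optimizer, micro_batches, loss_fn, accum_steps: int = 4):
    for i, (inputs, targets) in enumerate(micro_batches):
        loss = loss_fn(model(inputs), targets) / accum_steps
        loss.backward()  # gradients accumulate locally across micro-batches
        if (i + 1) % accum_steps == 0:
            # launch one non-blocking all-reduce per accumulated gradient
            handles = [
                dist.all_reduce(p.grad, async_op=True)
                for p in model.parameters()
                if p.grad is not None
            ]
            # every handle must complete before clipping or stepping reads the gradients
            for work in handles:
                work.wait()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            optimizer.zero_grad()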
@@ -52,7 +52,8 @@ def check_param(model: GeminiDDP, torch_model: torch.nn.Module):
 @parameterize("placement_config", PLACEMENT_CONFIGS)
 @parameterize("model_name", ["transformers_gpt_lm"])
 @parameterize("master_weights", [True, False])
-def exam_grad_clipping(placement_config, model_name: str, master_weights: bool):
+@parameterize("enable_async_reduce", [False, True])
+def exam_grad_clipping(placement_config, model_name: str, master_weights: bool, enable_async_reduce: bool):
     set_seed(1912)
     model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
         iter(model_zoo.get_sub_registry(model_name).values())
@@ -84,6 +85,7 @@ def exam_grad_clipping(placement_config, model_name: str, master_weights: bool):
         chunk_init_device=init_device,
         pin_memory=True,
         master_weights=master_weights,
+        enable_async_reduce=enable_async_reduce,
         **placement_config,
     )

@@ -73,7 +73,10 @@ def check_param(model: GeminiDDP, torch_model: torch.nn.Module, dtype: torch.dty
 @parameterize("model_name", TEST_MODELS)
 @parameterize("mixed_precision", [torch.half, torch.bfloat16])
 @parameterize("master_weights", [True, False])
-def exam_model_step(placement_config, model_name: str, mixed_precision: torch.dtype, master_weights: bool):
+@parameterize("enable_async_reduce", [False, True])
+def exam_model_step(
+    placement_config, model_name: str, mixed_precision: torch.dtype, master_weights: bool, enable_async_reduce=True
+):
     set_seed(42)
     model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
         iter(model_zoo.get_sub_registry(model_name).values())
@@ -96,7 +99,12 @@ def exam_model_step(placement_config, model_name: str, mixed_precision: torch.dt
     config_dict[world_size]["chunk_size"] = 5000
     config_dict[world_size]["keep_gathered"] = False
     model = GeminiDDP(
-        model, config_dict, **placement_config, mixed_precision=mixed_precision, master_weights=master_weights
+        model,
+        config_dict,
+        **placement_config,
+        mixed_precision=mixed_precision,
+        master_weights=master_weights,
+        enable_async_reduce=enable_async_reduce,
     )

     optimizer = HybridAdam(model.parameters(), lr=1e-3)
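Taken together, the optimizer-step tests check that the asynchronous path stays numerically equivalent to the synchronous one. A condensed sketch of that comparison using the same classes the diffs exercise (the deep-copy setup, state_dict gathering, tolerances, and the base_model/batch placeholders are assumptions, not the actual test code):

import copy
import torch
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero import GeminiDDP, GeminiOptimizer

def step_once(base_model, data, enable_async_reduce: bool):
    # wrap a fresh copy so both runs start from identical weights
    model = GeminiDDP(
        copy.deepcopy(base_model),
        chunk_init_device=torch.device("cuda"),
        pin_memory=True,
        enable_async_reduce=enable_async_reduce,
    )
    optim = GeminiOptimizer(HybridAdam(model.parameters(), lr=1e-3), model, initial_scale=1)
    loss = model(data).sum()
    optim.backward(loss)  # reductions may run asynchronously during this call
    optim.step()          # any outstanding reduce must have completed before the update
    return model.state_dict()  # assumed to gather the full weights for comparison

sync_state = step_once(base_model, batch, enable_async_reduce=False)
async_state = step_once(base_model, batch, enable_async_reduce=True)
for name, tensor in sync_state.items():
    assert torch.allclose(tensor, async_state[name], rtol=1e-3, atol=1e-3)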