[chore] remove unnecessary test & changes
@@ -40,7 +40,7 @@ def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
 @parameterize("model_name", ["transformers_gpt_lm"])
 @parameterize("use_grad_checkpoint", [False, True])
 @parameterize("master_weights", [False, True])
-@parameterize("max_prefetch", [0, 1, 4])
+@parameterize("max_prefetch", [0, 4])
 @parameterize("enable_async_reduce", [False, True])
 def exam_gpt_fwd_bwd(
     placement_config,
@@ -50,7 +50,7 @@ def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
 @parameterize("model_name", ["transformers_gpt_lm"])
 @parameterize("master_weights", [False, True])
 @parameterize("use_grad_checkpoint", [False, True])
-@parameterize("max_prefetch", [0, 1, 4])
+@parameterize("max_prefetch", [0, 4])
 @parameterize("enable_async_reduce", [False, True])
 def exam_gemini_grad_acc(
     placement_config,
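Both hunks above only trim the `max_prefetch` sweep from `[0, 1, 4]` to `[0, 4]`. Since stacked `@parameterize` decorators run the decorated test once per combination of their argument lists, dropping a value shrinks the test matrix. The sketch below illustrates the arithmetic with plain `itertools` for `exam_gpt_fwd_bwd`; it is not the actual `colossalai.testing` implementation.

# Illustrative sketch only (plain itertools, not colossalai.testing):
# stacked @parameterize decorators fan the test out over the Cartesian
# product of their argument lists, so the run count is the product of
# the list lengths.
from itertools import product

param_space = {
    "use_grad_checkpoint": [False, True],
    "master_weights": [False, True],
    "max_prefetch": [0, 4],  # was [0, 1, 4] before this commit
    "enable_async_reduce": [False, True],
}

combinations = list(product(*param_space.values()))
print(len(combinations))  # 16 runs per model; with [0, 1, 4] it was 24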
@@ -40,7 +40,9 @@ EXAMPLE_MODELS = [
 ]
 
 # bfloat16 cannot represent them exactly
-BF16_IGNORED_KEYS = ["masked_bias"]
+BF16_IGNORED_KEYS = [
+    "masked_bias",
+]
 
 
 def check_param(model: GeminiDDP, torch_model: torch.nn.Module, dtype: torch.dtype):
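For context on the "cannot represent them exactly" comment: in the HuggingFace GPT-2 attention module the `masked_bias` buffer is a large negative constant (about -1e4 in older transformers releases), and bfloat16 keeps only 8 bits of significand precision, so the value is rounded and no longer bit-equal to its float32 counterpart. A minimal, standalone check of that rounding (independent of the test code):

# Why keys like "masked_bias" are excluded from exact comparison under
# bfloat16: -1e4 is not representable with bfloat16's 8-bit significand.
import torch

x = torch.tensor(-1e4, dtype=torch.float32)
print(x.to(torch.bfloat16).item())  # -9984.0, not -10000.0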
@@ -71,15 +73,9 @@ def check_param(model: GeminiDDP, torch_model: torch.nn.Module, dtype: torch.dtype):
 @parameterize("model_name", TEST_MODELS)
 @parameterize("mixed_precision", [torch.half, torch.bfloat16])
 @parameterize("master_weights", [True, False])
-@parameterize("max_prefetch", [0, 1, 4])
 @parameterize("enable_async_reduce", [False, True])
 def exam_model_step(
-    placement_config,
-    model_name: str,
-    mixed_precision: torch.dtype,
-    master_weights: bool,
-    max_prefetch: int,
-    enable_async_reduce=True,
+    placement_config, model_name: str, mixed_precision: torch.dtype, master_weights: bool, enable_async_reduce=True
 ):
     set_seed(42)
     model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
@@ -108,7 +104,6 @@ def exam_model_step(
         **placement_config,
         mixed_precision=mixed_precision,
         master_weights=master_weights,
-        max_prefetch=max_prefetch,
         enable_async_reduce=enable_async_reduce,
     )
 
@@ -28,8 +28,7 @@ def ignore_the_first_parameter(model: torch.nn.Module):
 @parameterize("keep_gathered", [True, False])
 @parameterize("model_name", ["transformers_gpt_lm", "transformers_bert_for_sequence_classification"])
 @parameterize("master_weights", [False, True])
-@parameterize("max_prefetch", [0, 1, 4])
-def exam_state_dict(placement_config, keep_gathered, model_name: str, master_weights: bool, max_prefetch: int):
+def exam_state_dict(placement_config, keep_gathered, model_name: str, master_weights: bool):
     set_seed(431)
     model_builder, data_gen_fn, output_transform_fn, *_ = next(iter(model_zoo.get_sub_registry(model_name).values()))
 
@@ -45,14 +44,7 @@ def exam_state_dict(placement_config, keep_gathered, model_name: str, master_weights: bool, max_prefetch: int):
     config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
     config_dict[world_size]["chunk_size"] = 5000
     config_dict[world_size]["keep_gathered"] = keep_gathered
-    model = GeminiDDP(
-        model,
-        config_dict,
-        **placement_config,
-        pin_memory=True,
-        master_weights=master_weights,
-        max_prefetch=max_prefetch,
-    )
+    model = GeminiDDP(model, config_dict, **placement_config, pin_memory=True, master_weights=master_weights)
     model.train()
 
     zero_dict = model.state_dict(only_rank_0=False)
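The hunk above ends where the test gathers `zero_dict = model.state_dict(only_rank_0=False)`; the assertions that follow are not part of this diff. As a rough illustration of what such a check typically looks like, here is a hypothetical helper (the name, tolerances, and structure are illustrative, not the repository's actual code):

# Hypothetical helper, not the repository's assertion code: key-by-key
# comparison of the Gemini state dict against a plain torch model's.
import torch

def assert_state_dicts_close(zero_dict: dict, torch_dict: dict) -> None:
    assert set(zero_dict) == set(torch_dict), "state dict keys differ"
    for key, expected in torch_dict.items():
        # Cast both sides to float32 so fp16/bf16 entries compare cleanly.
        torch.testing.assert_close(zero_dict[key].float(), expected.float(), rtol=1e-3, atol=1e-3)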
@@ -20,8 +20,7 @@ PLACEMENT_CONFIGS = [
 
 @parameterize("placement_config", PLACEMENT_CONFIGS)
 @parameterize("keep_gathered", [True, False])
-@parameterize("max_prefetch", [0, 1, 4])
-def exam_zero_optim_state_dict(placement_config, keep_gathered, max_prefetch):
+def exam_zero_optim_state_dict(placement_config, keep_gathered):
     set_seed(431)
     model_builder, data_gen_fn, output_transform_fn, *_ = next(
         iter(model_zoo.get_sub_registry("transformers_gpt_lm").values())
@@ -36,7 +35,7 @@ def exam_zero_optim_state_dict(placement_config, keep_gathered, max_prefetch):
     config_dict[world_size]["chunk_size"] = 5000
     config_dict[world_size]["keep_gathered"] = keep_gathered
 
-    model = GeminiDDP(model, config_dict, **placement_config, pin_memory=True, max_prefetch=max_prefetch)
+    model = GeminiDDP(model, config_dict, **placement_config, pin_memory=True)
 
     optimizer = HybridAdam(model.parameters())
     optim = GeminiOptimizer(optimizer, model, initial_scale=32)  # initialize the link between chunk16 and chunk32
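For readers unfamiliar with the objects in the last hunk: GeminiDDP wraps the module so its parameters and gradients live in Gemini-managed chunks, HybridAdam is ColossalAI's CPU/GPU hybrid Adam, and GeminiOptimizer ties the low-precision working chunks to their fp32 master copies (the "chunk16 and chunk32" link noted in the inline comment). The sketch below shows that wiring under the assumption that a distributed process group has already been launched (e.g. via colossalai.launch); the import paths follow recent ColossalAI releases and may differ in older versions.

# Sketch only; assumes the process group is already initialized and that
# these import paths match the installed ColossalAI version.
import torch
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero import GeminiDDP, GeminiOptimizer


def wrap_for_gemini(module: torch.nn.Module):
    # Shard parameters/gradients into Gemini chunks; pin_memory mirrors the test.
    model = GeminiDDP(module, pin_memory=True)
    optimizer = HybridAdam(model.parameters())
    # GeminiOptimizer links the fp16 working chunks to their fp32 master chunks.
    optim = GeminiOptimizer(optimizer, model, initial_scale=32)
    return model, optim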