Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-17 15:11:20 +00:00)
[misc] Accelerate CI for zero and dist optim (#5758)

* remove fp16 from lamb
* remove d2h copy in checking states

Co-authored-by: Edenzzzz <wtan45@wisc.edu>
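The hunks below all follow the same pattern: the bare positional `False` previously passed to `check_state_dict_equal` is either dropped or replaced with explicit keywords (`ignore_device=False`, `ignore_dtype=True`), matching the stated goal of removing the device-to-host copy when checking states. As a rough, hypothetical sketch only (not the actual `colossalai.testing` helper), a comparison that avoids an unconditional d2h copy could look like the following; the name `compare_state_dicts` and its signature are assumptions for illustration:

import torch


def compare_state_dicts(d1, d2, ignore_device=True, ignore_dtype=False):
    # Hypothetical sketch: compares a flat dict of tensors/scalars without
    # always moving tensors to CPU first (the d2h copy this commit removes).
    assert d1.keys() == d2.keys(), f"key mismatch: {list(d1.keys())} vs {list(d2.keys())}"
    for k, v1 in d1.items():
        v2 = d2[k]
        if not isinstance(v1, torch.Tensor):
            assert v1 == v2, f"value mismatch for {k!r}"
            continue
        if not ignore_device:
            assert v1.device == v2.device, f"device mismatch for {k!r}"
        if v1.device != v2.device:
            # Copy only when devices actually differ, instead of unconditionally.
            v2 = v2.to(v1.device)
        if ignore_dtype:
            v2 = v2.to(v1.dtype)
        assert torch.equal(v1, v2), f"tensor mismatch for {k!r}"

Keeping the comparison on-device and skipping host transfers is presumably where part of the CI speedup comes from.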
@@ -72,7 +72,7 @@ def exam_state_dict_with_origin(placement_config, model_name, use_safetensors: b
     dist.barrier()

     new_bert_model = BertForSequenceClassification.from_pretrained(pretrained_path)
-    check_state_dict_equal(bert_model.state_dict(only_rank_0=False), new_bert_model.state_dict(), False)
+    check_state_dict_equal(bert_model.state_dict(only_rank_0=False), new_bert_model.state_dict())


 @clear_cache_before_run()
@@ -130,13 +130,11 @@ def exam_state_dict(placement_config, shard: bool, model_name: str, size_per_sha

     booster.load_model(new_model, model_ckpt_path)
     check_state_dict_equal(
-        model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False), False, ignore_dtype=True
+        model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False), ignore_dtype=True
     )

     booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
-    check_state_dict_equal(
-        optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(only_rank_0=False), False
-    )
+    check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(only_rank_0=False))
     for group in new_optimizer.param_groups:
         assert group["lr"] == 0.1

@@ -169,7 +167,7 @@ def exam_lazy_from_pretrained():
     booster.save_model(model, save_path, shard=False)
     dist.barrier()
     state_dict = torch.load(save_path, map_location="cpu")
-    check_state_dict_equal(state_dict, orig_state_dict, False, ignore_dtype=True)
+    check_state_dict_equal(state_dict, orig_state_dict, ignore_dtype=True)


 def run_dist(rank, world_size, port):
@@ -62,12 +62,12 @@ def exam_torch_load_from_gemini(shard: bool, model_name: str):
     check_state_dict_equal(
         model.state_dict(only_rank_0=False, prefix="module.module."),
         new_model.state_dict(),
-        False,
+        ignore_device=False,
         ignore_dtype=True,
     )

     new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
-    check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(), False)
+    check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(), ignore_device=False)

     # Check the new model/optimizer can successfully run.
     data = data_gen_fn()
@@ -128,7 +128,7 @@ def exam_gemini_load_from_torch(shard: bool, model_name: str):
     check_state_dict_equal(
         new_model.state_dict(only_rank_0=False, prefix="module.module."),
         model.state_dict(),
-        False,
+        ignore_device=False,
         ignore_dtype=True,
     )

@@ -145,7 +145,7 @@ def exam_gemini_load_from_torch(shard: bool, model_name: str):
                 k in old_group and k in new_group
             ), f"Old group's keys: {list(old_group.keys())}, New group's keys: {list(new_group.keys())}"
             assert old_group[k] == new_group[k]
-    check_state_dict_equal(old_state_dict["state"], new_state_dict["state"], False)
+    check_state_dict_equal(old_state_dict["state"], new_state_dict["state"], ignore_device=False)

     # Check the new model/optimizer can successfully run.
     data = data_gen_fn()
@@ -94,9 +94,9 @@ def exam_state_dict(shard: bool, model_name: str, size_per_shard: int, test_conf
     new_model, new_optimizer, criterion, _, _ = booster.boost(new_model, new_optimizer, criterion)

     booster.load_model(new_model, model_ckpt_path)
-    check_state_dict_equal(model.unwrap().state_dict(), new_model.unwrap().state_dict(), False)
+    check_state_dict_equal(model.unwrap().state_dict(), new_model.unwrap().state_dict())
     booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
-    check_state_dict_equal(optimizer.unwrap().state_dict(), new_optimizer.unwrap().state_dict(), False)
+    check_state_dict_equal(optimizer.unwrap().state_dict(), new_optimizer.unwrap().state_dict())
     dist.barrier()

     # Check whether the loaded model & optimizer works smoothly.
@@ -55,7 +55,7 @@ def check_low_level_zero_checkpointIO(stage: int, shard: bool, offload: bool):
     new_model, new_optimizer, _, _, _ = booster.boost(new_model, new_optimizer)

     booster.load_model(new_model, model_ckpt_path)
-    check_state_dict_equal(model.state_dict(), new_model.state_dict(), False)
+    check_state_dict_equal(model.state_dict(), new_model.state_dict())
     # check master weight
     assert isinstance(new_optimizer, LowLevelZeroOptimizer)
     working_param_id_set = set(id(p) for p in new_model.parameters())
@@ -70,7 +70,7 @@ def check_low_level_zero_checkpointIO(stage: int, shard: bool, offload: bool):
     )

     booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
-    check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict(), False)
+    check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict())
     torch.cuda.empty_cache()


@@ -110,7 +110,7 @@ def run_fn(stage, shard, offload, model_fn, data_gen_fn, output_transform_fn, lo
         booster.save_optimizer(optimizer, optimizer_ckpt_path, shard=False)
         new_model = new_booster.enable_lora(new_model, pretrained_dir=model_ckpt_path, lora_config=lora_config)
         new_model, new_optimizer, criterion, _, _ = new_booster.boost(new_model, new_optimizer, criterion)
-        check_state_dict_equal(model.state_dict(), new_model.state_dict(), False)
+        check_state_dict_equal(model.state_dict(), new_model.state_dict())

         # check master weight
         assert isinstance(new_optimizer, LowLevelZeroOptimizer)
@@ -126,7 +126,7 @@ def run_fn(stage, shard, offload, model_fn, data_gen_fn, output_transform_fn, lo
         )

         new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
-        check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict(), False)
+        check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict())

     except Exception as e:
         # return repr(e)
@@ -61,9 +61,9 @@ def exam_from_pretrained(plugin_type: str, model_name: str, shard=True, size_per
     new_model, new_optimizer, criterion, _, _ = booster.boost(new_model, new_optimizer, criterion)

     if plugin_type == "gemini":
-        check_state_dict_equal(model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False), False)
+        check_state_dict_equal(model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False))
     else:
-        check_state_dict_equal(model.unwrap().state_dict(), new_model.unwrap().state_dict(), False)
+        check_state_dict_equal(model.unwrap().state_dict(), new_model.unwrap().state_dict())
     dist.barrier()


@@ -52,12 +52,12 @@ def check_torch_ddp_checkpointIO(shard: bool, size_per_shard: int):
     )

     booster.load_model(new_model, model_ckpt_path)
-    check_state_dict_equal(model.state_dict(), new_model.state_dict(), False)
+    check_state_dict_equal(model.state_dict(), new_model.state_dict())

     booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
-    check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict(), False)
+    check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())
     booster.load_lr_scheduler(new_scheduler, lr_scheduler_ckpt_path)
-    check_state_dict_equal(scheduler.state_dict(), new_scheduler.state_dict(), False)
+    check_state_dict_equal(scheduler.state_dict(), new_scheduler.state_dict())


 def run_dist(rank, world_size, port):