Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-04 10:34:41 +00:00
[Feature] Zigzag Ring attention (#5905)
* halfway
* fix cross-PP-stage position id length diff bug
* fix typo
* fix typo
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* unified cross entropy func for all shardformer models
* remove redundant lines
* add basic ring attn; debug cross entropy
* fwd bwd logic complete
* fwd bwd logic complete; add experimental triton rescale
* precision tests passed
* precision tests passed
* fix typos and remove misc files
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* add sp_mode to benchmark; fix varlen interface
* update softmax_lse shape by new interface
* change tester name
* remove buffer clone; support packed seq layout
* add varlen tests
* fix typo
* all tests passed
* add dkv_group; fix mask
* remove debug statements

---------

Co-authored-by: Edenzzzz <wtan45@wisc.edu>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
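The headline change is zigzag ring attention for sequence parallelism. Under a causal mask, plain ring attention leaves later ranks with far more work than earlier ones; the zigzag layout cuts the sequence into 2 * world_size chunks and gives rank i both chunk i and its mirror chunk 2 * world_size - 1 - i, so each rank's causal workload is roughly equal. A minimal sketch of that partitioning, using the hypothetical helper zigzag_split (not the PR's actual API):

import torch

def zigzag_split(seq: torch.Tensor, world_size: int, rank: int) -> torch.Tensor:
    # Cut the sequence dimension into 2 * world_size equal chunks, then pair a
    # front chunk (cheap, few causal keys to attend to) with its mirror from
    # the back (expensive, many causal keys) so every rank gets a balanced load.
    chunks = seq.chunk(2 * world_size, dim=1)
    return torch.cat([chunks[rank], chunks[2 * world_size - 1 - rank]], dim=1)

# 8 tokens across 2 ranks: rank 0 holds [0, 1, 6, 7], rank 1 holds [2, 3, 4, 5].
x = torch.arange(8).view(1, 8)
print(zigzag_split(x, world_size=2, rank=0))  # tensor([[0, 1, 6, 7]])
print(zigzag_split(x, world_size=2, rank=1))  # tensor([[2, 3, 4, 5]])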
@@ -203,7 +203,6 @@ class HybridParallelCheckpointIO(GeneralCheckpointIO):
            return

        Path(checkpoint).mkdir(parents=True, exist_ok=True)

        # Devices along the same dp_group share the same copies of model.
        # So only let the device with dp_rank == 0 save the model.
        if self.dp_rank != 0:
@@ -643,14 +642,12 @@ class HybridParallelCheckpointIO(GeneralCheckpointIO):
        assert isinstance(model, ModelWrapper), "Please boost the model before saving!"
        model._force_wait_all_gather()
        model = model.unwrap()

        if self.dp_rank != 0:
            return

        # The logic of collecting parameter shards along tp degree
        # has been implemented by _save_to_state_dict method of ParallelModule in Shardformer.
        state_dict = model.state_dict()

        if self.pp_size == 1:
            # When pipeline is not used, let master rank directly save the collected state_dict.
            if self.tp_rank == 0:
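The hunk above follows a common hybrid-parallel saving pattern: replicas along the dp group are identical, so every rank except dp_rank 0 returns early, and tensor-parallel shard gathering is hidden inside state_dict() (handled by ParallelModule._save_to_state_dict in Shardformer). A condensed sketch of that gate, with save_unsharded_sketch as a hypothetical stand-in for the real method:

import torch

def save_unsharded_sketch(model, dp_rank: int, tp_rank: int, pp_size: int, path: str):
    # Ranks along the dp group hold identical weights; one copy suffices.
    if dp_rank != 0:
        return
    # TP shards are assumed to be gathered inside state_dict() here, the way
    # Shardformer's ParallelModule._save_to_state_dict does it in the real code.
    state_dict = model.state_dict()
    # Without pipeline parallelism, the tp-master rank writes directly.
    if pp_size == 1 and tp_rank == 0:
        torch.save(state_dict, path)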
@@ -660,7 +657,6 @@ class HybridParallelCheckpointIO(GeneralCheckpointIO):
            state_dict_list = [None for _ in range(self.pp_size)]
            dist.barrier(self.pp_group)
            dist.all_gather_object(state_dict_list, state_dict, self.pp_group)

            # Only the master rank do the saving.
            if self.coordinator.is_master():
                complete_state_dict = dict()