Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-26 20:23:26 +00:00)
[Sharderformer] Support zbv in Sharderformer Policy (#6150)
* [feat] Sharderformer support zbv
* [feat] support chatglm2, command, deepseek for zbv
* [feat] support zbv in shardformer policy: falcon,gptj,mistral,opt,qwen2,t5, vit, whisper
* [feat] support GPT2FusedLinearConv1D
* [feat] support GPT2FusedLinear (without tp)
* [fix] debug FusedConvLinear
* [shardfromer] support gpt2 policy for zbv, support GPT2FusedLinearConv Col and Row.
* [Shardformer] support FusedLinear1D base for zbv
* [shardformer] support zbv in FusedLinear1D base, Col, Row
* [shardformer] support zbv in blip2 and sam policy
* [shardformer] fix bug incorrect number of gradients; add fusedLinear base testcase;
* [fix] fix incorrect number of gradients ;
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* [Shardformer] add en doc for zbv;
* [fix] fix typo in Model compatibility table
* [fix] fix API Reference typo
* [Shardformer] add zh-Han doc for zbv
* [fix] fix Linear name; update en & zh doc
* [fix] fix shardformer doc import err
* [fix] fix shardconfig import in doc
* [fix] fix shardformer doc
* [fix] fix shardconfig doc
* [fix] fix config
* [fix] remove shardconfig
* [fix] fix doc
* [feat] add zbv doc string
* [fix] rm doc
* [fix] fix doc
* [fix] empty zbv doc
* [fix] ifx torch version
* [fix] fix torch version
* [fix] fix torch versions
* [fix] fix torch versions
* [fix] fix pyramid versions
* [fix] fix pyramid, zope version
* [fix] try fix workflow
* [fix] try import ShardConfig in yml
* [fix] fix workflow
* [fix] fix workflow
* [fix] fix workflow
* [fix] fix workflow
* [fix] fix ci
* [fix] fix zbv doc
* [fix] fix param for qkv linear, gpt2fused linear; fix requirments;
* [fix] fix policy use fused_linear
* [fix] fix weight grad none, err caused by weight ptr change
* [fix] fix comm in WeightGradStore
* [fix] fix WeightGradStore pop param
* [fix] remove useless param in doc; fix gpt2 qkv test;
* [shardformer] simplify execute_w_pass_grad_accum;
* [fix] rm useless comments
* [shardformer] simplify execute_w_pass_grad_accum & execute_w_pass
* [shardformer] Run meaningful doc test
* [shadformer] fix doc test cmd;

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
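The pattern that repeats throughout this diff is a placement rule for the output-side layers (final layer norm, projection head): under a ZB-V (zero-bubble, V-shaped) pipeline schedule each stage holds two model chunks and the last chunk folds back onto the first stage, so those layers belong to stage 0 rather than the last stage. A minimal, self-contained sketch of that rule follows; holds_output_layers is a hypothetical helper for illustration, not part of the ColossalAI API.

    # Hedged sketch of the stage-placement rule used throughout this policy change.
    def holds_output_layers(stage: int, num_stages: int, use_zbv: bool) -> bool:
        """Should this pipeline stage keep the final layer norm / output head?"""
        if use_zbv:
            # V-shaped schedule: the last model chunk is placed back on stage 0.
            return stage == 0
        # Interleaved / 1F1B schedules: the last stage keeps the output layers.
        return stage == num_stages - 1

    # With 4 pipeline stages:
    assert holds_output_layers(stage=0, num_stages=4, use_zbv=True)
    assert holds_output_layers(stage=3, num_stages=4, use_zbv=False)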
@@ -72,6 +72,8 @@ class WhisperPolicy(Policy):
                 "Whisper doesn't support sequence parallelism now, will ignore the sequence parallelism flag."
             )
 
+        use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv
+
         # TODO using the jit fused add_and_dropout affect the accuracy
         if self.shard_config.enable_jit_fused:
             self.shard_config.enable_jit_fused = False
@@ -93,6 +95,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -100,6 +103,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -107,6 +111,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -114,6 +119,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Row,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -121,6 +127,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -128,6 +135,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Row,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -148,6 +156,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -155,6 +164,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -162,6 +172,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -169,6 +180,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Row,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -176,6 +188,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -183,6 +196,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -190,6 +204,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -197,6 +212,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Row,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -204,6 +220,7 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Col,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                     SubModuleReplacementDescription(
@@ -211,6 +228,145 @@ class WhisperPolicy(Policy):
                         target_module=col_nn.Linear1D_Row,
                         kwargs={
                             "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
                         },
                     ),
                 ],
             )
+        elif use_zbv:
+            policy[WhisperEncoderLayer] = ModulePolicyDescription(
+                sub_module_replacement=[
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.q_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.k_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.v_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.out_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="fc1",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="fc2",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                ],
+            )
+
+            policy[WhisperDecoderLayer] = ModulePolicyDescription(
+                sub_module_replacement=[
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.q_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.k_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.v_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="self_attn.out_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="encoder_attn.q_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="encoder_attn.k_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="encoder_attn.v_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="encoder_attn.out_proj",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="fc1",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="fc2",
+                        target_module=col_nn.LinearWithGradAccum,
+                        kwargs={
+                            "fp8_communication": self.shard_config.fp8_communication,
+                            "use_zbv": use_zbv,
+                        },
+                    ),
+                ],
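The elif use_zbv branch above covers the case where tensor parallelism is disabled: the attention and MLP linears are still replaced, but with col_nn.LinearWithGradAccum, so the weight-gradient computation can be separated from the input-gradient computation and run later by the zero-bubble scheduler (the WeightGradStore mentioned in the commit message). A rough sketch of that split, using plain PyTorch autograd with hypothetical names (DeferredLinearFn, weight_grad_queue), not the actual ColossalAI implementation:

    import torch

    weight_grad_queue = []  # stands in for a WeightGradStore-style queue

    class DeferredLinearFn(torch.autograd.Function):
        # Sketch: return the input gradient immediately (the previous pipeline
        # stage needs it to keep the schedule moving), but queue the
        # weight-gradient computation so it can run later as a separate "W" pass.
        @staticmethod
        def forward(ctx, x, weight):
            ctx.save_for_backward(x, weight)
            return x @ weight.t()

        @staticmethod
        def backward(ctx, grad_out):
            x, weight = ctx.saved_tensors
            grad_input = grad_out @ weight
            weight_grad_queue.append(lambda: grad_out.t() @ x)  # deferred dL/dW
            return grad_input, None  # the weight grad is produced later by the W pass

    # Later, in the scheduler's W phase (sketch):
    #     for compute_w in weight_grad_queue:
    #         w_grad = compute_w()
    #         weight.grad = w_grad if weight.grad is None else weight.grad + w_grad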
@@ -460,30 +616,66 @@ class WhisperPolicy(Policy):
             num_decoder_layers = 0
 
         held_layers = []
-        layers_per_stage, decoder_starting_stage = self.distribute_whisper_layers(
-            num_encoder_layers, num_decoder_layers, stage_manager.num_stages
-        )
-        start_idx, end_idx = self.get_whisper_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage)
+        if stage_manager.is_interleave:
+            layers_per_stage, decoder_starting_stage = self.distribute_whisper_layers(
+                num_encoder_layers, num_decoder_layers, stage_manager.num_stages
+            )
+            stage_indices = self.get_whisper_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage)
 
-        if stage_manager.stage < decoder_starting_stage:
-            # current stage is in whisper's encoder
-            if stage_manager.is_first_stage():
-                held_layers.append(encoder.embed_positions)
-                held_layers.append(encoder.conv1)
-                held_layers.append(encoder.conv2)
-            if stage_manager.stage == decoder_starting_stage - 1:
-                held_layers.append(encoder.layer_norm)
-            held_layers.extend(encoder.layers[start_idx:end_idx])
+            if stage_manager.stage < decoder_starting_stage:
+                # current stage is in whisper's encoder
+                if stage_manager.is_first_stage(ignore_chunk=True):
+                    held_layers.append(encoder.embed_positions)
+                    held_layers.append(encoder.conv1)
+                    held_layers.append(encoder.conv2)
+                # interleaved: not use_zbv & stage_manager.stage == decoder_starting_stage - 1
+                # zbv: use_zbv & stage_manager.stage == first stage
+                if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
+                    not stage_manager.use_zbv and stage_manager.stage == decoder_starting_stage - 1
+                ):
+                    held_layers.append(encoder.layer_norm)
+                for start_idx, end_idx in stage_indices:
+                    held_layers.extend(encoder.layers[start_idx:end_idx])
+            else:
+                # current stage is in whisper's decoder
+                # TODO:(Jianghai) We divide encoder and decoder layers into different parts here,
+                # the case encoder and decoder put in same stage should be add in the future.
+                if stage_manager.stage == decoder_starting_stage:
+                    held_layers.append(decoder.embed_tokens)
+                    held_layers.append(decoder.embed_positions)
+                if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
+                    not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
+                ):
+                    held_layers.append(decoder.layer_norm)
+                for start_idx, end_idx in stage_indices:
+                    held_layers.extend(decoder.layers[start_idx:end_idx])
         else:
-            # current stage is in whisper's decoder
-            # TODO:(Jianghai) We divide encoder and decoder layers into different parts here,
-            # the case encoder and decoder put in same stage should be add in the future.
-            if stage_manager.stage == decoder_starting_stage:
-                held_layers.append(decoder.embed_tokens)
-                held_layers.append(decoder.embed_positions)
-            if stage_manager.is_last_stage():
-                held_layers.append(decoder.layer_norm)
-            held_layers.extend(decoder.layers[start_idx:end_idx])
+            layers_per_stage, decoder_starting_stage = self.distribute_whisper_layers(
+                num_encoder_layers, num_decoder_layers, stage_manager.num_stages
+            )
+            start_idx, end_idx = self.get_whisper_stage_index(
+                layers_per_stage, stage_manager.stage, decoder_starting_stage
+            )
+
+            if stage_manager.stage < decoder_starting_stage:
+                # current stage is in whisper's encoder
+                if stage_manager.is_first_stage():
+                    held_layers.append(encoder.embed_positions)
+                    held_layers.append(encoder.conv1)
+                    held_layers.append(encoder.conv2)
+                if stage_manager.stage == decoder_starting_stage - 1:
+                    held_layers.append(encoder.layer_norm)
+                held_layers.extend(encoder.layers[start_idx:end_idx])
+            else:
+                # current stage is in whisper's decoder
+                # TODO:(Jianghai) We divide encoder and decoder layers into different parts here,
+                # the case encoder and decoder put in same stage should be add in the future.
+                if stage_manager.stage == decoder_starting_stage:
+                    held_layers.append(decoder.embed_tokens)
+                    held_layers.append(decoder.embed_positions)
+                if stage_manager.is_last_stage():
+                    held_layers.append(decoder.layer_norm)
+                held_layers.extend(decoder.layers[start_idx:end_idx])
         return held_layers
 
     def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None:
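In the is_interleave/zbv path above, get_whisper_stage_index returns a list of (start, end) ranges, one per model chunk held by the stage, which is why the held-layer code loops over stage_indices instead of slicing once. A rough illustration of how a V-shaped assignment could map layer chunks to stages (assuming the layer count divides evenly; this is not ColossalAI's exact distribution logic, which also has to split encoder and decoder layers):

    def v_shaped_stage_indices(num_layers: int, num_stages: int, stage: int):
        # Each stage owns two chunks: one on the way "down" the V (stage order
        # 0..P-1) and one on the way back "up" (P-1..0), so stage 0 ends up with
        # the first and the last slice of layers.
        chunk = num_layers // (2 * num_stages)
        down = stage
        up = 2 * num_stages - 1 - stage
        return [(down * chunk, (down + 1) * chunk), (up * chunk, (up + 1) * chunk)]

    # 8 layers over 2 stages: stage 0 holds layers [0, 2) and [6, 8),
    # stage 1 holds layers [2, 4) and [4, 6).
    assert v_shaped_stage_indices(8, 2, 0) == [(0, 2), (6, 8)]
    assert v_shaped_stage_indices(8, 2, 1) == [(2, 4), (4, 6)]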
@@ -575,8 +767,15 @@ class WhisperForConditionalGenerationPolicy(WhisperPolicy):
 
     def get_held_layers(self) -> List[nn.Module]:
         held_layers = super().get_held_layers()
-        if self.pipeline_stage_manager.is_last_stage():
-            held_layers.append(self.model.proj_out)
+        stage_manager = self.pipeline_stage_manager
+        if stage_manager.is_interleave:
+            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
+                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
+            ):
+                held_layers.append(self.model.proj_out)
+        else:
+            if self.pipeline_stage_manager.is_last_stage():
+                held_layers.append(self.model.proj_out)
         return held_layers
 
     def get_shared_params(self) -> List[Dict[int, Tensor]]:
@@ -629,9 +828,17 @@ class WhisperForAudioClassificationPolicy(WhisperPolicy):
 
    def get_held_layers(self) -> List[nn.Module]:
        held_layers = super().get_held_layers()
-        if self.pipeline_stage_manager.is_last_stage():
-            held_layers.append(self.model.projector)
-            held_layers.append(self.model.classifier)
+        stage_manager = self.pipeline_stage_manager
+        if stage_manager.is_interleave:
+            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
+                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
+            ):
+                held_layers.append(self.model.projector)
+                held_layers.append(self.model.classifier)
+        else:
+            if self.pipeline_stage_manager.is_last_stage():
+                held_layers.append(self.model.projector)
+                held_layers.append(self.model.classifier)
        return held_layers
 
    def get_shared_params(self) -> List[Dict[int, Tensor]]: