[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Hongxin Liu authored on 2023-09-19 14:20:26 +08:00, committed by GitHub
parent 3c6b831c26
commit 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions
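Almost everything in the diff below is mechanical reformatting from the updated pre-commit hooks (a Black-style formatter): single-quoted strings become double-quoted, and long call signatures are exploded into one keyword argument per line with a trailing comma and a dedented closing parenthesis. A representative before/after pair, condensed from the hunks that follow:

# Before (yapf-style alignment, single quotes):
self.append_or_create_method_replacement(description={
    'forward': get_vit_flash_self_attention_forward(),
},
                                         policy=policy,
                                         target_key=ViTSelfAttention)

# After (Black style: double quotes, one argument per line, trailing comma):
self.append_or_create_method_replacement(
    description={
        "forward": get_vit_flash_self_attention_forward(),
    },
    policy=policy,
    target_key=ViTSelfAttention,
)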


@@ -16,11 +16,10 @@ from ..modeling.vit import (
 )
 from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
 
-__all__ = ['ViTPolicy', 'ViTModelPolicy', 'ViTForImageClassificationPolicy', 'ViTForMaskedImageModelingPolicy']
+__all__ = ["ViTPolicy", "ViTModelPolicy", "ViTForImageClassificationPolicy", "ViTForMaskedImageModelingPolicy"]
 
 
 class ViTPolicy(Policy):
-
     def config_sanity_check(self):
         pass
@@ -28,8 +27,7 @@ class ViTPolicy(Policy):
         return self.model
 
     def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]:
-        from transformers.models.vit.modeling_vit import ViTEmbeddings, ViTLayer, ViTModel, ViTOutput, ViTSelfAttention
+        from transformers.models.vit.modeling_vit import ViTEmbeddings, ViTLayer, ViTOutput, ViTSelfAttention
 
         policy = {}
@@ -38,77 +36,85 @@ class ViTPolicy(Policy):
             warnings.warn("Vit dosen't support sequence parallelism now, will ignore the sequence parallelism flag.")
 
         if self.shard_config.enable_tensor_parallelism:
-            policy[ViTEmbeddings] = ModulePolicyDescription(attribute_replacement={},
-                                                            param_replacement=[],
-                                                            sub_module_replacement=[
-                                                                SubModuleReplacementDescription(
-                                                                    suffix="dropout",
-                                                                    target_module=DropoutForReplicatedInput,
-                                                                )
-                                                            ])
+            policy[ViTEmbeddings] = ModulePolicyDescription(
+                attribute_replacement={},
+                param_replacement=[],
+                sub_module_replacement=[
+                    SubModuleReplacementDescription(
+                        suffix="dropout",
+                        target_module=DropoutForReplicatedInput,
+                    )
+                ],
+            )
 
-            policy[ViTLayer] = ModulePolicyDescription(attribute_replacement={
-                "attention.attention.num_attention_heads":
-                    self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
-                "attention.attention.all_head_size":
-                    self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
-            },
-                                                       param_replacement=[],
-                                                       sub_module_replacement=[
-                                                           SubModuleReplacementDescription(
-                                                               suffix="attention.attention.query",
-                                                               target_module=col_nn.Linear1D_Col,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="attention.attention.key",
-                                                               target_module=col_nn.Linear1D_Col,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="attention.attention.value",
-                                                               target_module=col_nn.Linear1D_Col,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="attention.attention.dropout",
-                                                               target_module=col_nn.DropoutForParallelInput,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="attention.output.dense",
-                                                               target_module=col_nn.Linear1D_Row,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="attention.output.dropout",
-                                                               target_module=col_nn.DropoutForReplicatedInput,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="intermediate.dense",
-                                                               target_module=col_nn.Linear1D_Col,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="output.dense",
-                                                               target_module=col_nn.Linear1D_Row,
-                                                           ),
-                                                           SubModuleReplacementDescription(
-                                                               suffix="output.dropout",
-                                                               target_module=col_nn.DropoutForReplicatedInput,
-                                                           ),
-                                                       ])
+            policy[ViTLayer] = ModulePolicyDescription(
+                attribute_replacement={
+                    "attention.attention.num_attention_heads": self.model.config.num_attention_heads
+                    // self.shard_config.tensor_parallel_size,
+                    "attention.attention.all_head_size": self.model.config.hidden_size
+                    // self.shard_config.tensor_parallel_size,
+                },
+                param_replacement=[],
+                sub_module_replacement=[
+                    SubModuleReplacementDescription(
+                        suffix="attention.attention.query",
+                        target_module=col_nn.Linear1D_Col,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="attention.attention.key",
+                        target_module=col_nn.Linear1D_Col,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="attention.attention.value",
+                        target_module=col_nn.Linear1D_Col,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="attention.attention.dropout",
+                        target_module=col_nn.DropoutForParallelInput,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="attention.output.dense",
+                        target_module=col_nn.Linear1D_Row,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="attention.output.dropout",
+                        target_module=col_nn.DropoutForReplicatedInput,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="intermediate.dense",
+                        target_module=col_nn.Linear1D_Col,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="output.dense",
+                        target_module=col_nn.Linear1D_Row,
+                    ),
+                    SubModuleReplacementDescription(
+                        suffix="output.dropout",
+                        target_module=col_nn.DropoutForReplicatedInput,
+                    ),
+                ],
+            )
 
         # use flash attention
         if self.shard_config.enable_flash_attention:
-            self.append_or_create_method_replacement(description={
-                'forward': get_vit_flash_self_attention_forward(),
-            },
-                                                     policy=policy,
-                                                     target_key=ViTSelfAttention)
+            self.append_or_create_method_replacement(
+                description={
+                    "forward": get_vit_flash_self_attention_forward(),
+                },
+                policy=policy,
+                target_key=ViTSelfAttention,
+            )
 
         # use jit fused operator
         if self.shard_config.enable_jit_fused:
-            self.append_or_create_method_replacement(description={
-                'forward': get_jit_fused_vit_output_forward(),
-                'dropout_add': get_jit_fused_dropout_add_func(),
-            },
-                                                     policy=policy,
-                                                     target_key=ViTOutput)
+            self.append_or_create_method_replacement(
+                description={
+                    "forward": get_jit_fused_vit_output_forward(),
+                    "dropout_add": get_jit_fused_dropout_add_func(),
+                },
+                policy=policy,
+                target_key=ViTOutput,
+            )
 
         return policy
 
     def new_model_class(self):
@@ -121,7 +127,7 @@ class ViTPolicy(Policy):
         """Get pipeline layers for current stage."""
         assert self.pipeline_stage_manager is not None, "pipeline_stage_manager is None"
 
-        if self.model.__class__.__name__ == 'ViTModel':
+        if self.model.__class__.__name__ == "ViTModel":
             module = self.model
         else:
             module = self.model.vit
@@ -138,22 +144,21 @@ class ViTPolicy(Policy):
     def set_pipeline_forward(self, model_cls: nn.Module, pipeline_forward: Callable, policy: Dict):
         if self.pipeline_stage_manager:
             stage_manager = self.pipeline_stage_manager
-            if self.model.__class__.__name__ == 'ViTModel':
+            if self.model.__class__.__name__ == "ViTModel":
                 module = self.model
             else:
                 module = self.model.vit
 
             layers_per_stage = Policy.distribute_layers(len(module.encoder.layer), stage_manager.num_stages)
             stage_index = Policy.get_stage_index(layers_per_stage, stage_manager.stage)
-            method_replacement = {'forward': pipeline_forward(stage_manager=stage_manager, stage_index=stage_index)}
-            self.append_or_create_method_replacement(description=method_replacement,
-                                                     policy=policy,
-                                                     target_key=model_cls)
+            method_replacement = {"forward": pipeline_forward(stage_manager=stage_manager, stage_index=stage_index)}
+            self.append_or_create_method_replacement(
+                description=method_replacement, policy=policy, target_key=model_cls
+            )
 
 
 # ViTModel
 class ViTModelPolicy(ViTPolicy):
-
     def __init__(self) -> None:
         super().__init__()
@@ -181,26 +186,29 @@ class ViTModelPolicy(ViTPolicy):
 # ViTForImageClassification
 class ViTForImageClassificationPolicy(ViTPolicy):
-
     def module_policy(self):
         from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel
 
         policy = super().module_policy()
 
         if self.shard_config.enable_tensor_parallelism:
             new_item = {
-                ViTForImageClassification:
-                    ModulePolicyDescription(sub_module_replacement=[
+                ViTForImageClassification: ModulePolicyDescription(
+                    sub_module_replacement=[
                         SubModuleReplacementDescription(
-                            suffix="classifier", target_module=Linear1D_Col, kwargs=dict(gather_output=True))
-                    ])
+                            suffix="classifier", target_module=Linear1D_Col, kwargs=dict(gather_output=True)
+                        )
+                    ]
+                )
             }
             policy.update(new_item)
 
         if self.shard_config.pipeline_stage_manager is not None:
             self.set_pipeline_forward(model_cls=ViTModel, pipeline_forward=ViTModel_pipeline_forward, policy=policy)
-            self.set_pipeline_forward(model_cls=ViTForImageClassification,
-                                      pipeline_forward=ViTForImageClassification_pipeline_forward,
-                                      policy=policy)
+            self.set_pipeline_forward(
+                model_cls=ViTForImageClassification,
+                pipeline_forward=ViTForImageClassification_pipeline_forward,
+                policy=policy,
+            )
 
         return policy
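A note on the classifier replacement in the hunk above: Linear1D_Col splits the weight along the output (class) dimension, so each tensor-parallel rank computes only a slice of the logits, and gather_output=True asks for those slices to be gathered so every rank ends up with the full [batch, num_classes] tensor. The following is a single-process sketch of that idea in plain PyTorch, not ColossalAI's actual implementation; all sizes are illustrative.

import torch

# Simulate column parallelism on one process: split the classifier weight
# along the output dimension, compute per-"rank" logit slices, then gather.
torch.manual_seed(0)
batch, hidden, num_classes, tp_size = 2, 8, 6, 2  # illustrative sizes
x = torch.randn(batch, hidden)
w = torch.randn(num_classes, hidden)

reference = x @ w.t()  # unsharded logits
slices = [x @ w_shard.t() for w_shard in w.chunk(tp_size, dim=0)]
gathered = torch.cat(slices, dim=1)  # what gather_output=True produces

assert torch.allclose(reference, gathered, atol=1e-6)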
@@ -219,7 +227,6 @@ class ViTForImageClassificationPolicy(ViTPolicy):
 
 # ViTForMaskedImageModeling
 class ViTForMaskedImageModelingPolicy(ViTPolicy):
-
     def __init__(self) -> None:
         super().__init__()
@@ -230,9 +237,11 @@ class ViTForMaskedImageModelingPolicy(ViTPolicy):
 
         if self.shard_config.pipeline_stage_manager is not None:
             self.set_pipeline_forward(model_cls=ViTModel, pipeline_forward=ViTModel_pipeline_forward, policy=policy)
-            self.set_pipeline_forward(model_cls=ViTForMaskedImageModeling,
-                                      pipeline_forward=ViTForMaskedImageModeling_pipeline_forward,
-                                      policy=policy)
+            self.set_pipeline_forward(
+                model_cls=ViTForMaskedImageModeling,
+                pipeline_forward=ViTForMaskedImageModeling_pipeline_forward,
+                policy=policy,
+            )
 
         return policy
 
     def get_held_layers(self) -> List[nn.Module]:
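For reference, the attribute_replacement entries applied to ViTLayer earlier in the diff encode the tensor-parallel head arithmetic: each rank keeps num_attention_heads / tensor_parallel_size heads and the matching share of all_head_size. A standalone sketch of that arithmetic follows; the config values are illustrative ViT-Base numbers (12 heads, hidden size 768) and the tensor-parallel size is a hypothetical choice, none of which appear in this commit.

# Illustrative config values (ViT-Base); tp_size is a hypothetical choice.
num_attention_heads = 12
hidden_size = 768
tp_size = 4

# Heads must divide evenly across tensor-parallel ranks so the Linear1D_Col
# sharding of query/key/value lines up with whole attention heads.
assert num_attention_heads % tp_size == 0

# Mirrors the two attribute_replacement entries applied to ViTLayer:
local_num_heads = num_attention_heads // tp_size  # 3 heads per rank
local_all_head_size = hidden_size // tp_size      # 192 per rank

print(local_num_heads, local_all_head_size)  # 3 192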