Mirror of https://github.com/hpcaitech/ColossalAI.git
[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
@@ -6,7 +6,7 @@ from torch.distributed import ProcessGroup
 
 from colossalai.pipeline.stage_manager import PipelineStageManager
 
-__all__ = ['ShardConfig']
+__all__ = ["ShardConfig"]
 
 
 @dataclass
@@ -45,7 +45,8 @@ class ShardConfig:
     def __post_init__(self):
         if not self.enable_tensor_parallelism and self.enable_sequence_parallelism:
             raise ValueError(
-                "enable_sequence_parallelism can only be set to True when enable_tensor_parallelism is True")
+                "enable_sequence_parallelism can only be set to True when enable_tensor_parallelism is True"
+            )
         if not self.enable_sequence_parallelism and self.enable_sequence_overlap:
             raise ValueError("enable_sequence_overlap can only be set to True when enable_sequence_parallelism is True")
         if not self.enable_tensor_parallelism:
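The second hunk only reflows an existing raise statement, but it shows the validation ShardConfig performs at construction time: sequence parallelism requires tensor parallelism, and sequence overlap requires sequence parallelism. Below is a minimal standalone sketch of that __post_init__ pattern. The class name ShardConfigSketch and the field defaults are assumptions for illustration; the real ShardConfig carries additional fields (process groups, pipeline stage manager, etc.).

from dataclasses import dataclass


@dataclass
class ShardConfigSketch:
    # Defaults here are illustrative assumptions, not the library's actual defaults.
    enable_tensor_parallelism: bool = True
    enable_sequence_parallelism: bool = False
    enable_sequence_overlap: bool = False

    def __post_init__(self):
        # Sequence parallelism builds on the tensor-parallel group, so it cannot
        # be enabled on its own.
        if not self.enable_tensor_parallelism and self.enable_sequence_parallelism:
            raise ValueError(
                "enable_sequence_parallelism can only be set to True when enable_tensor_parallelism is True"
            )
        # Overlapping sequence-parallel communication only makes sense when
        # sequence parallelism itself is enabled.
        if not self.enable_sequence_parallelism and self.enable_sequence_overlap:
            raise ValueError(
                "enable_sequence_overlap can only be set to True when enable_sequence_parallelism is True"
            )


# Usage: an invalid combination fails fast at construction time.
try:
    ShardConfigSketch(enable_tensor_parallelism=False, enable_sequence_parallelism=True)
except ValueError as e:
    print(e)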