Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-08-26 19:50:53 +00:00
* [feat] Shardformer support zbv
* [feat] support chatglm2, command, deepseek for zbv
* [feat] support zbv in shardformer policy: falcon, gptj, mistral, opt, qwen2, t5, vit, whisper
* [feat] support GPT2FusedLinearConv1D
* [feat] support GPT2FusedLinear (without tp)
* [fix] debug FusedConvLinear
* [shardformer] support gpt2 policy for zbv, support GPT2FusedLinearConv Col and Row
* [Shardformer] support FusedLinear1D base for zbv
* [shardformer] support zbv in FusedLinear1D base, Col, Row
* [shardformer] support zbv in blip2 and sam policy
* [shardformer] fix bug: incorrect number of gradients; add FusedLinear base test case
* [fix] fix incorrect number of gradients
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* [Shardformer] add en doc for zbv
* [fix] fix typo in model compatibility table
* [fix] fix API Reference typo
* [Shardformer] add zh-Hans doc for zbv
* [fix] fix Linear name; update en & zh docs
* [fix] fix shardformer doc import error
* [fix] fix ShardConfig import in doc
* [fix] fix shardformer doc
* [fix] fix ShardConfig doc
* [fix] fix config
* [fix] remove ShardConfig
* [fix] fix doc
* [feat] add zbv docstring
* [fix] remove doc
* [fix] fix doc
* [fix] empty zbv doc
* [fix] fix torch version
* [fix] fix torch version
* [fix] fix torch versions
* [fix] fix torch versions
* [fix] fix pyramid versions
* [fix] fix pyramid, zope versions
* [fix] try to fix workflow
* [fix] try importing ShardConfig in yml
* [fix] fix workflow
* [fix] fix workflow
* [fix] fix workflow
* [fix] fix workflow
* [fix] fix CI
* [fix] fix zbv doc
* [fix] fix params for qkv linear and gpt2 fused linear; fix requirements
* [fix] fix policy to use fused_linear
* [fix] fix weight grad None error caused by weight pointer change
* [fix] fix comm in WeightGradStore
* [fix] fix WeightGradStore pop param
* [fix] remove unused param in doc; fix gpt2 qkv test
* [shardformer] simplify execute_w_pass_grad_accum
* [fix] remove useless comments
* [shardformer] simplify execute_w_pass_grad_accum & execute_w_pass
* [shardformer] run meaningful doc test
* [shardformer] fix doc test cmd

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
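Several of the fixes above (weight grad None, WeightGradStore comm, WeightGradStore pop param, execute_w_pass_grad_accum) revolve around deferred weight-gradient computation, the mechanism zbv (zero-bubble pipeline) scheduling relies on: instead of computing dW inside the backward pass, layers enqueue the computation and the scheduler runs it later to fill pipeline bubbles. A minimal sketch of that pattern, with hypothetical names (DeferredWeightGradStore is illustrative, not ColossalAI's actual class):

import queue

class DeferredWeightGradStore:
    def __init__(self):
        self._cache = []             # deferred dW closures for the current micro-batch
        self._queue = queue.Queue()  # one sealed entry per micro-batch

    def put(self, compute_dw):
        # Defer a weight-gradient computation (a zero-argument callable).
        self._cache.append(compute_dw)

    def flush(self):
        # Seal the current micro-batch's deferred computations.
        self._queue.put(self._cache)
        self._cache = []

    def pop(self):
        # Run one micro-batch's deferred weight-gradient computations,
        # typically scheduled into what would otherwise be a pipeline bubble.
        for compute_dw in self._queue.get():
            compute_dw()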
49 lines · 1.4 KiB · Python
from ._operation import all_to_all_comm
from .attn import AttnMaskType, ColoAttention, RingAttention, get_pad_info
from .dropout import DropoutForParallelInput, DropoutForReplicatedInput
from .embedding import Embedding1D, PaddingEmbedding, VocabParallelEmbedding1D
from .linear import Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingLMHead, VocabParallelLMHead1D
from .loss import cross_entropy_1d, dist_cross_entropy

# BaseLayerNorm added to this import: it was listed in __all__ below but never
# imported, which would break `from colossalai.shardformer.layer import *`.
from .normalization import BaseLayerNorm, FusedLayerNorm, FusedRMSNorm, LayerNorm, RMSNorm
from .parallel_module import ParallelModule
from .qkv_fused_linear import (
    FusedLinear,
    FusedLinear1D_Col,
    FusedLinear1D_Row,
    GPT2FusedLinearConv,
    GPT2FusedLinearConv1D_Col,
    GPT2FusedLinearConv1D_Row,
)

__all__ = [
    "Embedding1D",
    "VocabParallelEmbedding1D",
    "LinearWithGradAccum",
    "Linear1D_Col",
    "Linear1D_Row",
    "GPT2FusedLinearConv",
    "GPT2FusedLinearConv1D_Row",
    "GPT2FusedLinearConv1D_Col",
    "DropoutForParallelInput",
    "DropoutForReplicatedInput",
    "cross_entropy_1d",
    "dist_cross_entropy",
    "BaseLayerNorm",
    "LayerNorm",
    "RMSNorm",
    "FusedLayerNorm",
    "FusedRMSNorm",
    "FusedLinear1D_Col",
    "FusedLinear",
    "ParallelModule",
    "PaddingEmbedding",
    "PaddingLMHead",
    "VocabParallelLMHead1D",
    "AttnMaskType",
    "ColoAttention",
    "RingAttention",
    "get_pad_info",
    "all_to_all_comm",
    "FusedLinear1D_Row",
]
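The qkv_fused_linear re-exports above exist because a fused QKV weight cannot be column-sharded naively: it concatenates the query, key, and value projections along the output dimension, so a tensor-parallel shard must split each fused segment per rank and re-concatenate. A sketch of that splitting rule (shard_fused_qkv is illustrative, not part of the ColossalAI API):

import torch

def shard_fused_qkv(weight: torch.Tensor, n_fused: int, rank: int, world_size: int) -> torch.Tensor:
    # weight: (out_features, in_features), with out_features = n_fused * d,
    # i.e. [Wq | Wk | Wv] stacked along dim 0 when n_fused == 3.
    segments = weight.chunk(n_fused, dim=0)                            # [Wq, Wk, Wv]
    shards = [seg.chunk(world_size, dim=0)[rank] for seg in segments]  # this rank's slice of each
    return torch.cat(shards, dim=0)                                    # fused shard for this rank

Slicing the concatenated matrix directly would instead give rank 0 all of Wq and none of Wv, which is the kind of per-segment splitting FusedLinear1D_Col and GPT2FusedLinearConv1D_Col have to handle.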
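And since the missing BaseLayerNorm import fixed above would have made star-imports fail, a quick consistency check (assuming colossalai is installed) that every name in __all__ actually resolves:

import colossalai.shardformer.layer as layer

missing = [name for name in layer.__all__ if not hasattr(layer, name)]
assert not missing, f"__all__ lists names that are not exported: {missing}"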