Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-04-27 19:36:13 +00:00)
[feature] refactor colo attention (#5462)

* [extension] update api
* [feature] add colo attention
* [feature] update sdpa
* [feature] update npu attention
* [feature] update flash-attn
* [test] add flash attn test
* [test] update flash attn test
* [shardformer] update modeling to fit colo attention (#5465)
* [misc] refactor folder structure
* [shardformer] update llama flash-attn
* [shardformer] fix llama policy
* [devops] update tensornvme install
* [test] update llama test
* [shardformer] update colo attn kernel dispatch
* [shardformer] update blip2
* [shardformer] update chatglm
* [shardformer] update gpt2
* [shardformer] update gptj
* [shardformer] update opt
* [shardformer] update vit
* [shardformer] update colo attention mask prep
* [shardformer] update whisper
* [test] fix shardformer tests (#5514)
* [test] fix shardformer tests
* [test] fix shardformer tests
15 lines · 470 B · Python
from .flash_attention_dao_cuda import FlashAttentionDaoCudaExtension
from .flash_attention_npu import FlashAttentionNpuExtension
from .flash_attention_sdpa_cuda import FlashAttentionSdpaCudaExtension

try:
    # TODO: remove this after updating openmoe example
    import flash_attention  # noqa

    HAS_FLASH_ATTN = True
except:
    HAS_FLASH_ATTN = False


__all__ = ["FlashAttentionDaoCudaExtension", "FlashAttentionSdpaCudaExtension", "FlashAttentionNpuExtension"]
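As a usage sketch: downstream code can consult HAS_FLASH_ATTN to choose between the Dao flash-attn CUDA kernel and the SDPA fallback exported here. The import path and the pick_flash_attention_extension() helper below are illustrative assumptions, not ColossalAI's actual dispatch code (per the commit message above, the real kernel dispatch lives in shardformer).

# Illustrative sketch only: the package import path is an assumption about
# where this __init__.py lives, and pick_flash_attention_extension() is a
# hypothetical helper, not part of ColossalAI's API.
from extensions.flash_attention import (
    HAS_FLASH_ATTN,
    FlashAttentionDaoCudaExtension,
    FlashAttentionSdpaCudaExtension,
)


def pick_flash_attention_extension():
    # Prefer the Dao flash-attn CUDA kernel when the standalone
    # flash_attention package imported successfully; otherwise fall back
    # to the scaled_dot_product_attention (SDPA) based extension.
    if HAS_FLASH_ATTN:
        return FlashAttentionDaoCudaExtension
    return FlashAttentionSdpaCudaExtension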