Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-05-22 23:25:21 +00:00).
Commit: add CUDA KVCache kernel; annotate benchmark_kvcache_copy; add use_cuda flag; fix import path; move benchmark scripts to example/; remove benchmark code from test_kv_cache_memcpy.py; remove redundant code; address review comments.
40 lines
1.2 KiB
Python
"""Registry of optional kernel-extension classes for this package.

Each name imported below is an extension class defined in a sibling
submodule (cpu_adam, flash_attention, inference, layernorm, moe,
optimizer, softmax). ``ALL_EXTENSIONS`` collects the classes themselves
so callers can iterate over every available extension;``__all__``
mirrors the same names as strings to declare the public API of this
package. NOTE(review): the naming suggests CUDA/Arm/x86/NPU backends,
but the backend behavior lives in the submodules — confirm there.
"""

from .cpu_adam import CpuAdamArmExtension, CpuAdamX86Extension
from .flash_attention import (
    FlashAttentionDaoCudaExtension,
    FlashAttentionNpuExtension,
    FlashAttentionXformersCudaExtension,
)
from .inference import InferenceOpsCudaExtension
from .layernorm import LayerNormCudaExtension
from .moe import MoeCudaExtension
from .optimizer import FusedOptimizerCudaExtension
from .softmax import ScaledMaskedSoftmaxCudaExtension, ScaledUpperTriangleMaskedSoftmaxCudaExtension

# Every extension class exported by this package, in registration order.
# Kept as class objects (not strings) so callers can instantiate or
# query them directly.
ALL_EXTENSIONS = [
    CpuAdamArmExtension,
    CpuAdamX86Extension,
    LayerNormCudaExtension,
    MoeCudaExtension,
    FusedOptimizerCudaExtension,
    InferenceOpsCudaExtension,
    ScaledMaskedSoftmaxCudaExtension,
    ScaledUpperTriangleMaskedSoftmaxCudaExtension,
    FlashAttentionDaoCudaExtension,
    FlashAttentionXformersCudaExtension,
    FlashAttentionNpuExtension,
]

# Public API: one string per class in ALL_EXTENSIONS (kept as an
# explicit literal so static tools can see the exported names).
__all__ = [
    "CpuAdamArmExtension",
    "CpuAdamX86Extension",
    "LayerNormCudaExtension",
    "MoeCudaExtension",
    "FusedOptimizerCudaExtension",
    "InferenceOpsCudaExtension",
    "ScaledMaskedSoftmaxCudaExtension",
    "ScaledUpperTriangleMaskedSoftmaxCudaExtension",
    "FlashAttentionDaoCudaExtension",
    "FlashAttentionXformersCudaExtension",
    "FlashAttentionNpuExtension",
]