# Optional dependency check: set a HAS_TRITON flag so downstream code can fall
# back to non-Triton paths when Triton is not installed.
try:
    import triton

    HAS_TRITON = True
except ImportError:
    HAS_TRITON = False
    print("Triton is not installed. Please install Triton to use Triton kernels.")
# Import errors may still occur even if Triton is installed.
if HAS_TRITON:
    from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
    from .copy_kv_cache_dest import copy_kv_cache_to_dest
    from .fused_layernorm import layer_norm
    from .gptq_triton import gptq_fused_linear_triton
    from .int8_rotary_embedding_kernel import int8_rotary_embedding_fwd
    from .smooth_attention import smooth_llama_context_attn_fwd, smooth_token_attention_fwd
    from .softmax import softmax
    from .token_attention_kernel import token_attention_fwd
    __all__ = [
        "llama_context_attn_fwd",
        "bloom_context_attn_fwd",
        "softmax",
        "layer_norm",
        "copy_kv_cache_to_dest",
        "token_attention_fwd",
        "gptq_fused_linear_triton",
        "int8_rotary_embedding_fwd",
        "smooth_llama_context_attn_fwd",
        "smooth_token_attention_fwd",
    ]
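
# A minimal usage sketch: callers are expected to branch on the HAS_TRITON flag
# so the Triton-accelerated kernels exported above are used only when Triton is
# available. This assumes the package path colossalai.kernel.triton; the plain
# PyTorch fallback shown is an illustrative stand-in (its signature differs from
# the fused kernel's), not something this module provides.
#
#     import torch.nn.functional as F
#
#     from colossalai.kernel.triton import HAS_TRITON
#
#     if HAS_TRITON:
#         # Triton path: use the fused layer norm kernel exported by this module.
#         from colossalai.kernel.triton import layer_norm
#     else:
#         # Fallback path: standard PyTorch layer norm; the call site must adapt
#         # its arguments to F.layer_norm's signature.
#         layer_norm = F.layer_norm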