Mirror of https://github.com/hpcaitech/ColossalAI.git
* [inference] add int8 rotary embedding kernel for smoothquant (#4843)
* [inference] add smoothquant llama attention (#4850)
  * add smoothquant llama attention
  * remove useless code
  * remove useless code
  * fix import error
  * rename file name
* [inference] add silu linear fusion for smoothquant llama mlp (#4853)
  * add silu linear
  * update skip condition
  * catch smoothquant cuda lib exception
  * process exception for tests
* [inference] add llama mlp for smoothquant (#4854)
  * add llama mlp for smoothquant
  * fix down out scale
  * remove duplicate lines
  * add llama mlp check
  * delete useless code
* [inference] add smoothquant llama (#4861)
  * add smoothquant llama
  * fix attention accuracy
  * fix accuracy
  * add kv cache and save pretrained
  * refactor example
  * delete smooth
  * refactor code
* [inference] add smooth function and delete useless code for smoothquant (#4895)
  * add smooth function and delete useless code
  * update datasets
  * remove duplicate import
  * delete useless file
* refactor codes (#4902)
  * refactor code
  * add license
  * add torch-int and smoothquant license
try:
    import triton

    HAS_TRITON = True
except ImportError:
    HAS_TRITON = False
    print("Triton is not installed. Please install Triton to use Triton kernels.")

# An ImportError can still be raised at kernel-import time even when Triton itself
# is installed, so all kernel imports stay behind the HAS_TRITON guard.
if HAS_TRITON:
    from .context_attention import bloom_context_attn_fwd, llama2_context_attn_fwd, llama_context_attn_fwd
    from .copy_kv_cache_dest import copy_kv_cache_to_dest
    from .fused_layernorm import layer_norm
    from .gptq_triton import gptq_fused_linear_triton
    from .int8_rotary_embedding_kernel import int8_rotary_embedding_fwd
    from .rms_norm import rmsnorm_forward
    from .rotary_embedding_kernel import rotary_embedding_fwd
    from .smooth_attention import smooth_llama_context_attn_fwd, smooth_token_attention_fwd
    from .softmax import softmax
    from .token_attention_kernel import token_attention_fwd

    __all__ = [
        "llama_context_attn_fwd",
        "llama2_context_attn_fwd",
        "bloom_context_attn_fwd",
        "softmax",
        "layer_norm",
        "rmsnorm_forward",
        "copy_kv_cache_to_dest",
        "rotary_embedding_fwd",
        "token_attention_fwd",
        "gptq_fused_linear_triton",
        "int8_rotary_embedding_fwd",
        "smooth_llama_context_attn_fwd",
        "smooth_token_attention_fwd",
    ]
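
Because every kernel symbol above is exported only when Triton imports cleanly, downstream code typically branches on HAS_TRITON and keeps a plain-PyTorch fallback. Below is a minimal sketch of that pattern, assuming this package is importable as colossalai.kernel.triton and that rmsnorm_forward takes (x, weight, eps) in the style of common Triton RMSNorm kernels; both are assumptions for illustration, not documented contracts.

import torch

from colossalai.kernel.triton import HAS_TRITON


def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    """RMSNorm that uses the Triton kernel when available, else pure PyTorch."""
    if HAS_TRITON and x.is_cuda:
        # Import inside the guard: the symbol only exists when HAS_TRITON is True.
        from colossalai.kernel.triton import rmsnorm_forward  # assumed (x, weight, eps) signature

        return rmsnorm_forward(x, weight, eps)
    # Fallback with the same semantics: normalize by root-mean-square, then scale.
    variance = x.float().pow(2).mean(-1, keepdim=True)
    return (x.float() * torch.rsqrt(variance + eps)).type_as(x) * weight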