Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2026-04-12 07:02:22 +00:00
* [gptq] add gptq kernel (#4416)
  * add gptq
  * refactor code
  * fix tests
  * replace auto-gptq
  * rename inference/quant
  * refactor test
  * add auto-gptq as an option
  * reset requirements
  * change assert and check auto-gptq
  * add import warnings
  * change test flash attn version
  * remove example
  * change requirements of flash_attn
  * modify tests
  * [skip ci] change requirements-test
* [gptq] faster gptq cuda kernel (#4494)
  * [skip ci] add cuda kernels
  * add license
  * [skip ci] fix max_input_len
  * format files & change test size
  * [skip ci]
* [gptq] add gptq tensor parallel (#4538)
  * add gptq tensor parallel
  * add gptq tp
  * delete print
  * add test gptq check
  * add test auto gptq check
* [gptq] combine gptq and kv cache manager (#4706)
  * combine gptq and kv cache manager
  * add init bits
  * delete useless code
  * add model path
  * delete useless print and update test
  * delete useless import
  * move option gptq to shard config
  * change replace linear to shardformer
  * update bloom policy
  * delete useless code
  * fix import bug and delete useless code
  * change colossalai/gptq to colossalai/quant/gptq
  * update import linear for tests
  * delete useless code and mv gptq_kernel to kernel directory
  * fix triton kernel
  * add triton import
30 lines · 900 B · Python
try:
    import triton

    HAS_TRITON = True

    from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
    from .copy_kv_cache_dest import copy_kv_cache_to_dest
    from .fused_layernorm import layer_norm
    from .gptq_triton import gptq_fused_linear_triton
    from .rms_norm import rmsnorm_forward
    from .rotary_embedding_kernel import rotary_embedding_fwd
    from .softmax import softmax
    from .token_attention_kernel import token_attention_fwd

    __all__ = [
        "llama_context_attn_fwd",
        "bloom_context_attn_fwd",
        "softmax",
        "layer_norm",
        "rmsnorm_forward",
        "copy_kv_cache_to_dest",
        "rotary_embedding_fwd",
        "token_attention_fwd",
        "gptq_fused_linear_triton",
    ]

except ImportError:
    HAS_TRITON = False
    print("Triton is not installed. Please install Triton to use Triton kernels.")
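The try/except guard above makes Triton an optional dependency: downstream code can branch on the HAS_TRITON flag instead of wrapping every call site in its own import guard. Below is a minimal sketch of that fallback pattern, assuming the module path colossalai.kernel.triton from the commit message and assuming the Triton softmax kernel accepts a single 2-D tensor; both the path and the call signature are assumptions for illustration, not a confirmed API.

import torch

from colossalai.kernel.triton import HAS_TRITON


def softmax_with_fallback(x: torch.Tensor) -> torch.Tensor:
    # Prefer the Triton-backed kernel when the import above succeeded.
    if HAS_TRITON:
        from colossalai.kernel.triton import softmax

        return softmax(x)  # assumed signature: one 2-D CUDA tensor
    # Pure-PyTorch fallback when Triton is not installed.
    return torch.softmax(x, dim=-1)

Gating on a single module-level flag keeps the optional dependency decision in one place, so adding or removing a kernel only touches this __init__ file rather than every caller.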