Mirror of https://github.com/hpcaitech/ColossalAI.git
* [gptq] add gptq kernel (#4416)
  * add gptq
  * refactor code
  * fix tests
  * replace auto-gptq
  * rename inference/quant
  * refactor test
  * add auto-gptq as an option
  * reset requirements
  * change assert and check auto-gptq
  * add import warnings
  * change test flash attn version
  * remove example
  * change requirements of flash_attn
  * modify tests
  * [skip ci] change requirements-test
* [gptq] faster gptq cuda kernel (#4494)
  * [skip ci] add cuda kernels
  * add license
  * [skip ci] fix max_input_len
  * format files & change test size
* [gptq] add gptq tensor parallel (#4538)
  * add gptq tensor parallel
  * add gptq tp
  * delete print
  * add test gptq check
  * add test auto gptq check
* [gptq] combine gptq and kv cache manager (#4706)
  * combine gptq and kv cache manager
  * add init bits
  * delete useless code
  * add model path
  * delete useless print and update test
  * delete useless import
  * move option gptq to shard config
  * change replace linear to shardformer
  * update bloom policy
  * delete useless code
  * fix import bug and delete useless code
  * change colossalai/gptq to colossalai/quant/gptq
  * update import linear for tests
  * delete useless code and move gptq_kernel to kernel directory
  * fix triton kernel
  * add triton import
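For context, the kernels built below consume 4-bit weights produced by GPTQ post-training quantization. A minimal sketch of producing such weights with the auto-gptq package (mentioned in the changelog as an optional dependency), following that library's documented API; the model name, calibration sentence, and output directory are placeholders:

# Sketch only: a real run needs a proper calibration dataset, and the
# model/paths here are placeholders, not ColossalAI's actual test setup.
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from transformers import AutoTokenizer

pretrained_dir = "facebook/opt-125m"  # placeholder model
quantized_dir = "opt-125m-4bit-gptq"  # placeholder output path

tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, use_fast=True)
examples = [tokenizer("GPTQ calibration sample.")]  # single sentence, for shape only

quantize_config = BaseQuantizeConfig(bits=4, group_size=128, desc_act=False)
model = AutoGPTQForCausalLM.from_pretrained(pretrained_dir, quantize_config)
model.quantize(examples)             # run GPTQ against the calibration examples
model.save_quantized(quantized_dir)  # write 4-bit weights + quantize config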
import re

import torch

from .builder import Builder
from .utils import append_nvcc_threads
class GPTQBuilder(Builder):
    NAME = "cu_gptq"
    PREBUILT_IMPORT_PATH = "colossalai._C.cu_gptq"

    def __init__(self):
        super().__init__(name=GPTQBuilder.NAME, prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)
    def include_dirs(self):
        # GPTQ kernel headers live next to the CUDA sources; also add the CUDA toolkit headers.
        return [self.csrc_abs_path("gptq"), self.get_cuda_home_include()]

    def sources_files(self):
        return [
            self.csrc_abs_path(fname)
            for fname in [
                "gptq/linear_gptq.cpp",
                "gptq/column_remap.cu",
                "gptq/cuda_buffers.cu",
                "gptq/q4_matmul.cu",
                "gptq/q4_matrix.cu",
            ]
        ]
    def cxx_flags(self):
        return ["-O3"] + self.version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = [
            "-v",
            # The original list passed both -std=c++14 and -std=c++17; a
            # repeated -std option can make nvcc abort with a "redefinition
            # of argument" error, so only the C++17 flag is kept.
            "-std=c++17",
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "-U__CUDA_NO_HALF2_OPERATORS__",
            "-DTHRUST_IGNORE_CUB_VERSION_CHECK",
            "-lcublas",
        ]

        # Emit device code only for Ampere-or-newer architectures
        # (compute capability >= 8.0) supported by this PyTorch build.
        for arch in torch.cuda.get_arch_list():
            res = re.search(r"sm_(\d+)", arch)
            if res:
                arch_cap = res[1]
                if int(arch_cap) >= 80:
                    extra_cuda_flags.extend(["-gencode", f"arch=compute_{arch_cap},code={arch}"])

        ret = ["-O3", "--use_fast_math"] + self.version_dependent_macros + extra_cuda_flags
        return append_nvcc_threads(ret)
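
For orientation, a minimal usage sketch, assuming the Builder base class exposes the usual load() entry point (which imports the prebuilt colossalai._C.cu_gptq extension if available and otherwise JIT-compiles the sources above with these flags); the exact import path may differ between ColossalAI versions:

# Hypothetical usage sketch: load() and the import path are assumptions
# based on how ColossalAI's other op builders are consumed, not an API
# confirmed by this file.
from colossalai.kernel.op_builder import GPTQBuilder

gptq_cuda = GPTQBuilder().load()  # prebuilt import, or JIT build on first call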