Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-22 18:09:06 +00:00)
[feat] refactored extension module (#5298)
* [feat] refactored extension module * polish * polish * polish * polish * polish * polish * polish * polish * polish * polish
extensions/moe/__init__.py (new file, 3 lines added)
@@ -0,0 +1,3 @@
from .moe_cuda import MoeCudaExtension

__all__ = ['MoeCudaExtension']
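The package `__init__` simply re-exports the extension class, so callers can import it from the `extensions.moe` package rather than the internal module path. A minimal illustration (the call site below is hypothetical and assumes the top-level `extensions` package is on the import path):

# Hypothetical call site, not part of the commit.
from extensions.moe import MoeCudaExtension  # same class as extensions.moe.moe_cuda.MoeCudaExtension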
extensions/moe/moe_cuda.py (new file, 29 lines added)
@@ -0,0 +1,29 @@
from ..cuda_extension import _CudaExtension
from ..utils import append_nvcc_threads, get_cuda_cc_flag


class MoeCudaExtension(_CudaExtension):
    def __init__(self):
        super().__init__(name="moe_cuda")

    def include_dirs(self):
        ret = [self.csrc_abs_path("cuda/include"), self.get_cuda_home_include()]
        return ret

    def sources_files(self):
        ret = [self.csrc_abs_path(fname) for fname in ["cuda/moe_cuda.cpp", "cuda/moe_cuda_kernel.cu"]]
        return ret

    def cxx_flags(self):
        return ["-O3"] + self.version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = [
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "--expt-relaxed-constexpr",
            "--expt-extended-lambda",
        ]
        extra_cuda_flags.extend(get_cuda_cc_flag())
        ret = ["-O3", "--use_fast_math"] + extra_cuda_flags
        return append_nvcc_threads(ret)
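For orientation, a minimal usage sketch of the new class follows. It assumes the `_CudaExtension` base class exposes a `load()` method that compiles (or imports a prebuilt copy of) the listed CUDA sources and returns the bound kernel module; the exact loader API is not shown in this diff and should be checked against `extensions/cuda_extension.py`.

# Hypothetical sketch, not part of the commit: building/loading the MoE CUDA kernel.
from extensions.moe import MoeCudaExtension

ext = MoeCudaExtension()  # registered under the name "moe_cuda"

# The flag helpers defined above can be inspected directly; nvcc_flags()
# combines -O3/--use_fast_math with half-precision macros and the
# compute-capability flags returned by get_cuda_cc_flag().
print(ext.cxx_flags())
print(ext.nvcc_flags())

# Assumed API: load() JIT-builds cuda/moe_cuda.cpp and cuda/moe_cuda_kernel.cu
# (or reuses a cached build) and returns the importable extension module.
moe_cuda_kernel = ext.load()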