Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-06 03:20:52 +00:00)
[builder] use builder() for cpu adam and fused optim in setup.py (#2187)
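This change moves per-kernel build logic out of setup.py and into the op_builder classes: instead of listing sources and flags inline via cuda_ext_helper, setup.py now asks each builder for a ready-made extension. A minimal sketch of the builder() pattern these calls rely on (names and internals below are illustrative assumptions; the real FusedOptimBuilder and CPUAdamBuilder live in colossalai.kernel.op_builder and may differ in detail):

# Illustrative sketch only; not the actual colossalai.kernel.op_builder code.
import os

from torch.utils.cpp_extension import CUDAExtension


class ExampleOpBuilder:
    """Owns the source list and compiler flags for one kernel."""

    def sources(self):
        csrc = 'colossalai/kernel/cuda_native/csrc'  # assumed source root
        return [os.path.join(csrc, f) for f in
                ('colossal_C_frontend.cpp', 'multi_tensor_adam.cu')]

    def builder(self, name):
        # Return a setuptools extension registered under the dotted name,
        # e.g. 'colossalai._C.fused_optim', ready for ext_modules.append().
        return CUDAExtension(
            name=name,
            sources=self.sources(),
            extra_compile_args={'cxx': ['-O3'],
                                'nvcc': ['-O3', '--use_fast_math']})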
setup.py · 32 changed lines
@@ -1,9 +1,10 @@
 import os
 import re
-import subprocess
 
 from setuptools import Extension, find_packages, setup
 
+from colossalai.kernel.op_builder.utils import get_cuda_bare_metal_version
+
 try:
     import torch
     from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
@@ -26,17 +27,6 @@ if int(os.environ.get('NO_CUDA_EXT', '0')) == 1:
     build_cuda_ext = False
 
 
-def get_cuda_bare_metal_version(cuda_dir):
-    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
-    output = raw_output.split()
-    release_idx = output.index("release") + 1
-    release = output[release_idx].split(".")
-    bare_metal_major = release[0]
-    bare_metal_minor = release[1][0]
-
-    return raw_output, bare_metal_major, bare_metal_minor
-
-
 def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
     raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
     torch_binary_major = torch.version.cuda.split(".")[0]
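Note: the deleted helper above is not gone; the same function now ships as colossalai.kernel.op_builder.utils.get_cuda_bare_metal_version and is imported at the top of the file. A usage sketch, with the interface inferred from the deleted local copy (the example nvcc output is illustrative):

# Returns (raw nvcc -V output, major, minor).
from torch.utils.cpp_extension import CUDA_HOME

from colossalai.kernel.op_builder.utils import get_cuda_bare_metal_version

# For nvcc reporting e.g. "Cuda compilation tools, release 11.3, V11.3.109":
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
# bare_metal_major == '11', bare_metal_minor == '3'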
@@ -146,6 +136,11 @@ if build_cuda_ext:
             'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + version_dependent_macros + extra_cuda_flags)
         })
+
+    #### fused optim kernels ###
+    from colossalai.kernel.op_builder import FusedOptimBuilder
+    ext_modules.append(FusedOptimBuilder().builder('colossalai._C.fused_optim'))
+
     #### N-D parallel kernels ###
     cc_flag = []
     for arch in torch.cuda.get_arch_list():
         res = re.search(r'sm_(\d+)', arch)
@@ -154,14 +149,6 @@ if build_cuda_ext:
             if int(arch_cap) >= 60:
                 cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])
 
-    extra_cuda_flags = ['-lineinfo']
-
-    ext_modules.append(
-        cuda_ext_helper('colossalai._C.fused_optim', [
-            'colossal_C_frontend.cpp', 'multi_tensor_sgd_kernel.cu', 'multi_tensor_scale_kernel.cu',
-            'multi_tensor_adam.cu', 'multi_tensor_l2norm_kernel.cu', 'multi_tensor_lamb.cu'
-        ], extra_cuda_flags + cc_flag))
-
     extra_cuda_flags = [
         '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',
         '--expt-extended-lambda'
@@ -197,8 +184,9 @@ if build_cuda_ext:
             'kernels/general_kernels.cu', 'kernels/cuda_util.cu'
         ], extra_cuda_flags + cc_flag))
 
-    extra_cxx_flags = ['-std=c++14', '-lcudart', '-lcublas', '-g', '-Wno-reorder', '-fopenmp', '-march=native']
-    ext_modules.append(cuda_ext_helper('colossalai._C.cpu_optim', ['cpu_adam.cpp'], extra_cuda_flags, extra_cxx_flags))
+    ### Gemini Adam kernel ####
+    from colossalai.kernel.op_builder import CPUAdamBuilder
+    ext_modules.append(CPUAdamBuilder().builder('colossalai._C.cpu_optim'))
 
 setup(name='colossalai',
       version=get_version(),
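For context, the extensions collected above still flow into the standard PyTorch build machinery; a minimal sketch of the final wiring, assuming the usual BuildExtension setup (only name= and version= are visible in this hunk; the other arguments are assumed boilerplate):

# Sketch only: ext_modules is the list built up above, BuildExtension is the
# torch.utils.cpp_extension command class imported at the top of setup.py.
setup(name='colossalai',
      version=get_version(),
      ext_modules=ext_modules,
      cmdclass={'build_ext': BuildExtension} if ext_modules else {})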