Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-02 01:28:31 +00:00)
[setup] support pre-build and jit-build of cuda kernels (#2374)
* [setup] support pre-build and jit-build of cuda kernels
* polish code
* polish code
* polish code
* polish code
* polish code
* polish code
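The change replaces direct imports of the pre-compiled extension modules (colossalai._C.cpu_optim, colossalai.kernel.fused_optim) with builder objects whose load() call returns the compiled kernels, compiling them just-in-time when no pre-built binary is found. Below is a minimal sketch of that load pattern, assuming a hypothetical SketchBuilder with a made-up module name and source list; the real builders live in colossalai.kernel.op_builder and their internals may differ:

import importlib

from torch.utils import cpp_extension


class SketchBuilder:
    """Illustrative only: prefer a pre-built extension, otherwise JIT-compile it."""

    name = "cpu_adam_sketch"                  # hypothetical extension name
    sources = ["csrc/cpu_adam_sketch.cpp"]    # hypothetical source file list

    def load(self):
        try:
            # pre-build path: the extension was compiled and installed ahead of time
            return importlib.import_module(f"colossalai._C.{self.name}")
        except ImportError:
            # jit-build path: compile the sources on first use via PyTorch's C++ extension loader
            return cpp_extension.load(name=self.name, sources=self.sources, verbose=True)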
@@ -3,6 +3,7 @@ from typing import Optional
 
 import torch
 
+from colossalai.kernel.op_builder import CPUAdamBuilder
 from colossalai.registry import OPTIMIZERS
 
 from .nvme_optimizer import NVMeOptimizer
@@ -76,12 +77,8 @@ class CPUAdam(NVMeOptimizer):
         default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)
         super(CPUAdam, self).__init__(model_params, default_args, nvme_offload_fraction, nvme_offload_dir)
         self.adamw_mode = adamw_mode
-        try:
-            import colossalai._C.cpu_optim
-        except ImportError:
-            raise ImportError('Please install colossalai from source code to use CPUAdam')
-        self.cpu_adam_op = colossalai._C.cpu_optim.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay,
-                                                                    adamw_mode)
+        cpu_adam = CPUAdamBuilder().load()
+        self.cpu_adam_op = cpu_adam.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay, adamw_mode)
 
     def torch_adam_update(self,
                           data,
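From the caller's side nothing changes; CPUAdam keeps its constructor signature and only the kernel loading moves behind CPUAdamBuilder().load(). A hedged usage sketch, assuming CPUAdam is imported from colossalai.nn.optimizer and using a placeholder model and hyper-parameters:

import torch
from colossalai.nn.optimizer import CPUAdam

model = torch.nn.Linear(16, 4)                    # placeholder CPU-resident model
optimizer = CPUAdam(model.parameters(), lr=1e-3)  # first construction may trigger a JIT build

loss = model(torch.randn(8, 16)).sum()
loss.backward()
optimizer.step()

The first instantiation pays the one-time compilation cost when no pre-built kernel is installed; later constructions in the same environment can reuse the cached build.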
@@ -65,7 +65,8 @@ class FusedAdam(torch.optim.Optimizer):
         self.adamw_mode = 1 if adamw_mode else 0
         self.set_grad_none = set_grad_none
         if multi_tensor_applier.available:
-            from colossalai.kernel import fused_optim
+            from colossalai.kernel.op_builder import FusedOptimBuilder
+            fused_optim = FusedOptimBuilder().load()
 
             # Skip buffer
             self._dummy_overflow_buf = torch.cuda.IntTensor([0])
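The same substitution appears in FusedAdam here and in FusedLAMB and FusedSGD below: the fused CUDA kernels are fetched with FusedOptimBuilder().load() inside the constructor instead of being imported at module import time, so importing the optimizer no longer requires pre-built binaries. A hedged usage sketch with a placeholder model and hyper-parameters (adamw_mode is the flag visible in the hunk above); it assumes a CUDA device is available:

import torch
from colossalai.nn.optimizer import FusedAdam

model = torch.nn.Linear(16, 4).cuda()   # fused kernels operate on CUDA tensors
optimizer = FusedAdam(model.parameters(), lr=1e-3, adamw_mode=True)

loss = model(torch.randn(8, 16, device="cuda")).sum()
loss.backward()
optimizer.step()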
@@ -76,7 +76,8 @@ class FusedLAMB(torch.optim.Optimizer):
                         max_grad_norm=max_grad_norm)
         super(FusedLAMB, self).__init__(params, defaults)
         if multi_tensor_applier.available:
-            from colossalai.kernel import fused_optim
+            from colossalai.kernel.op_builder import FusedOptimBuilder
+            fused_optim = FusedOptimBuilder().load()
 
             self.multi_tensor_l2norm = fused_optim.multi_tensor_l2norm
             # Skip buffer
@@ -80,7 +80,8 @@ class FusedSGD(Optimizer):
         self.wd_after_momentum = wd_after_momentum
 
         if multi_tensor_applier.available:
-            from colossalai.kernel import fused_optim
+            from colossalai.kernel.op_builder import FusedOptimBuilder
+            fused_optim = FusedOptimBuilder().load()
 
             # Skip buffer
             self._dummy_overflow_buf = torch.tensor([0],
@@ -2,6 +2,7 @@ from typing import Any, Optional
 
 import torch
 
+from colossalai.kernel.op_builder import CPUAdamBuilder, FusedOptimBuilder
 from colossalai.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier
 
@@ -77,7 +78,9 @@ class HybridAdam(NVMeOptimizer):
         super(HybridAdam, self).__init__(model_params, default_args, nvme_offload_fraction, nvme_offload_dir)
         self.adamw_mode = adamw_mode
-        from colossalai.kernel import cpu_optim, fused_optim
+        # build during runtime if not found
+        cpu_optim = CPUAdamBuilder().load()
+        fused_optim = FusedOptimBuilder().load()
 
         self.cpu_adam_op = cpu_optim.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay, adamw_mode)
 
         self.gpu_adam_op = fused_optim.multi_tensor_adam
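In HybridAdam both kernel modules are now obtained through builders: cpu_optim supplies CPUAdamOptimizer for CPU-resident parameter groups, while fused_optim.multi_tensor_adam (kept as self.gpu_adam_op) handles GPU parameters through multi_tensor_applier. The sketch below simply repeats the calls from the hunks above with placeholder hyper-parameters, to show how the loaded modules map onto the attributes:

from colossalai.kernel.op_builder import CPUAdamBuilder, FusedOptimBuilder

cpu_optim = CPUAdamBuilder().load()        # C++ extension exposing CPUAdamOptimizer
fused_optim = FusedOptimBuilder().load()   # CUDA extension exposing multi_tensor_adam, multi_tensor_l2norm, ...

# lr, beta1, beta2, eps, weight_decay, adamw_mode (placeholder values)
cpu_adam_op = cpu_optim.CPUAdamOptimizer(1e-3, 0.9, 0.999, 1e-8, 0.0, True)
gpu_adam_op = fused_optim.multi_tensor_adam   # invoked later through multi_tensor_applier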