Mirror of https://github.com/hpcaitech/ColossalAI.git
[builder] runtime adam and fused_optim builder (#2184)
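This change removes HybridAdam's hard dependency on the prebuilt colossalai._C extension. Where the old code raised an ImportError asking users to install colossalai from source, the optimizer now falls back to CPUAdamBuilder and FusedOptimBuilder from colossalai.kernel.op_builder, which build the CPU and fused CUDA optimizer kernels at runtime.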
@@ -77,15 +77,15 @@ class HybridAdam(NVMeOptimizer):
         super(HybridAdam, self).__init__(model_params, default_args, nvme_offload_fraction, nvme_offload_dir)
         self.adamw_mode = adamw_mode
         try:
-            import colossalai._C.cpu_optim
-            import colossalai._C.fused_optim
+            from colossalai._C import cpu_optim, fused_optim
         except ImportError:
-            raise ImportError('Please install colossalai from source code to use HybridAdam')
+            from colossalai.kernel.op_builder import CPUAdamBuilder, FusedOptimBuilder
+            fused_optim = FusedOptimBuilder().load()
+            cpu_optim = CPUAdamBuilder().load()

-        self.cpu_adam_op = colossalai._C.cpu_optim.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay,
-                                                                    adamw_mode)
+        self.cpu_adam_op = cpu_optim.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay, adamw_mode)

-        self.gpu_adam_op = colossalai._C.fused_optim.multi_tensor_adam
+        self.gpu_adam_op = fused_optim.multi_tensor_adam
         self._dummy_overflow_buf = torch.cuda.IntTensor([0])

     @torch.no_grad()
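For context, a minimal sketch of the fallback pattern this commit introduces, assuming the builders JIT-compile the kernels with PyTorch's C++ extension machinery; the source path and compiler flags below are illustrative assumptions, not ColossalAI's actual build configuration:

    # Sketch of a runtime-build fallback; paths and flags are hypothetical.
    from torch.utils.cpp_extension import load

    def build_cpu_adam():
        # JIT-compile on first use; PyTorch caches the resulting shared
        # library, so subsequent loads skip compilation.
        return load(
            name="cpu_adam",
            sources=["csrc/cpu_adam.cpp"],  # hypothetical source path
            extra_cflags=["-O3"],           # hypothetical flags
            verbose=False,
        )

    try:
        from colossalai._C import cpu_optim  # prebuilt extension, if installed from source
    except ImportError:
        cpu_optim = build_cpu_adam()  # fall back to building at runtime

The practical effect is that HybridAdam can be constructed from an install without the prebuilt kernels, at the cost of a one-time compilation the first time the optimizer is created.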