[kernel] move all symlinks of kernel to colossalai._C (#1971)
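Both test files previously reached the compiled extensions through top-level symlinked modules (`cpu_adam`, `colossal_C`); this commit routes them through the `colossalai._C` package instead. A minimal sketch of the guarded-import pattern the tests rely on, assuming only what the diffs below show (the `colossalai._C.cpu_optim` path and the `CPUAdamOptimizer` constructor); the helper name is hypothetical:

# Hypothetical helper illustrating the guarded-import pattern from the diffs.
# Only colossalai._C.cpu_optim and CPUAdamOptimizer come from this commit;
# load_cpu_adam_kernel itself is illustrative.
def load_cpu_adam_kernel():
    try:
        import colossalai._C.cpu_optim
    except ImportError as e:
        # The extension exists only when ColossalAI is built from source
        # with its C++/CUDA kernels compiled.
        raise ImportError("Import cpu adam error, please install colossal from source code") from e
    return colossalai._C.cpu_optim.CPUAdamOptimizer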
@@ -1,4 +1,5 @@
 import math
+
 import torch
 
 from colossalai.testing import parameterize
@@ -66,8 +67,8 @@ def test_cpu_adam(adamw, step, p_dtype, g_dtype):
     exp_avg_sq_copy = exp_avg_sq.clone()
 
     try:
-        import cpu_adam
-        cpu_adam_op = cpu_adam.CPUAdamOptimizer(lr, beta1, beta2, eps, weight_decay, adamw)
+        import colossalai._C.cpu_optim
+        cpu_adam_op = colossalai._C.cpu_optim.CPUAdamOptimizer(lr, beta1, beta2, eps, weight_decay, adamw)
     except:
         raise ImportError("Import cpu adam error, please install colossal from source code")
 
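One judgment call worth noting: the test keeps a bare `except:` and re-raises as ImportError. If you would rather skip the test than fail it on machines without the compiled kernels, a hedged alternative using pytest's importorskip (not what this commit does) could look like:

import pytest

# Skip (rather than fail) when the extension was not built from source.
cpu_optim = pytest.importorskip("colossalai._C.cpu_optim")

def make_cpu_adam_op(lr, beta1, beta2, eps, weight_decay, adamw):
    # Same constructor arguments as in the diff above; this wrapper
    # function is hypothetical.
    return cpu_optim.CPUAdamOptimizer(lr, beta1, beta2, eps, weight_decay, adamw)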
@@ -1,8 +1,8 @@
-from numpy import dtype
-import math
 
 import torch
 import torch.nn as nn
 
+import math
+from numpy import dtype
 from colossalai.testing import parameterize
 from colossalai.utils import multi_tensor_applier
@@ -47,11 +47,11 @@ def torch_adam_update(
 @parameterize('g_dtype', [torch.float, torch.half])
 def test_adam(adamw, step, p_dtype, g_dtype):
     try:
-        import colossal_C
-        fused_adam = colossal_C.multi_tensor_adam
+        import colossalai._C.fused_optim
+        fused_adam = colossalai._C.fused_optim.multi_tensor_adam
         dummy_overflow_buf = torch.cuda.IntTensor([0])
     except:
-        raise ImportError("No colossal_C kernel installed.")
+        raise ImportError("No colossalai._C.fused_optim kernel installed.")
 
     count = 0
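For context, `fused_adam` is consumed through `multi_tensor_applier` (imported at the top of this file). A minimal usage sketch, assuming the Apex-style `multi_tensor_adam` positional-argument order (lr, betas, eps, step, adamw mode, bias correction, weight decay); the tensor shapes and hyperparameters are illustrative and the argument order should be verified against the test body:

import torch
from colossalai.utils import multi_tensor_applier
import colossalai._C.fused_optim

fused_adam = colossalai._C.fused_optim.multi_tensor_adam
dummy_overflow_buf = torch.cuda.IntTensor([0])  # overflow/no-op flag buffer

# One flat list per tensor kind: grads, params, exp_avg, exp_avg_sq.
p = torch.rand(64, device='cuda')
g = torch.rand(64, device='cuda')
m = torch.zeros(64, device='cuda')
v = torch.zeros(64, device='cuda')

lr, beta1, beta2, eps, weight_decay = 1e-3, 0.9, 0.999, 1e-8, 0.0
step, adamw_mode, bias_correction = 1, 1, 1

# Assumed Apex-style positional arguments; updates p/m/v in place.
multi_tensor_applier(fused_adam, dummy_overflow_buf, [[g], [p], [m], [v]],
                     lr, beta1, beta2, eps, step, adamw_mode,
                     bias_correction, weight_decay)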