Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-07 12:01:39 +00:00)
[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* update
* update
* update
* update
* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
```diff
@@ -8,7 +8,8 @@ import pytest
 import torch
 from torch import Tensor
 
-from colossalai.utils import get_current_device, multi_tensor_applier
+from colossalai.accelerator import get_accelerator
+from colossalai.utils import multi_tensor_applier
 
 _FUSED_ALLOWED_P_G_TYPES = [
     (torch.float, torch.half),
```
```diff
@@ -155,7 +156,9 @@ def test_fused_adam_kernel(adamw, weight_decay, p_dtype, g_dtype):
     rtol, atol = 1e-3, 1e-3
     if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
         rtol, atol = 4e-3, 4e-3
-    check_adam_kernel(FusedAdamKernel, adamw, weight_decay, p_dtype, g_dtype, get_current_device(), 3, rtol, atol)
+    check_adam_kernel(
+        FusedAdamKernel, adamw, weight_decay, p_dtype, g_dtype, get_accelerator().get_current_device(), 3, rtol, atol
+    )
 
 
 @pytest.mark.parametrize("adamw", [False, True])
```