[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* update
* update
* update
* update
* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
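Every change in this commit follows the same pattern: device lookups that previously used the free function colossalai.utils.get_current_device now go through the handle returned by colossalai.accelerator.get_accelerator, which resolves to the active backend (CUDA, NPU, or CPU). A minimal before/after sketch; the tensor and dtype are illustrative, and only the two device calls come from the diff below:

import torch

from colossalai.accelerator import get_accelerator

# Old style (removed by this commit):
#   from colossalai.utils import get_current_device
#   device = get_current_device()

# New style: ask the accelerator abstraction for the current device.
device = get_accelerator().get_current_device()

# Tensors and modules are then placed with the usual .to() call.
x = torch.zeros(4, 4, dtype=torch.float16).to(device)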
@@ -3,8 +3,8 @@ from typing import Callable
 from torch import dtype, nn

+from colossalai.accelerator import get_accelerator
 from colossalai.nn import init
-from colossalai.utils import get_current_device

 from ..parallel_1d import Embedding1D, PatchEmbedding1D, VocabParallelEmbedding1D
 from ..parallel_2d import Embedding2D, PatchEmbedding2D, VocabParallelEmbedding2D
@@ -83,7 +83,7 @@ class Embedding(ColossalaiModule):
             embed = (
                 nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx, *args, **kwargs)
                 .to(dtype)
-                .to(get_current_device())
+                .to(get_accelerator().get_current_device())
             )
             weight_initializer(embed.weight, fan_in=num_embeddings, fan_out=embedding_dim)
         elif num_embeddings <= vocab_parallel_limit:
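For reference, the updated construction from the second hunk as a standalone sketch; the constructor arguments here are illustrative stand-ins for the parameters of the surrounding Embedding wrapper in the real file:

import torch
from torch import nn

from colossalai.accelerator import get_accelerator

# Illustrative values; in the real code these are parameters of the
# ColossalAI Embedding wrapper's __init__.
num_embeddings, embedding_dim, padding_idx = 1000, 128, 0
dtype = torch.float16

# Same chained construction as in the hunk: build the module, cast it,
# then move it to whatever device the active accelerator reports.
embed = (
    nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    .to(dtype)
    .to(get_accelerator().get_current_device())
)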