Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-05 02:51:59 +00:00
[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* update
* update
* update
* update
* [npu] polish accelerator code

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
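The change is mechanical throughout: every call to the old CUDA-centric device helper is routed through the accelerator abstraction instead, so the same code can run on CUDA, NPU, or CPU backends. A minimal before/after sketch of the pattern this commit applies (the dispatch comment reflects the commit's intent, not a quoted docstring):

    # before: device helper hard-wired in colossalai.utils.device
    # from colossalai.utils.device import get_current_device
    # device = get_current_device()

    # after: resolve the device through the accelerator singleton,
    # which dispatches to the active backend (CUDA, NPU, or CPU)
    from colossalai.accelerator import get_accelerator

    device = get_accelerator().get_current_device()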
@@ -8,6 +8,7 @@ import torch.nn.functional as F
 from torch import Tensor
 from torch.nn import Parameter
 
+from colossalai.accelerator import get_accelerator
 from colossalai.legacy.communication import broadcast
 from colossalai.legacy.context import ParallelMode, seed
 from colossalai.legacy.core import global_context as gpc
@@ -18,7 +19,6 @@ from colossalai.legacy.utils.checkpointing import (
     partition_tensor_parallel_state_dict,
 )
 from colossalai.nn import init as init
-from colossalai.utils.device import get_current_device
 
 from ..base_layer import ParallelLayer
 from ..utils import divide, set_tensor_parallel_attribute_by_partition, to_2tuple
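With the import swapped, every layer below rebuilds its parameters the same way. A short sketch of the allocation idiom the remaining hunks apply, assuming a working ColossalAI install (the sizes are made up for illustration):

    import torch

    from colossalai.accelerator import get_accelerator

    # Bundle device and dtype once, then reuse them for every tensor factory
    # call, exactly as the factory_kwargs dicts in the hunks below do.
    factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": torch.float32}
    weight = torch.empty(128, 64, **factory_kwargs)  # allocated directly on the accelerator
    bias = torch.zeros(64, **factory_kwargs)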
@@ -82,7 +82,7 @@ class Linear2D(ParallelLayer):
         self.hidden_size_per_partition = divide(self.out_features, self.summa_dim)
 
         # create weight, shape: [k/q, h/q]
-        factory_kwargs = {"device": get_current_device(), "dtype": dtype}
+        factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype}
         self.weight = Parameter(
             torch.empty(self.input_size_per_partition, self.hidden_size_per_partition, **factory_kwargs)
         )
@@ -259,7 +259,7 @@ class LayerNorm2D(ParallelLayer):
         self.partitioned_partition = divide(normalized_shape, self.summa_dim**2)
 
         # create parameters
-        factory_kwargs = {"device": get_current_device(), "dtype": dtype}
+        factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype}
 
         self.weight = Parameter(torch.ones(self.partitioned_partition, **factory_kwargs))
         if bias:
@@ -438,18 +438,24 @@ class PatchEmbedding2D(ParallelLayer):
         self.weight = Parameter(
             torch.empty(
                 (self.embed_size_per_partition, in_chans, *self.patch_size),
-                device=get_current_device(),
+                device=get_accelerator().get_current_device(),
                 dtype=dtype,
             )
         )
-        self.bias = Parameter(torch.empty(self.embed_size_per_partition, device=get_current_device(), dtype=dtype))
+        self.bias = Parameter(
+            torch.empty(self.embed_size_per_partition, device=get_accelerator().get_current_device(), dtype=dtype)
+        )
 
         self.cls_token = Parameter(
-            torch.zeros((1, 1, self.embed_size_per_partition), device=get_current_device(), dtype=dtype)
+            torch.zeros(
+                (1, 1, self.embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype
+            )
         )
         self.pos_embed = Parameter(
             torch.zeros(
-                (1, self.num_patches + 1, self.embed_size_per_partition), device=get_current_device(), dtype=dtype
+                (1, self.num_patches + 1, self.embed_size_per_partition),
+                device=get_accelerator().get_current_device(),
+                dtype=dtype,
             )
         )
 
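Several replacements in this hunk also reflow a one-line construction across multiple lines; that is a line-length consequence of the longer accelerator expression, not a behavioral change. A sketch of the equivalence (hypothetical size, float32 assumed):

    import torch
    from torch.nn import Parameter

    from colossalai.accelerator import get_accelerator

    dev = get_accelerator().get_current_device()
    # Identical at runtime; the multi-line form exists only because the longer
    # device expression pushes the one-liner past the formatter's line limit.
    bias_a = Parameter(torch.empty(8, device=dev, dtype=torch.float32))
    bias_b = Parameter(
        torch.empty(8, device=dev, dtype=torch.float32)
    )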
@@ -619,7 +625,9 @@ class Embedding2D(ParallelLayer):
         self.embed_kwargs = kwargs
 
         self.weight = Parameter(
-            torch.empty((num_embeddings, embed_dim_per_partition), device=get_current_device(), dtype=dtype)
+            torch.empty(
+                (num_embeddings, embed_dim_per_partition), device=get_accelerator().get_current_device(), dtype=dtype
+            )
         )
 
         self.reset_parameters(weight_initializer)
@@ -758,7 +766,7 @@ class VocabParallelEmbedding2D(ParallelLayer):
         self.weight = Parameter(
             torch.empty(
                 (self.num_embeddings_per_partition, self.embed_dim_per_partition),
-                device=get_current_device(),
+                device=get_accelerator().get_current_device(),
                 dtype=dtype,
             )
         )
@@ -895,11 +903,18 @@ class Classifier2D(ParallelLayer):
             self.has_weight = False
         else:
             self.weight = Parameter(
-                torch.empty(self.num_classes, self.input_size_per_partition, device=get_current_device(), dtype=dtype)
+                torch.empty(
+                    self.num_classes,
+                    self.input_size_per_partition,
+                    device=get_accelerator().get_current_device(),
+                    dtype=dtype,
+                )
             )
             self.has_weight = True
         if bias:
-            self.bias = Parameter(torch.zeros(self.num_classes, device=get_current_device(), dtype=dtype))
+            self.bias = Parameter(
+                torch.zeros(self.num_classes, device=get_accelerator().get_current_device(), dtype=dtype)
+            )
         else:
             self.bias = None
 
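The Classifier2D hunk sits inside the optional-weight branch, so only freshly allocated tensors change device resolution; an externally shared weight is left untouched. A shortened sketch of that branch (hypothetical sizes, float32 assumed):

    import torch
    from torch.nn import Parameter

    from colossalai.accelerator import get_accelerator

    num_classes, input_size_per_partition = 10, 64
    weight = None  # a shared weight passed in from outside, if any
    device = get_accelerator().get_current_device()

    if weight is not None:
        has_weight = False  # reuse the shared weight as-is
    else:
        weight = Parameter(
            torch.empty(num_classes, input_size_per_partition, device=device, dtype=torch.float32)
        )
        has_weight = True
    bias = Parameter(torch.zeros(num_classes, device=device, dtype=torch.float32))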
@@ -1052,7 +1067,7 @@ class VocabParallelClassifier2D(ParallelLayer):
         self.output_size_per_partition = divide(num_classes, self.summa_dim)
 
         # create weight, shape: [k/q, h/q]
-        factory_kwargs = {"device": get_current_device(), "dtype": dtype}
+        factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype}
         if weight is not None:
             self.weight = weight
             self.has_weight = False
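A quick smoke test for the migration (hypothetical, not part of the commit): the accelerator should report a usable torch.device on CUDA, NPU, and CPU-only machines alike, so tensors can be allocated without backend-specific code.

    import torch

    from colossalai.accelerator import get_accelerator

    device = get_accelerator().get_current_device()
    x = torch.ones(4, device=device)
    print(x.device)  # e.g. cuda:0, npu:0, or cpu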