Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-07 20:10:17 +00:00
[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* update
* update
* update
* update
* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
@@ -10,6 +10,7 @@ import torch.nn as nn
 from torch.distributed import ProcessGroup
 from torch.distributed.distributed_c10d import _get_default_group
 
+from colossalai.accelerator import get_accelerator
 from colossalai.checkpoint_io.utils import StateDictSharder, gather_distributed_param
 from colossalai.interface import ModelWrapper
 from colossalai.lazy import LazyTensor
@@ -27,7 +28,7 @@ from colossalai.tensor.d_tensor import (
     is_distributed_tensor,
 )
 from colossalai.tensor.param_op_hook import ColoParamOpHookManager
-from colossalai.utils import _cast_float, free_storage, get_current_device, is_ddp_ignored
+from colossalai.utils import _cast_float, free_storage, is_ddp_ignored
 
 from .chunk import Chunk, ChunkManager, TensorState, init_chunk_manager
 from .gemini_hook import GeminiZeROHook
@@ -766,7 +767,7 @@ class GeminiDDP(ModelWrapper):
 
         # move ignored parameters to CUDA
         if is_ddp_ignored(p):
-            p.data = p.data.to(device=get_current_device(), dtype=self.mixed_precision)
+            p.data = p.data.to(device=get_accelerator().get_current_device(), dtype=self.mixed_precision)
             continue
 
         # create a fp16 parameter
@@ -815,7 +816,7 @@ class GeminiDDP(ModelWrapper):
         for buffer in self.module.buffers():
             if isinstance(buffer, LazyTensor):
                 buffer.materialize()
-                buffer.data = buffer.to(get_current_device())
+                buffer.data = buffer.to(get_accelerator().get_current_device())
             if torch.is_floating_point(buffer):
                 buffer.data = buffer.to(self.mixed_precision)
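For context, a minimal sketch of the migration pattern this commit applies: device lookups that previously went through colossalai.utils.get_current_device now go through the accelerator abstraction (get_accelerator().get_current_device()), so the same code path can resolve to CUDA, NPU, or another supported backend. Only those two calls are taken from the diff; the tensor and dtype below are illustrative, not part of the commit.

import torch

from colossalai.accelerator import get_accelerator

# Old pattern (removed by this commit):
#   from colossalai.utils import get_current_device
#   p.data = p.data.to(device=get_current_device(), dtype=torch.float16)

# New pattern: ask the accelerator abstraction for the current device.
device = get_accelerator().get_current_device()

# Illustrative tensor move; the real code does this for DDP-ignored
# parameters and module buffers inside GeminiDDP.
x = torch.randn(4, 4)
x = x.to(device=device, dtype=torch.float16)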