[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* update
* update
* update
* update
* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
@@ -7,10 +7,10 @@ import torch.cuda
 from torch.nn import Module
 from torch.utils._pytree import tree_map
 
+from colossalai.accelerator import get_accelerator
 from colossalai.inference.engine.microbatch_manager import MicroBatchManager, Status
 from colossalai.pipeline.p2p import PipelineP2PCommunication
 from colossalai.pipeline.stage_manager import PipelineStageManager
-from colossalai.utils.device import get_current_device
 
 from ._utils import get_batch_size, get_micro_batch, model_forward, to_device
 from .base import PipelineSchedule
@@ -86,7 +86,7 @@ class GenerateSchedule(PipelineSchedule):
         """
         micro_batch = get_micro_batch(self.batch, self.microbatch_offset, self.microbatch_size)
         self.microbatch_offset += self.microbatch_size
-        return tree_map(partial(to_device, device=get_current_device()), micro_batch)
+        return tree_map(partial(to_device, device=get_accelerator().get_current_device()), micro_batch)
 
     def _prepare_inputs_for_interval_stage(self):
         """
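For reference, the change swaps the CUDA-specific helper from colossalai.utils.device for the backend-agnostic accelerator API. A minimal sketch of the new call pattern, assuming an arbitrary nested micro-batch; the local to_device helper and the sample batch below are illustrative stand-ins, not code from the patch:

from functools import partial

import torch
from torch.utils._pytree import tree_map

from colossalai.accelerator import get_accelerator

# Resolve the active backend (CUDA, NPU, CPU, ...) and ask it for the
# current device, instead of calling the CUDA-only get_current_device helper.
device = get_accelerator().get_current_device()

# Illustrative stand-in for the to_device helper imported from ._utils.
def to_device(x, device):
    return x.to(device) if torch.is_tensor(x) else x

# Move every tensor in a (possibly nested) micro-batch onto that device,
# mirroring the tree_map(partial(to_device, ...)) call in the diff.
micro_batch = {"input_ids": torch.randint(0, 100, (2, 16)), "offset": 0}
micro_batch = tree_map(partial(to_device, device=device), micro_batch)

Because get_accelerator() dispatches to whichever backend ColossalAI detected at startup, the same scheduling code can run on CUDA GPUs or Ascend NPUs without device-specific branches.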