[npu] change device to accelerator api (#5239)

* update accelerator

* fix timer

* fix amp

* update

* fix

* update bug

* add error raise

* fix autocast

* fix set device

* remove doc accelerator

* update doc

* update doc

* update doc

* use nullcontext

* update cpu

* update null context

* change time limit for example

* update

* update

* update

* update

* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
Author: Hongxin Liu
Date: 2024-01-09 10:20:05 +08:00
Committed by: GitHub
Parent: dd2c28a323
Commit: d202cc28c0
128 changed files with 1773 additions and 868 deletions

@@ -6,8 +6,8 @@ from typing import Callable, Iterable
 
 import torch
 
+from colossalai.accelerator import get_accelerator
 from colossalai.logging import get_dist_logger
-from colossalai.utils import get_current_device
 
 
 class BaseSchedule(ABC):
@@ -29,12 +29,12 @@ class BaseSchedule(ABC):
     def _move_tensor(element):
         if torch.is_tensor(element):
             if not element.is_cuda:
-                return element.to(get_current_device()).detach()
+                return element.to(get_accelerator().get_current_device()).detach()
         return element
 
     def _move_to_device(self, data):
         if isinstance(data, torch.Tensor):
-            data = data.to(get_current_device())
+            data = data.to(get_accelerator().get_current_device())
         elif isinstance(data, (list, tuple)):
             data_to_return = []
             for element in data:
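
For readers new to the accelerator API, the pattern applied across all 128 files is the one visible in the hunk above: device lookups go through colossalai.accelerator.get_accelerator() so the same code path serves CUDA, NPU, and CPU backends. A minimal sketch of that call pattern follows; it uses only the two calls confirmed by the diff (get_accelerator() and get_current_device()), so treat anything beyond them as an assumption.

import torch

from colossalai.accelerator import get_accelerator

# Resolve the active device through the accelerator abstraction instead
# of hard-coding torch.cuda; on an NPU build this resolves to an NPU device.
device = get_accelerator().get_current_device()

# Tensors are then placed with the standard .to() API, exactly as in
# BaseSchedule._move_tensor above.
x = torch.ones(4, 4).to(device)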