[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* update
* update
* update
* update
* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
@@ -13,12 +13,12 @@ from torch.utils.data import DataLoader
 from tqdm import tqdm
 
 import colossalai
+from colossalai.accelerator import get_accelerator
 from colossalai.booster import Booster
 from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
 from colossalai.booster.plugin.dp_plugin_base import DPPluginBase
 from colossalai.cluster import DistCoordinator
 from colossalai.nn.optimizer import HybridAdam
-from colossalai.utils import get_current_device
 
 # ==============================
 # Prepare Hyperparameters
@@ -53,8 +53,8 @@ def build_dataloader(batch_size: int, coordinator: DistCoordinator, plugin: DPPluginBase):
 @torch.no_grad()
 def evaluate(model: nn.Module, test_dataloader: DataLoader, coordinator: DistCoordinator) -> float:
     model.eval()
-    correct = torch.zeros(1, dtype=torch.int64, device=get_current_device())
-    total = torch.zeros(1, dtype=torch.int64, device=get_current_device())
+    correct = torch.zeros(1, dtype=torch.int64, device=get_accelerator().get_current_device())
+    total = torch.zeros(1, dtype=torch.int64, device=get_accelerator().get_current_device())
     for images, labels in test_dataloader:
         images = images.cuda()
         labels = labels.cuda()
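For context, a minimal sketch of the migration pattern this diff applies, assuming colossalai is installed and initialized; only get_accelerator().get_current_device() and the removed colossalai.utils.get_current_device helper are taken from the diff above, the surrounding lines are illustrative:

import torch

from colossalai.accelerator import get_accelerator

# Old pattern (removed by this commit):
#   from colossalai.utils import get_current_device
#   device = get_current_device()

# New pattern: resolve the current device through the accelerator
# abstraction so the same code runs on CUDA GPUs and Ascend NPUs.
device = get_accelerator().get_current_device()
correct = torch.zeros(1, dtype=torch.int64, device=device)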