Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-04 10:34:41 +00:00
[npu] change device to accelerator api (#5239)
* update accelerator
* fix timer
* fix amp
* update
* fix
* update bug
* add error raise
* fix autocast
* fix set device
* remove doc accelerator
* update doc
* update doc
* update doc
* use nullcontext
* update cpu
* update null context
* change time limit for example
* udpate
* update
* update
* update
* [npu] polish accelerator code

---------

Co-authored-by: Xuanlei Zhao <xuanlei.zhao@gmail.com>
Co-authored-by: zxl <43881818+oahzxl@users.noreply.github.com>
@@ -6,12 +6,12 @@ import warnings
 from pathlib import Path
 from typing import Dict, Union
 
-import torch
 import torch.distributed as dist
 
+from colossalai.accelerator import get_accelerator
 from colossalai.context import Config
 from colossalai.logging import get_dist_logger
-from colossalai.utils import IS_NPU_AVAILABLE, set_device, set_seed
+from colossalai.utils import set_seed
 
 
 def launch(
@@ -47,17 +47,18 @@ def launch(
     if rank == 0:
         warnings.warn("`config` is deprecated and will be removed soon.")
 
-    if IS_NPU_AVAILABLE and backend == "nccl":
-        backend = "hccl"
+    cur_accelerator = get_accelerator()
+
+    backend = cur_accelerator.communication_backend
 
     # init default process group
     init_method = f"tcp://[{host}]:{port}"
     dist.init_process_group(rank=rank, world_size=world_size, backend=backend, init_method=init_method)
 
     # set cuda device
-    if torch.cuda.is_available() or IS_NPU_AVAILABLE:
-        # if local rank is not given, calculate automatically
-        set_device(local_rank)
+    # if local rank is not given, calculate automatically
+    if cur_accelerator.support_set_device:
+        cur_accelerator.set_device(local_rank)
 
     set_seed(seed)
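For readers skimming the diff, here is a minimal sketch of the resulting device-setup flow. It uses only the calls visible above (`get_accelerator()`, `communication_backend`, `support_set_device`, `set_device`, `set_seed`); the standalone helper name `init_distributed`, its signature, and the backend examples in the comments are illustrative assumptions, not part of the commit:

import torch.distributed as dist

from colossalai.accelerator import get_accelerator
from colossalai.utils import set_seed


def init_distributed(rank: int, world_size: int, host: str, port: int, local_rank: int, seed: int) -> None:
    # A single accelerator object abstracts over device types: it reports its
    # own communication backend (e.g. "nccl" on CUDA, "hccl" on NPU), so the
    # old `if IS_NPU_AVAILABLE and backend == "nccl": backend = "hccl"`
    # special case is no longer needed.
    cur_accelerator = get_accelerator()
    backend = cur_accelerator.communication_backend

    # init the default process group, as in the diff
    init_method = f"tcp://[{host}]:{port}"
    dist.init_process_group(rank=rank, world_size=world_size, backend=backend, init_method=init_method)

    # Bind this process to its local device only when the accelerator
    # advertises device selection via the `support_set_device` flag.
    if cur_accelerator.support_set_device:
        cur_accelerator.set_device(local_rank)

    set_seed(seed)

The design point of the commit is that `launch` no longer hard-codes per-device branches; any backend implementing the accelerator interface goes through this same path.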