chore: fix shutdown error when torch is not installed

This commit is contained in:
FangYin Cheng 2023-09-21 12:20:18 +08:00
parent c830598c9e
commit 896af4e16f
2 changed files with 5 additions and 2 deletions

View File

@@ -18,6 +18,7 @@ from pilot.logs import logger
def _check_multi_gpu_or_4bit_quantization(model_params: ModelParameters):
    """Return True if this model family is known to support multi-GPU
    loading or 4-bit quantization, judged by substring match on the
    (lowercased) model name.
    """
    # TODO: vicuna-v1.5 8-bit quantization info is slow
    # TODO: support wizardlm quantization, see: https://huggingface.co/WizardLM/WizardLM-13B-V1.2/discussions/5
    # TODO: support internlm quantization
    lowered = model_params.model_name.lower()
    for family in ("llama", "baichuan", "vicuna"):
        if family in lowered:
            return True
    return False

View File

@@ -2,10 +2,12 @@ import logging
def _clear_torch_cache(device="cuda"):
try:
import torch
except ImportError:
return
import gc
import torch
gc.collect()
if device != "cpu":
if torch.has_mps: