feat(model): Support Qwen1.5-32B (#1385)

Fangyin Cheng
2024-04-08 09:40:24 +08:00
committed by GitHub
parent d4da50330f
commit df36b947d1
6 changed files with 40 additions and 4 deletions


@@ -99,6 +99,13 @@ LLM_MODEL_CONFIG = {
"qwen-1.8b-chat-int8": os.path.join(MODEL_PATH, "wen-1_8B-Chat-Int8"),
# https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int4
"qwen-1.8b-chat-int4": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int4"),
# https://huggingface.co/Qwen/Qwen1.5-1.8B-Chat
"qwen1.5-1.8b-chat": os.path.join(MODEL_PATH, "Qwen1.5-1.8B-Chat"),
"qwen1.5-7b-chat": os.path.join(MODEL_PATH, "Qwen1.5-7B-Chat"),
"qwen1.5-14b-chat": os.path.join(MODEL_PATH, "Qwen1.5-14B-Chat"),
# https://huggingface.co/Qwen/Qwen1.5-32B-Chat
"qwen1.5-32b-chat": os.path.join(MODEL_PATH, "Qwen1.5-32B-Chat"),
"qwen1.5-72b-chat": os.path.join(MODEL_PATH, "Qwen1.5-72B-Chat"),
# (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
"wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
# wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf
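For context, a minimal sketch of how entries like the new "qwen1.5-32b-chat" mapping are typically consumed: the key is the model name selected at startup, and the value is the local directory holding the downloaded weights. The MODEL_PATH default and the resolve_model_path helper below are illustrative assumptions, not part of this repository.

import os

# MODEL_PATH mirrors the constant used in the diff above; the default here
# is an assumption for illustration, not the repository's actual value.
MODEL_PATH = os.getenv("MODEL_PATH", "./models")

# A subset of the mapping from the diff: model name -> local checkpoint dir.
LLM_MODEL_CONFIG = {
    "qwen1.5-14b-chat": os.path.join(MODEL_PATH, "Qwen1.5-14B-Chat"),
    "qwen1.5-32b-chat": os.path.join(MODEL_PATH, "Qwen1.5-32B-Chat"),
}

def resolve_model_path(name: str) -> str:
    """Hypothetical helper: map a registered model name to its weights dir."""
    try:
        path = LLM_MODEL_CONFIG[name]
    except KeyError:
        raise ValueError(f"unknown model {name!r}; add it to LLM_MODEL_CONFIG")
    if not os.path.isdir(path):
        raise FileNotFoundError(
            f"{name!r} maps to {path}, but no checkpoint was found there; "
            "download the weights from Hugging Face first"
        )
    return path

if __name__ == "__main__":
    # Prints the configured path; resolve_model_path additionally verifies
    # that the checkpoint directory actually exists on disk.
    print(LLM_MODEL_CONFIG["qwen1.5-32b-chat"])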