feat(model): Support Qwen2.5 coder models (#2139)

This commit is contained in:
Fangyin Cheng 2024-11-21 13:55:04 +08:00 committed by GitHub
parent 3ccfa94219
commit 9566f4e9f7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -183,7 +183,20 @@ LLM_MODEL_CONFIG = {
"qwen2.5-coder-1.5b-instruct": os.path.join(
MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct"
),
"qwen2.5-coder-32b-instruct": os.path.join(
MODEL_PATH, "Qwen2.5-Coder-32B-Instruct"
),
"qwen2.5-coder-14b-instruct": os.path.join(
MODEL_PATH, "Qwen2.5-Coder-14B-Instruct"
),
"qwen2.5-coder-3b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-3B-Instruct"),
"qwen2.5-coder-7b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-7B-Instruct"),
"qwen2.5-coder-1.5b-instruct": os.path.join(
MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct"
),
"qwen2.5-coder-0.5b-instruct": os.path.join(
MODEL_PATH, "Qwen2.5-Coder-0.5B-Instruct"
),
# (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
"wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
# wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf