Mirror of https://github.com/csunny/DB-GPT.git
feat(model): Support Qwen2MoE (#1439)
Co-authored-by: Fangyin Cheng <staneyffer@gmail.com>
@@ -158,6 +158,7 @@ At present, we have introduced several key features to showcase our current capa
 We offer extensive model support, including dozens of large language models (LLMs) from both open-source and API agents, such as LLaMA/LLaMA2, Baichuan, ChatGLM, Wenxin, Tongyi, Zhipu, and many more.

 - News
+  - 🔥🔥🔥 [Qwen1.5-MoE-A2.7B-Chat](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B-Chat)
   - 🔥🔥🔥 [Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)
   - 🔥🔥🔥 [Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct)
   - 🔥🔥🔥 [CodeQwen1.5-7B-Chat](https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat)
@@ -152,6 +152,7 @@
 Extensive model support, including dozens of large language models from open source and API proxies, such as LLaMA/LLaMA2, Baichuan, ChatGLM, Wenxin, Tongyi, Zhipu, and more. The following models are currently supported:

 - Newly supported models
+  - 🔥🔥🔥 [Qwen1.5-MoE-A2.7B-Chat](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B-Chat)
   - 🔥🔥🔥 [Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)
   - 🔥🔥🔥 [Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct)
   - 🔥🔥🔥 [CodeQwen1.5-7B-Chat](https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat)
@@ -114,6 +114,8 @@ LLM_MODEL_CONFIG = {
     "qwen1.5-72b-chat": os.path.join(MODEL_PATH, "Qwen1.5-72B-Chat"),
     # https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat
     "codeqwen1.5-7b-chat": os.path.join(MODEL_PATH, "CodeQwen1.5-7B-Chat"),
+    # https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B-Chat
+    "qwen1.5-moe-a2.7b-chat": os.path.join(MODEL_PATH, "Qwen1.5-MoE-A2.7B-Chat"),
     # (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
     "wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
     # wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf
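For context, each entry in LLM_MODEL_CONFIG is a plain mapping from a lowercase model name to the directory where the checkpoint is expected on disk. Below is a minimal, self-contained sketch of that lookup pattern; the MODEL_PATH default and the resolve_model_path helper are illustrative assumptions, not DB-GPT APIs:

import os

# Illustrative stand-ins for DB-GPT's model configuration module.
MODEL_PATH = os.getenv("MODEL_PATH", "./models")  # assumed default location
LLM_MODEL_CONFIG = {
    "codeqwen1.5-7b-chat": os.path.join(MODEL_PATH, "CodeQwen1.5-7B-Chat"),
    "qwen1.5-moe-a2.7b-chat": os.path.join(MODEL_PATH, "Qwen1.5-MoE-A2.7B-Chat"),
}

def resolve_model_path(model_name: str) -> str:
    """Map a lowercase model name to its expected local checkpoint directory."""
    try:
        return LLM_MODEL_CONFIG[model_name]
    except KeyError:
        raise ValueError(f"Unknown model name: {model_name}") from None

print(resolve_model_path("qwen1.5-moe-a2.7b-chat"))  # ./models/Qwen1.5-MoE-A2.7B-Chat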
@@ -267,6 +267,33 @@ class QwenAdapter(NewHFChatModelAdapter):
             lower_model_name_or_path
             and "qwen" in lower_model_name_or_path
             and "1.5" in lower_model_name_or_path
+            and "moe" not in lower_model_name_or_path
         )


+class QwenMoeAdapter(NewHFChatModelAdapter):
+    """
+    https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B
+
+    TODO: There are problems with quantization.
+    """
+
+    support_4bit: bool = False
+    support_8bit: bool = False
+
+    def check_transformer_version(self, current_version: str) -> None:
+        print(f"Checking version: Current version {current_version}")
+        # Compare versions numerically; a plain string comparison would
+        # mis-order releases such as "4.9.0" vs "4.40.0".
+        from packaging import version
+
+        if version.parse(current_version) < version.parse("4.40.0"):
+            raise ValueError(
+                "Qwen 1.5 MoE requires transformers>=4.40.0; please upgrade your transformers package."
+            )
+
+    def do_match(self, lower_model_name_or_path: Optional[str] = None):
+        return (
+            lower_model_name_or_path
+            and "qwen" in lower_model_name_or_path
+            and "1.5" in lower_model_name_or_path
+            and "moe" in lower_model_name_or_path
+        )
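Taken together, the extra "moe" checks partition the Qwen 1.5 family between the two adapters: the dense QwenAdapter now refuses MoE checkpoints, and QwenMoeAdapter accepts only them. A standalone sketch of that matching logic follows; the stub classes are assumptions standing in for the real adapters, whose base class NewHFChatModelAdapter is not reproduced here:

from typing import Optional

class QwenAdapterStub:
    """Matches dense Qwen 1.5 checkpoints only."""
    def do_match(self, lower_model_name_or_path: Optional[str] = None):
        return (
            lower_model_name_or_path
            and "qwen" in lower_model_name_or_path
            and "1.5" in lower_model_name_or_path
            and "moe" not in lower_model_name_or_path
        )

class QwenMoeAdapterStub:
    """Matches only the MoE variants."""
    def do_match(self, lower_model_name_or_path: Optional[str] = None):
        return (
            lower_model_name_or_path
            and "qwen" in lower_model_name_or_path
            and "1.5" in lower_model_name_or_path
            and "moe" in lower_model_name_or_path
        )

dense, moe = QwenAdapterStub(), QwenMoeAdapterStub()
assert dense.do_match("qwen1.5-72b-chat") and not moe.do_match("qwen1.5-72b-chat")
assert moe.do_match("qwen1.5-moe-a2.7b-chat") and not dense.do_match("qwen1.5-moe-a2.7b-chat")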
@@ -314,4 +341,5 @@ register_model_adapter(SOLARAdapter)
 register_model_adapter(GemmaAdapter)
 register_model_adapter(StarlingLMAdapter)
 register_model_adapter(QwenAdapter)
+register_model_adapter(QwenMoeAdapter)
 register_model_adapter(Llama3Adapter)
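register_model_adapter presumably records each adapter so that a model name can later be dispatched to the first adapter whose do_match accepts it. A minimal sketch of that registry pattern, assuming first-match-wins iteration order (the list and helper names here are illustrative, not DB-GPT internals):

from typing import List, Optional

_MODEL_ADAPTERS: List[object] = []  # assumed module-level registry

def register_model_adapter(adapter_cls) -> None:
    """Instantiate an adapter class and remember it for later matching."""
    _MODEL_ADAPTERS.append(adapter_cls())

def find_adapter(model_name_or_path: str):
    """Return the first registered adapter whose do_match accepts the name."""
    lowered = model_name_or_path.lower()
    for adapter in _MODEL_ADAPTERS:
        if adapter.do_match(lowered):
            return adapter
    return None

Reusing the stub adapters from the previous sketch, find_adapter("Qwen1.5-MoE-A2.7B-Chat") would return the QwenMoeAdapterStub instance, since only its do_match succeeds for a name containing "moe".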