feat(core): Support system code for feedback and prompt (#873)

This commit is contained in:
FangYin Cheng 2023-11-30 19:38:43 +08:00 committed by GitHub
parent 711032c480
commit f4b91c53b2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 65 additions and 20 deletions

View File

@ -127,6 +127,7 @@ DB-GPT是一个开源的数据库领域大模型框架。目的是构建大模
- [internlm-chat-20b](https://huggingface.co/internlm/internlm-chat-20b)
- [qwen-7b-chat](https://huggingface.co/Qwen/Qwen-7B-Chat)
- [qwen-14b-chat](https://huggingface.co/Qwen/Qwen-14B-Chat)
- [qwen-72b-chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
- [wizardlm-13b](https://huggingface.co/WizardLM/WizardLM-13B-V1.2)
- [orca-2-7b](https://huggingface.co/microsoft/Orca-2-7b)
- [orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b)

View File

@ -88,6 +88,18 @@ LLM_MODEL_CONFIG = {
"qwen-14b-chat-int8": os.path.join(MODEL_PATH, "Qwen-14B-Chat-Int8"),
# https://huggingface.co/Qwen/Qwen-14B-Chat-Int4
"qwen-14b-chat-int4": os.path.join(MODEL_PATH, "Qwen-14B-Chat-Int4"),
# https://huggingface.co/Qwen/Qwen-72B-Chat
"qwen-72b-chat": os.path.join(MODEL_PATH, "Qwen-72B-Chat"),
# https://huggingface.co/Qwen/Qwen-72B-Chat-Int8
"qwen-72b-chat-int8": os.path.join(MODEL_PATH, "Qwen-72B-Chat-Int8"),
# https://huggingface.co/Qwen/Qwen-72B-Chat-Int4
"qwen-72b-chat-int4": os.path.join(MODEL_PATH, "Qwen-72B-Chat-Int4"),
# https://huggingface.co/Qwen/Qwen-1_8B-Chat
"qwen-1.8b-chat": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat"),
# https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int8
"qwen-1.8b-chat-int8": os.path.join(MODEL_PATH, "wen-1_8B-Chat-Int8"),
# https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int4
"qwen-1.8b-chat-int4": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int4"),
# (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
"wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
# wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf

View File

@ -49,7 +49,6 @@ class ChatFeedBackDao(BaseDao):
def create_or_update_chat_feed_back(self, feed_back: FeedBackBody):
# Todo: We need to have user information first.
def_user_name = ""
session = self.get_session()
chat_feed_back = ChatFeedBackEntity(
@ -60,7 +59,7 @@ class ChatFeedBackDao(BaseDao):
question=feed_back.question,
knowledge_space=feed_back.knowledge_space,
messages=feed_back.messages,
user_name=def_user_name,
user_name=feed_back.user_name,
gmt_created=datetime.now(),
gmt_modified=datetime.now(),
)
@ -76,7 +75,7 @@ class ChatFeedBackDao(BaseDao):
result.question = feed_back.question
result.knowledge_space = feed_back.knowledge_space
result.messages = feed_back.messages
result.user_name = def_user_name
result.user_name = feed_back.user_name
result.gmt_created = datetime.now()
result.gmt_modified = datetime.now()
else:

View File

@ -1,4 +1,5 @@
from pydantic.main import BaseModel
from typing import Optional
class FeedBackBody(BaseModel):
@ -12,14 +13,16 @@ class FeedBackBody(BaseModel):
"""question: human question"""
question: str
"""knowledge_space: knowledge space"""
knowledge_space: str
"""score: rating of the llm's answer"""
score: int
"""ques_type: question type"""
ques_type: str
user_name: Optional[str] = None
"""messages: rating detail"""
messages: str
messages: Optional[str] = None
"""knowledge_space: knowledge space"""
knowledge_space: Optional[str] = None

View File

@ -55,6 +55,7 @@ class PromptManageDao(BaseDao):
prompt_name=prompt.prompt_name,
content=prompt.content,
user_name=prompt.user_name,
sys_code=prompt.sys_code,
gmt_created=datetime.now(),
gmt_modified=datetime.now(),
)
@ -83,6 +84,8 @@ class PromptManageDao(BaseDao):
prompts = prompts.filter(
PromptManageEntity.prompt_name == query.prompt_name
)
if query.sys_code is not None:
prompts = prompts.filter(PromptManageEntity.sys_code == query.sys_code)
prompts = prompts.order_by(PromptManageEntity.gmt_created.desc())
result = prompts.all()

View File

@ -1,24 +1,44 @@
from typing import List
from pydantic import BaseModel
from typing import Optional
from pydantic import BaseModel
class PromptManageRequest(BaseModel):
"""chat_scene: for example: chat_with_db_execute, chat_excel, chat_with_db_qa"""
"""Model for managing prompts."""
chat_scene: str = None
chat_scene: Optional[str] = None
"""
The chat scene, e.g. chat_with_db_execute, chat_excel, chat_with_db_qa.
"""
"""sub_chat_scene: sub chat scene"""
sub_chat_scene: str = None
sub_chat_scene: Optional[str] = None
"""
The sub chat scene.
"""
"""prompt_type: common or private"""
prompt_type: str = None
prompt_type: Optional[str] = None
"""
The prompt type, either common or private.
"""
"""content: prompt content"""
content: str = None
content: Optional[str] = None
"""
The prompt content.
"""
"""user_name: user name"""
user_name: str = None
user_name: Optional[str] = None
"""
The user name.
"""
"""prompt_name: prompt name"""
prompt_name: str = None
sys_code: Optional[str] = None
"""
System code
"""
prompt_name: Optional[str] = None
"""
The prompt name.
"""

View File

@ -17,9 +17,15 @@ class PromptManageService:
query = PromptManageRequest(
prompt_name=request.prompt_name,
)
err_sys_str = ""
if request.sys_code:
query.sys_code = request.sys_code
err_sys_str = f" and sys_code: {request.sys_code}"
prompt_name = prompt_manage_dao.get_prompts(query)
if len(prompt_name) > 0:
raise Exception(f"prompt name:{request.prompt_name} have already named")
raise Exception(
f"prompt name: {request.prompt_name}{err_sys_str} have already named"
)
prompt_manage_dao.create_prompt(request)
return True
@ -32,6 +38,7 @@ class PromptManageService:
prompt_type=request.prompt_type,
prompt_name=request.prompt_name,
user_name=request.user_name,
sys_code=request.sys_code,
)
responses = []
prompts = prompt_manage_dao.get_prompts(query)