mirror of https://github.com/csunny/DB-GPT.git
synced 2025-08-26 11:59:32 +00:00

feat(core): Support system code for feedback and prompt (#873)

This commit is contained in:
parent 711032c480
commit f4b91c53b2

@@ -127,6 +127,7 @@ DB-GPT是一个开源的数据库领域大模型框架。目的是构建大模
 - [internlm-chat-20b](https://huggingface.co/internlm/internlm-chat-20b)
 - [qwen-7b-chat](https://huggingface.co/Qwen/Qwen-7B-Chat)
 - [qwen-14b-chat](https://huggingface.co/Qwen/Qwen-14B-Chat)
+- [qwen-72b-chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
 - [wizardlm-13b](https://huggingface.co/WizardLM/WizardLM-13B-V1.2)
 - [orca-2-7b](https://huggingface.co/microsoft/Orca-2-7b)
 - [orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b)

@@ -88,6 +88,18 @@ LLM_MODEL_CONFIG = {
     "qwen-14b-chat-int8": os.path.join(MODEL_PATH, "Qwen-14B-Chat-Int8"),
     # https://huggingface.co/Qwen/Qwen-14B-Chat-Int4
     "qwen-14b-chat-int4": os.path.join(MODEL_PATH, "Qwen-14B-Chat-Int4"),
+    # https://huggingface.co/Qwen/Qwen-72B-Chat
+    "qwen-72b-chat": os.path.join(MODEL_PATH, "Qwen-72B-Chat"),
+    # https://huggingface.co/Qwen/Qwen-72B-Chat-Int8
+    "qwen-72b-chat-int8": os.path.join(MODEL_PATH, "Qwen-72B-Chat-Int8"),
+    # https://huggingface.co/Qwen/Qwen-72B-Chat-Int4
+    "qwen-72b-chat-int4": os.path.join(MODEL_PATH, "Qwen-72B-Chat-Int4"),
+    # https://huggingface.co/Qwen/Qwen-1_8B-Chat
+    "qwen-1.8b-chat": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat"),
+    # https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int8
+    "qwen-1.8b-chat-int8": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int8"),
+    # https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int4
+    "qwen-1.8b-chat-int4": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int4"),
     # (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
     "wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
     # wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf
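For orientation, a minimal sketch of how these new entries are consumed: DB-GPT resolves a model name to a local checkpoint path by a plain dict lookup. The MODEL_PATH value below is a placeholder assumption; the real value comes from the project's configuration.

    import os

    MODEL_PATH = "./models"  # assumption: real value is supplied by DB-GPT's config

    # Excerpt of the mapping extended above: model name -> local checkpoint path.
    LLM_MODEL_CONFIG = {
        "qwen-72b-chat": os.path.join(MODEL_PATH, "Qwen-72B-Chat"),
        "qwen-1.8b-chat-int4": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int4"),
    }

    # Picking a model name (e.g. via LLM_MODEL in .env) resolves to its path:
    print(LLM_MODEL_CONFIG["qwen-72b-chat"])  # ./models/Qwen-72B-Chat
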
@@ -49,7 +49,6 @@ class ChatFeedBackDao(BaseDao):

     def create_or_update_chat_feed_back(self, feed_back: FeedBackBody):
         # Todo: We need to have user information first.
-        def_user_name = ""

         session = self.get_session()
         chat_feed_back = ChatFeedBackEntity(
@@ -60,7 +59,7 @@ class ChatFeedBackDao(BaseDao):
             question=feed_back.question,
             knowledge_space=feed_back.knowledge_space,
             messages=feed_back.messages,
-            user_name=def_user_name,
+            user_name=feed_back.user_name,
             gmt_created=datetime.now(),
             gmt_modified=datetime.now(),
         )
@@ -76,7 +75,7 @@ class ChatFeedBackDao(BaseDao):
             result.question = feed_back.question
             result.knowledge_space = feed_back.knowledge_space
             result.messages = feed_back.messages
-            result.user_name = def_user_name
+            result.user_name = feed_back.user_name
             result.gmt_created = datetime.now()
             result.gmt_modified = datetime.now()
         else:
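Taken together, these hunks stop the DAO from hard-coding an empty user name and persist whatever the caller supplied in FeedBackBody. A usage sketch: the conv_uid/conv_index fields are an assumption (declared above the lines shown in the diff), and all values are illustrative.

    # Hypothetical caller; FeedBackBody and ChatFeedBackDao are the classes in this diff.
    feed_back = FeedBackBody(
        conv_uid="3c90f8...",  # assumption: id fields are defined earlier in the model
        conv_index=1,
        question="Which table holds orders?",
        score=5,
        ques_type="normal",
        user_name="alice",     # now persisted instead of the old def_user_name = ""
    )
    ChatFeedBackDao().create_or_update_chat_feed_back(feed_back)
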
@@ -1,4 +1,5 @@
 from pydantic.main import BaseModel
+from typing import Optional


 class FeedBackBody(BaseModel):
@@ -12,14 +13,16 @@ class FeedBackBody(BaseModel):
     """question: human question"""
     question: str

-    """knowledge_space: knowledge space"""
-    knowledge_space: str
-
     """score: rating of the llm's answer"""
     score: int

     """ques_type: question type"""
     ques_type: str

+    user_name: Optional[str] = None
+
     """messages: rating detail"""
-    messages: str
+    messages: Optional[str] = None
+
+    """knowledge_space: knowledge space"""
+    knowledge_space: Optional[str] = None

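Because user_name, messages, and knowledge_space are now Optional with a None default, clients may omit them and pydantic fills in None. A self-contained sketch of just the fields visible in this hunk (the model's id fields above line 12 are elided):

    from typing import Optional
    from pydantic import BaseModel

    class FeedBackBody(BaseModel):
        # Trimmed to the fields shown in this hunk.
        question: str
        score: int
        ques_type: str
        user_name: Optional[str] = None
        messages: Optional[str] = None
        knowledge_space: Optional[str] = None

    body = FeedBackBody(question="Is this answer right?", score=4, ques_type="normal")
    assert body.user_name is None and body.messages is None and body.knowledge_space is None
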
@@ -55,6 +55,7 @@ class PromptManageDao(BaseDao):
             prompt_name=prompt.prompt_name,
             content=prompt.content,
             user_name=prompt.user_name,
+            sys_code=prompt.sys_code,
             gmt_created=datetime.now(),
             gmt_modified=datetime.now(),
         )
@@ -83,6 +84,8 @@ class PromptManageDao(BaseDao):
             prompts = prompts.filter(
                 PromptManageEntity.prompt_name == query.prompt_name
             )
+        if query.sys_code is not None:
+            prompts = prompts.filter(PromptManageEntity.sys_code == query.sys_code)

         prompts = prompts.order_by(PromptManageEntity.gmt_created.desc())
         result = prompts.all()
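The new branch chains one more SQLAlchemy filter, so sys_code composes with whatever criteria were already applied. A sketch of a combined query; the table name in the SQL comment is an assumption, and the values are made up:

    # Hypothetical query: both filters set.
    query = PromptManageRequest(prompt_name="sql-helper", sys_code="dbgpt-web")
    prompts = prompt_manage_dao.get_prompts(query)
    # Generated SQL, approximately (table name assumed):
    #   SELECT * FROM prompt_manage
    #   WHERE prompt_name = 'sql-helper' AND sys_code = 'dbgpt-web'
    #   ORDER BY gmt_created DESC
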
@@ -1,24 +1,44 @@
 from typing import List
+from typing import Optional

 from pydantic import BaseModel


 class PromptManageRequest(BaseModel):
-    """chat_scene: for example: chat_with_db_execute, chat_excel, chat_with_db_qa"""
-
-    chat_scene: str = None
-    """sub_chat_scene: sub chat scene"""
-    sub_chat_scene: str = None
-    """prompt_type: common or private"""
-    prompt_type: str = None
-    """content: prompt content"""
-    content: str = None
-    """user_name: user name"""
-    user_name: str = None
-    """prompt_name: prompt name"""
-    prompt_name: str = None
+    """Model for managing prompts."""
+
+    chat_scene: Optional[str] = None
+    """
+    The chat scene, e.g. chat_with_db_execute, chat_excel, chat_with_db_qa.
+    """
+
+    sub_chat_scene: Optional[str] = None
+    """
+    The sub chat scene.
+    """
+
+    prompt_type: Optional[str] = None
+    """
+    The prompt type, either common or private.
+    """
+
+    content: Optional[str] = None
+    """
+    The prompt content.
+    """
+
+    user_name: Optional[str] = None
+    """
+    The user name.
+    """
+
+    sys_code: Optional[str] = None
+    """
+    System code.
+    """
+
+    prompt_name: Optional[str] = None
+    """
+    The prompt name.
+    """
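For reference, a minimal sketch of building the request with the new field. Every value below is made up; since all fields are optional, a request names only what it needs:

    # Illustrative only.
    request = PromptManageRequest(
        chat_scene="chat_with_db_execute",
        prompt_type="private",
        content="You are a careful SQL assistant.",
        user_name="alice",
        sys_code="dbgpt-web",   # the new field: tags which system owns the prompt
        prompt_name="sql-assistant-v1",
    )
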
@@ -17,9 +17,15 @@ class PromptManageService:
         query = PromptManageRequest(
             prompt_name=request.prompt_name,
         )
+        err_sys_str = ""
+        if request.sys_code:
+            query.sys_code = request.sys_code
+            err_sys_str = f" and sys_code: {request.sys_code}"
         prompt_name = prompt_manage_dao.get_prompts(query)
         if len(prompt_name) > 0:
-            raise Exception(f"prompt name:{request.prompt_name} have already named")
+            raise Exception(
+                f"prompt name: {request.prompt_name}{err_sys_str} already exists"
+            )
         prompt_manage_dao.create_prompt(request)
         return True

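With err_sys_str in place, the duplicate-name check and its error message are scoped to a system whenever sys_code is supplied, so the same prompt_name can coexist under different sys_code values. A sketch of the expected behavior (service construction and all values are assumptions):

    service = PromptManageService()  # assumption: no constructor arguments
    service.create_prompt(PromptManageRequest(prompt_name="p1", sys_code="sysA"))
    service.create_prompt(PromptManageRequest(prompt_name="p1", sys_code="sysB"))  # ok: different system
    service.create_prompt(PromptManageRequest(prompt_name="p1", sys_code="sysA"))
    # -> Exception: prompt name: p1 and sys_code: sysA already exists
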
@@ -32,6 +38,7 @@ class PromptManageService:
             prompt_type=request.prompt_type,
             prompt_name=request.prompt_name,
             user_name=request.user_name,
+            sys_code=request.sys_code,
         )
         responses = []
         prompts = prompt_manage_dao.get_prompts(query)