mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-05 02:51:07 +00:00)
model server fix message model
@@ -13,14 +13,16 @@ logger = build_logger("webserver", LOGDIR + "DbChatOutputParser.log")
 
 class PluginAction(NamedTuple):
     command: Dict
-    thoughts: Dict
+    speak: str
+    reasoning:str
+    thoughts: str
 
 
 class PluginChatOutputParser(BaseOutputParser):
     def parse_prompt_response(self, model_out_text) -> T:
         response = json.loads(super().parse_prompt_response(model_out_text))
-        command, thoughts = response["command"], response["thoughts"]
-        return PluginAction(command, thoughts)
+        command, thoughts, speak, reasoning = response["command"], response["thoughts"], response["speak"], response["reasoning"]
+        return PluginAction(command, speak, reasoning, thoughts)
 
     def parse_view_response(self, speak, data) -> str:
         ### tool out data to table view
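For context, a minimal runnable sketch (not part of the commit) of how a model reply maps onto the reworked PluginAction. The sample JSON and the standalone class below are illustrative stand-ins; the real parser first cleans the raw model text via super().parse_prompt_response() before calling json.loads.

# Sketch only: standalone stand-ins for the types touched by this hunk.
import json
from typing import Dict, NamedTuple

class PluginAction(NamedTuple):
    command: Dict
    speak: str
    reasoning: str
    thoughts: str

# Made-up example of a model reply that already matches the expected JSON shape.
raw_model_out = """{
    "thoughts": "I should list the available tables first",
    "reasoning": "Schema information is needed before writing SQL",
    "speak": "Let me check which tables exist.",
    "command": {"name": "show_tables", "args": {"db": "test"}}
}"""

response = json.loads(raw_model_out)
command, thoughts, speak, reasoning = (
    response["command"],
    response["thoughts"],
    response["speak"],
    response["reasoning"],
)
action = PluginAction(command, speak, reasoning, thoughts)
print(action.command["name"], "->", action.speak)

Because PluginAction is a NamedTuple, the positional call PluginAction(command, speak, reasoning, thoughts) must match the declared field order, which is why the return statement changes together with the field list.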
@@ -37,13 +37,9 @@ Ensure the response is correct json and can be parsed by Python json.loads
 """
 
 RESPONSE_FORMAT = {
-    "thoughts": {
-        "text": "thought",
+    "thoughts": "thought text",
     "reasoning": "reasoning",
-    "plan": "- short bulleted\n- list that conveys\n- long-term plan",
-    "criticism": "constructive self-criticism",
     "speak": "thoughts summary to say to user",
-    },
     "command": {"name": "command name", "args": {"arg name": "value"}},
 }
 
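The flattened RESPONSE_FORMAT now exposes exactly the four top-level keys that parse_prompt_response reads (thoughts, reasoning, speak, command). Below is a hedged sketch of how such a format dict can be rendered into the prompt as a JSON hint; the json.dumps rendering is an assumption for illustration, not something shown in this diff.

# Sketch only: turn the flat format dict into a response-format instruction.
import json

RESPONSE_FORMAT = {
    "thoughts": "thought text",
    "reasoning": "reasoning",
    "speak": "thoughts summary to say to user",
    "command": {"name": "command name", "args": {"arg name": "value"}},
}

format_hint = json.dumps(RESPONSE_FORMAT, indent=4)
print("Respond in the following JSON format:\n" + format_hint)

# The keys the parser unpacks are exactly the top-level keys of the format dict.
assert set(RESPONSE_FORMAT) == {"thoughts", "reasoning", "speak", "command"}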
@@ -8,6 +8,8 @@ from pilot.common.schema import SeparatorStyle
 
 from pilot.scene.chat_normal.out_parser import NormalChatOutputParser
 
+PROMPT_SCENE_DEFINE = """A chat between a curious user and an artificial intelligence assistant, who very familiar with database related knowledge.
+The assistant gives helpful, detailed, professional and polite answers to the user's questions. """
 
 CFG = Config()
 
@@ -19,7 +21,7 @@ prompt = PromptTemplate(
     template_scene=ChatScene.ChatNormal.value,
     input_variables=["input"],
     response_format=None,
-    template_define=None,
+    template_define=PROMPT_SCENE_DEFINE,
     template=None,
     stream_out=PROMPT_NEED_NEED_STREAM_OUT,
     output_parser=NormalChatOutputParser(
@@ -27,5 +29,4 @@ prompt = PromptTemplate(
     ),
 )
 
-
 CFG.prompt_templates.update({prompt.template_scene: prompt})
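A small sketch (with simplified stand-in classes, not the real pilot types) of the registration pattern in the last line: CFG.prompt_templates acts as a scene-name -> template registry, so a ChatNormal session can later look up the prompt that now carries the PROMPT_SCENE_DEFINE preamble via template_define. The scene key "chat_normal" is an assumed stand-in value for ChatScene.ChatNormal.value.

# Sketch only: register a template under its scene key and read back the preamble.
from dataclasses import dataclass

@dataclass
class FakePromptTemplate:          # stand-in for pilot's PromptTemplate
    template_scene: str
    template_define: str

PROMPT_SCENE_DEFINE = (
    "A chat between a curious user and an artificial intelligence assistant, "
    "who very familiar with database related knowledge."
)

prompt = FakePromptTemplate(template_scene="chat_normal", template_define=PROMPT_SCENE_DEFINE)

prompt_templates = {}                                    # stands in for CFG.prompt_templates
prompt_templates.update({prompt.template_scene: prompt})

print(prompt_templates["chat_normal"].template_define)   # the preamble now reaches the scene prompt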