feat: update summary prompt

This commit is contained in:
aries_ckt 2023-11-17 18:12:28 +08:00
parent a4c8acac31
commit 439cd90e92
5 changed files with 35 additions and 6 deletions

View File

@ -12,7 +12,7 @@ CFG = Config()
class ExtractRefineSummary(BaseChat):
chat_scene: str = ChatScene.ExtractRefineSummary.value()
"""get summary by llm"""
"""extract final summary by llm"""
def __init__(self, chat_param: Dict):
""" """
@ -43,7 +43,7 @@ class ExtractRefineSummary(BaseChat):
yield last_output
def stream_call_reinforce_fn(self, text):
    """Wrap the model's summary text in a ``<summary>`` label tag."""
    return f"<summary>{text}</summary>"
@property

View File

@ -10,7 +10,8 @@ from pilot.scene.chat_knowledge.refine_summary.out_parser import (
CFG = Config()
PROMPT_SCENE_DEFINE = """"""
PROMPT_SCENE_DEFINE = """A chat between a curious user and an artificial intelligence assistant, who very familiar with database related knowledge.
The assistant gives helpful, detailed, professional and polite answers to the user's questions."""
_DEFAULT_TEMPLATE_ZH = """根据提供的上下文信息,我们已经提供了一个到某一点的现有总结:{existing_answer}\n 请根据你之前推理的内容进行最终的总结,并且总结回答的时候最好按照1.2.3.进行总结."""
@ -43,3 +44,4 @@ prompt = PromptTemplate(
)
CFG.prompt_template_registry.register(prompt, is_default=True)
from ..v1 import prompt_chatglm

View File

@ -26,3 +26,28 @@ class ExtractSummaryParser(BaseOutputParser):
def parse_view_response(self, speak, data) -> str:
    """Return the tool output unchanged for table-view rendering."""
    return data
def parse_model_nostream_resp(self, response: ResponseTye, sep: str) -> str:
    """Extract and clean the assistant's answer from a non-streaming response.

    Args:
        response: Raw model response, decoded by ``_parse_model_response``;
            may arrive as a JSON string or an already-parsed dict.
        sep: Separator used to split the response text into segments.

    Returns:
        The cleaned assistant reply, collapsed onto a single line.

    Raises:
        ValueError: If the model server reported a non-zero ``error_code``.
    """
    resp_obj_ex = _parse_model_response(response)
    if isinstance(resp_obj_ex, str):
        resp_obj_ex = json.loads(resp_obj_ex)
    if resp_obj_ex["error_code"] != 0:
        # f-string fix: the original concatenated str + error_code, which
        # raised TypeError (not the intended ValueError) for int codes.
        raise ValueError(f"Model server error!code={resp_obj_ex['error_code']}")
    all_text = resp_obj_ex["text"]
    tmp_resp = all_text.split(sep)
    # Pick the last segment containing an "assistant:" marker; when no
    # marker exists, last_index stays -1 and the final segment is used.
    last_index = -1
    for i, segment in enumerate(tmp_resp):
        if "assistant:" in segment:
            last_index = i
    ai_response = tmp_resp[last_index]
    for marker in ("assistant:", "Assistant:", "ASSISTANT:"):
        ai_response = ai_response.replace(marker, "")
    # Un-escape markdown characters the model may have backslash-escaped.
    # ("\\_" replaces the original invalid escape "\_" — same value, no
    # SyntaxWarning on Python 3.12+.)
    ai_response = ai_response.replace("\\_", "_")
    ai_response = ai_response.replace("\\*", "*")
    ai_response = ai_response.replace("\t", "")
    ai_response = ai_response.strip().replace("\\n", " ").replace("\n", " ")
    print("un_stream ai response:", ai_response)
    return ai_response

View File

@ -9,7 +9,8 @@ CFG = Config()
# PROMPT_SCENE_DEFINE = """You are an expert Q&A system that is trusted around the world.\nAlways answer the query using the provided context information, and not prior knowledge.\nSome rules to follow:\n1. Never directly reference the given context in your answer.\n2. Avoid statements like 'Based on the context, ...' or 'The context information ...' or anything along those lines."""
PROMPT_SCENE_DEFINE = """"""
PROMPT_SCENE_DEFINE = """A chat between a curious user and an artificial intelligence assistant, who very familiar with database related knowledge.
The assistant gives helpful, detailed, professional and polite answers to the user's questions."""
_DEFAULT_TEMPLATE_ZH = """请根据提供的上下文信息的进行精简地总结:
{context}
@ -49,3 +50,4 @@ prompt = PromptTemplate(
)
CFG.prompt_template_registry.register(prompt, is_default=True)
from ..v1 import prompt_chatglm

View File

@ -547,7 +547,7 @@ class KnowledgeService:
chat_param = {
"chat_session_id": conn_uid,
"current_user_input": "summary",
"current_user_input": "",
"select_param": doc,
"model_name": model_name,
}
@ -592,7 +592,7 @@ class KnowledgeService:
for doc in docs[0:max_iteration]:
chat_param = {
"chat_session_id": uuid.uuid1(),
"current_user_input": "summary",
"current_user_input": "",
"select_param": doc,
"model_name": model_name,
}