diff --git a/pilot/scene/chat_knowledge/refine_summary/prompt.py b/pilot/scene/chat_knowledge/refine_summary/prompt.py
index c2d7fb2cb..7d795b20a 100644
--- a/pilot/scene/chat_knowledge/refine_summary/prompt.py
+++ b/pilot/scene/chat_knowledge/refine_summary/prompt.py
@@ -12,11 +12,11 @@ CFG = Config()
 
 PROMPT_SCENE_DEFINE = """"""
 
-_DEFAULT_TEMPLATE_ZH = """根据提供的上下文信息,我们已经提供了一个到某一点的现有总结:{existing_answer}\n 请根据你之前推理的内容进行最终的总结,总结的时候可以详细点,回答的时候最好按照1.2.3.进行总结."""
+_DEFAULT_TEMPLATE_ZH = """根据提供的上下文信息,我们已经提供了一个到某一点的现有总结:{existing_answer}\n 请根据你之前推理的内容进行最终的总结,总结的时候需要有关键见解,并且总结回答的时候最好按照1.2.3.进行总结."""
 
 _DEFAULT_TEMPLATE_EN = """
 We have provided an existing summary up to a certain point: {existing_answer}\nWe have the opportunity to refine the existing summary (only if needed) with some more context below.
-\nBased on the previous reasoning, please summarize the final conclusion in accordance with points 1, 2, and 3. and etc.
+\nBased on the previous reasoning, please summarize the final conclusion in accordance with points 1, 2, and 3. and give me a list of bullet points with key insights and the most important facts.
 """
 
diff --git a/pilot/scene/chat_knowledge/v1/chat.py b/pilot/scene/chat_knowledge/v1/chat.py
index 62301ad12..150a562da 100644
--- a/pilot/scene/chat_knowledge/v1/chat.py
+++ b/pilot/scene/chat_knowledge/v1/chat.py
@@ -49,7 +49,7 @@ class ChatKnowledge(BaseChat):
         )
         self.max_token = (
             CFG.KNOWLEDGE_SEARCH_MAX_TOKEN
-            if self.space_context is None
+            if self.space_context is None or self.space_context.get("prompt") is None
             else int(self.space_context["prompt"]["max_token"])
         )
         vector_store_config = {
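
The `chat.py` hunk changes the `max_token` fallback so that a knowledge space whose `space_context` exists but has no `"prompt"` section no longer crashes on `space_context["prompt"]["max_token"]`; it now falls back to the global `CFG.KNOWLEDGE_SEARCH_MAX_TOKEN` in that case too. Below is a minimal sketch of that behaviour for reviewers. The helper name `resolve_max_token`, the default value `2048`, and the example `space_context` shapes are illustrative assumptions, not code from the repository.

```python
# Sketch of the fallback introduced in ChatKnowledge.__init__ (chat.py hunk).
# resolve_max_token and DEFAULT_KNOWLEDGE_SEARCH_MAX_TOKEN are stand-in names;
# in the repo the default comes from CFG.KNOWLEDGE_SEARCH_MAX_TOKEN.
from typing import Optional

DEFAULT_KNOWLEDGE_SEARCH_MAX_TOKEN = 2048  # assumed default value


def resolve_max_token(space_context: Optional[dict]) -> int:
    """Return the per-space max_token, falling back to the global default.

    Before this patch only `space_context is None` triggered the fallback, so a
    space_context without a "prompt" key raised on space_context["prompt"].
    The added `.get("prompt") is None` check makes that case fall back as well.
    """
    if space_context is None or space_context.get("prompt") is None:
        return DEFAULT_KNOWLEDGE_SEARCH_MAX_TOKEN
    return int(space_context["prompt"]["max_token"])


# How the three cases resolve (example space_context shapes are assumptions):
assert resolve_max_token(None) == DEFAULT_KNOWLEDGE_SEARCH_MAX_TOKEN
assert resolve_max_token({"embedding": {"topk": 5}}) == DEFAULT_KNOWLEDGE_SEARCH_MAX_TOKEN
assert resolve_max_token({"prompt": {"max_token": "4000"}}) == 4000
```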