diff --git a/pilot/scene/chat_knowledge/custom/chat.py b/pilot/scene/chat_knowledge/custom/chat.py
index a094b9d6f..7600bab79 100644
--- a/pilot/scene/chat_knowledge/custom/chat.py
+++ b/pilot/scene/chat_knowledge/custom/chat.py
@@ -56,8 +56,9 @@ class ChatNewKnowledge(BaseChat):
         docs = self.knowledge_embedding_client.similar_search(
             self.current_user_input, VECTOR_SEARCH_TOP_K
         )
-        docs = docs[:2000]
-        input_values = {"context": docs, "question": self.current_user_input}
+        context = [d.page_content for d in docs]
+        context = context[:2000]
+        input_values = {"context": context, "question": self.current_user_input}
         return input_values
 
     def do_with_prompt_response(self, prompt_response):
diff --git a/pilot/scene/chat_knowledge/default/chat.py b/pilot/scene/chat_knowledge/default/chat.py
index 5d9c3ccf4..9652ac0ee 100644
--- a/pilot/scene/chat_knowledge/default/chat.py
+++ b/pilot/scene/chat_knowledge/default/chat.py
@@ -52,8 +52,9 @@ class ChatDefaultKnowledge(BaseChat):
         docs = self.knowledge_embedding_client.similar_search(
             self.current_user_input, VECTOR_SEARCH_TOP_K
         )
-        docs = docs[:2000]
-        input_values = {"context": docs, "question": self.current_user_input}
+        context = [d.page_content for d in docs]
+        context = context[:2000]
+        input_values = {"context": context, "question": self.current_user_input}
         return input_values
 
     def do_with_prompt_response(self, prompt_response):
diff --git a/pilot/scene/chat_knowledge/url/chat.py b/pilot/scene/chat_knowledge/url/chat.py
index 096df92cb..cc8d89d4a 100644
--- a/pilot/scene/chat_knowledge/url/chat.py
+++ b/pilot/scene/chat_knowledge/url/chat.py
@@ -52,14 +52,17 @@ class ChatUrlKnowledge(BaseChat):
         )
 
         # url soruce in vector
-        self.knowledge_embedding_client.knowledge_embedding()
+        if not self.knowledge_embedding_client.vector_exist():
+            self.knowledge_embedding_client.knowledge_embedding()
+            logger.info("url embedding success")
 
     def generate_input_values(self):
         docs = self.knowledge_embedding_client.similar_search(
             self.current_user_input, VECTOR_SEARCH_TOP_K
         )
-        docs = docs[:2000]
-        input_values = {"context": docs, "question": self.current_user_input}
+        context = [d.page_content for d in docs]
+        context = context[:2000]
+        input_values = {"context": context, "question": self.current_user_input}
         return input_values
 
     def do_with_prompt_response(self, prompt_response):
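The three hunks above apply the same fix to `generate_input_values`: `similar_search` returns document objects, and the old code passed those objects into the prompt context (slicing the result list with `docs[:2000]`), while the new code extracts each hit's `page_content` string first. The url scene additionally guards `knowledge_embedding()` behind `vector_exist()` so an already-built vector store is not re-embedded on every chat. Below is a minimal sketch of the retrieval step under these assumptions; `build_input_values`, `client`, and `max_chars` are illustrative names, and unlike the patch (which applies `[:2000]` to the list of strings) the sketch joins the hits and caps the combined character count as one way to respect a prompt-size budget.

```python
# Illustrative sketch only, not the project's exact API: mirrors the pattern
# the patch introduces in generate_input_values(). `build_input_values` and
# `max_chars` are hypothetical names added for this example.
def build_input_values(client, user_input: str, top_k: int, max_chars: int = 2000) -> dict:
    docs = client.similar_search(user_input, top_k)   # vector-store hits
    context = [d.page_content for d in docs]          # keep only the raw text
    # The patch slices the list itself; joining and capping by characters is a
    # stricter way to bound the prompt size.
    context_text = "\n".join(context)[:max_chars]
    return {"context": context_text, "question": user_input}
```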
diff --git a/pilot/scene/chat_knowledge/url/prompt.py b/pilot/scene/chat_knowledge/url/prompt.py
index 8eaafd61e..20a69d8b2 100644
--- a/pilot/scene/chat_knowledge/url/prompt.py
+++ b/pilot/scene/chat_knowledge/url/prompt.py
@@ -11,11 +11,10 @@ from pilot.scene.chat_normal.out_parser import NormalChatOutputParser
 
 CFG = Config()
 
-_DEFAULT_TEMPLATE = """ 基于以下已知的信息, 专业、简要的回答用户的问题,
-    如果无法从提供的内容中获取答案, 请说: "知识库中提供的内容不足以回答此问题" 禁止胡乱编造。
-    已知内容:
+_DEFAULT_TEMPLATE = """ Based on the known information, provide professional and concise answers to the user's questions. If the answer cannot be obtained from the provided content, please say: 'The information provided in the knowledge base is not sufficient to answer this question.' Fabrication is prohibited.
+    known information:
     {context}
-    问题:
+    question:
     {question}
 """
 
diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py
index 9b892595a..f7655fd7d 100644
--- a/pilot/server/webserver.py
+++ b/pilot/server/webserver.py
@@ -643,7 +643,7 @@ def knowledge_embedding_store(vs_id, files):
 
     knowledge_embedding_client.knowledge_embedding()
     logger.info("knowledge embedding success")
-    return os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, vs_id + ".vectordb")
+    return vs_id
 
 
 if __name__ == "__main__":
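The prompt.py hunk replaces the Chinese template with an English one that declares the same two variables, `{context}` and `{question}`, which are exactly the keys produced by `generate_input_values`. A small rendering sketch follows, assuming the template can be imported from the module shown in the diff header and formatted with plain `str.format`; the placeholder values are invented for illustration and the project's own prompt pipeline may wrap this step differently.

```python
from pilot.scene.chat_knowledge.url.prompt import _DEFAULT_TEMPLATE

# Hypothetical values standing in for real retrieval output and user input.
prompt_text = _DEFAULT_TEMPLATE.format(
    context="Passage 1 ...\nPassage 2 ...",
    question="What does the crawled page say about deployment?",
)
print(prompt_text)
```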
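The webserver.py hunk makes `knowledge_embedding_store` return the knowledge-space id rather than the on-disk `.vectordb` path. The sketch below is a hedged caller-side example under that assumption: `store_and_locate` is a hypothetical helper, the other names mirror those used in webserver.py, and the path expression is taken from the removed line for callers that still need the filesystem location.

```python
import os

# Hypothetical caller: knowledge_embedding_store and KNOWLEDGE_UPLOAD_ROOT_PATH
# are the names used in pilot/server/webserver.py.
def store_and_locate(vs_id: str, files) -> str:
    returned_id = knowledge_embedding_store(vs_id, files)  # now returns vs_id itself
    # Rebuild the old on-disk location from the id if it is still needed;
    # this mirrors the expression removed in the hunk above.
    return os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, returned_id, returned_id + ".vectordb")
```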