diff --git a/.env.template b/.env.template index b06ef215e..8c5f55ca9 100644 --- a/.env.template +++ b/.env.template @@ -108,4 +108,4 @@ PROXY_SERVER_URL=http://127.0.0.1:3000/proxy_address #*******************************************************************# # ** SUMMARY_CONFIG #*******************************************************************# -SUMMARY_CONFIG=VECTOR \ No newline at end of file +SUMMARY_CONFIG=FAST \ No newline at end of file diff --git a/pilot/scene/chat_knowledge/custom/chat.py b/pilot/scene/chat_knowledge/custom/chat.py index a094b9d6f..7600bab79 100644 --- a/pilot/scene/chat_knowledge/custom/chat.py +++ b/pilot/scene/chat_knowledge/custom/chat.py @@ -56,8 +56,9 @@ class ChatNewKnowledge(BaseChat): docs = self.knowledge_embedding_client.similar_search( self.current_user_input, VECTOR_SEARCH_TOP_K ) - docs = docs[:2000] - input_values = {"context": docs, "question": self.current_user_input} + context = [d.page_content for d in docs] + context = context[:2000] + input_values = {"context": context, "question": self.current_user_input} return input_values def do_with_prompt_response(self, prompt_response): diff --git a/pilot/scene/chat_knowledge/default/chat.py b/pilot/scene/chat_knowledge/default/chat.py index 5d9c3ccf4..1a482b154 100644 --- a/pilot/scene/chat_knowledge/default/chat.py +++ b/pilot/scene/chat_knowledge/default/chat.py @@ -17,7 +17,7 @@ from pilot.configs.model_config import ( VECTOR_SEARCH_TOP_K, ) -from pilot.scene.chat_normal.prompt import prompt +from pilot.scene.chat_knowledge.default.prompt import prompt from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding CFG = Config() @@ -52,8 +52,9 @@ class ChatDefaultKnowledge(BaseChat): docs = self.knowledge_embedding_client.similar_search( self.current_user_input, VECTOR_SEARCH_TOP_K ) - docs = docs[:2000] - input_values = {"context": docs, "question": self.current_user_input} + context = [d.page_content for d in docs] + context = context[:2000] + input_values = 
{"context": context, "question": self.current_user_input} return input_values def do_with_prompt_response(self, prompt_response): diff --git a/pilot/scene/chat_knowledge/url/chat.py b/pilot/scene/chat_knowledge/url/chat.py index 096df92cb..cc8d89d4a 100644 --- a/pilot/scene/chat_knowledge/url/chat.py +++ b/pilot/scene/chat_knowledge/url/chat.py @@ -52,14 +52,17 @@ class ChatUrlKnowledge(BaseChat): ) # url soruce in vector - self.knowledge_embedding_client.knowledge_embedding() + if not self.knowledge_embedding_client.vector_exist(): + self.knowledge_embedding_client.knowledge_embedding() + logger.info("url embedding success") def generate_input_values(self): docs = self.knowledge_embedding_client.similar_search( self.current_user_input, VECTOR_SEARCH_TOP_K ) - docs = docs[:2000] - input_values = {"context": docs, "question": self.current_user_input} + context = [d.page_content for d in docs] + context = context[:2000] + input_values = {"context": context, "question": self.current_user_input} return input_values def do_with_prompt_response(self, prompt_response): diff --git a/pilot/scene/chat_knowledge/url/prompt.py b/pilot/scene/chat_knowledge/url/prompt.py index 8eaafd61e..20a69d8b2 100644 --- a/pilot/scene/chat_knowledge/url/prompt.py +++ b/pilot/scene/chat_knowledge/url/prompt.py @@ -11,11 +11,10 @@ from pilot.scene.chat_normal.out_parser import NormalChatOutputParser CFG = Config() -_DEFAULT_TEMPLATE = """ 基于以下已知的信息, 专业、简要的回答用户的问题, - 如果无法从提供的内容中获取答案, 请说: "知识库中提供的内容不足以回答此问题" 禁止胡乱编造。 - 已知内容: +_DEFAULT_TEMPLATE = """ Based on the known information, provide professional and concise answers to the user's questions. If the answer cannot be obtained from the provided content, please say: 'The information provided in the knowledge base is not sufficient to answer this question.' 
Fabrication is prohibited. + known information: {context} - 问题: + question: {question} """ diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py index 9b892595a..f7655fd7d 100644 --- a/pilot/server/webserver.py +++ b/pilot/server/webserver.py @@ -643,7 +643,7 @@ def knowledge_embedding_store(vs_id, files): knowledge_embedding_client.knowledge_embedding() logger.info("knowledge embedding success") - return os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, vs_id + ".vectordb") + return vs_id if __name__ == "__main__": diff --git a/pilot/source_embedding/knowledge_embedding.py b/pilot/source_embedding/knowledge_embedding.py index bb5331434..f58742ee9 100644 --- a/pilot/source_embedding/knowledge_embedding.py +++ b/pilot/source_embedding/knowledge_embedding.py @@ -82,6 +82,9 @@ class KnowledgeEmbedding: def similar_search(self, text, topk): return self.knowledge_embedding_client.similar_search(text, topk) + def vector_exist(self): + return self.knowledge_embedding_client.vector_name_exist() + def knowledge_persist_initialization(self, append_mode): documents = self._load_knownlege(self.file_path) self.vector_client = VectorStoreConnector(