Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-12 13:42:23 +00:00)

Merge branch 'dev' into dbgpt_doc

Commit f9366cb395
@@ -108,4 +108,4 @@ PROXY_SERVER_URL=http://127.0.0.1:3000/proxy_address
 #*******************************************************************#
 # ** SUMMARY_CONFIG
 #*******************************************************************#
-SUMMARY_CONFIG=VECTOR
+SUMMARY_CONFIG=FAST
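The template's default summary mode switches from VECTOR to FAST. A minimal sketch of how such a flag is typically consumed, assuming os.getenv-style loading (the actual wiring through DB-GPT's Config object is not shown in this diff):

import os

# Hypothetical reader for the flag above; everything except the
# SUMMARY_CONFIG name is an assumption for illustration.
summary_mode = os.getenv("SUMMARY_CONFIG", "FAST")
if summary_mode == "VECTOR":
    pass  # summarize via vector-store retrieval
else:
    pass  # FAST: summarize directly, skipping retrieval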
@@ -56,8 +56,9 @@ class ChatNewKnowledge(BaseChat):
         docs = self.knowledge_embedding_client.similar_search(
             self.current_user_input, VECTOR_SEARCH_TOP_K
         )
-        docs = docs[:2000]
-        input_values = {"context": docs, "question": self.current_user_input}
+        context = [d.page_content for d in docs]
+        context = context[:2000]
+        input_values = {"context": context, "question": self.current_user_input}
         return input_values

     def do_with_prompt_response(self, prompt_response):
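The old code sliced the raw search results (docs[:2000]), so Document objects rather than their text flowed into the prompt. The new code extracts page_content first. A self-contained sketch of the before/after behavior (Document here is a stand-in dataclass, not the project's class):

from dataclasses import dataclass

@dataclass
class Document:
    page_content: str

docs = [Document("first passage"), Document("second passage")]

# Before: slicing Document objects; the prompt received object reprs.
old_context = docs[:2000]

# After: plain strings go into the prompt.
context = [d.page_content for d in docs]
context = context[:2000]  # note: still bounds list length, not characters;
                          # a char cap would be "".join(context)[:2000]
input_values = {"context": context, "question": "example question"}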
@@ -17,7 +17,7 @@ from pilot.configs.model_config import (
     VECTOR_SEARCH_TOP_K,
 )

-from pilot.scene.chat_normal.prompt import prompt
+from pilot.scene.chat_knowledge.default.prompt import prompt
 from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding

 CFG = Config()
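This import swap points ChatDefaultKnowledge at its own knowledge prompt module instead of reusing the chat_normal one, presumably so the knowledge-specific template changed later in this commit actually takes effect for knowledge chats.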
@@ -52,8 +52,9 @@ class ChatDefaultKnowledge(BaseChat):
         docs = self.knowledge_embedding_client.similar_search(
             self.current_user_input, VECTOR_SEARCH_TOP_K
         )
-        docs = docs[:2000]
-        input_values = {"context": docs, "question": self.current_user_input}
+        context = [d.page_content for d in docs]
+        context = context[:2000]
+        input_values = {"context": context, "question": self.current_user_input}
         return input_values

     def do_with_prompt_response(self, prompt_response):
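Same fix as in ChatNewKnowledge above: extract page_content strings before truncating, so the prompt receives text rather than Document objects.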
@@ -52,14 +52,17 @@ class ChatUrlKnowledge(BaseChat):
         )

         # url soruce in vector
-        self.knowledge_embedding_client.knowledge_embedding()
+        if not self.knowledge_embedding_client.vector_exist():
+            self.knowledge_embedding_client.knowledge_embedding()
+            logger.info("url embedding success")

     def generate_input_values(self):
         docs = self.knowledge_embedding_client.similar_search(
             self.current_user_input, VECTOR_SEARCH_TOP_K
         )
-        docs = docs[:2000]
-        input_values = {"context": docs, "question": self.current_user_input}
+        context = [d.page_content for d in docs]
+        context = context[:2000]
+        input_values = {"context": context, "question": self.current_user_input}
         return input_values

     def do_with_prompt_response(self, prompt_response):
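Besides the same context fix, ChatUrlKnowledge now guards embedding behind vector_exist(), so chatting repeatedly against one URL no longer re-embeds (and duplicates) its vectors. A minimal sketch of the guard, with the client treated as an opaque stand-in for the KnowledgeEmbedding facade:

import logging

logger = logging.getLogger(__name__)

def ensure_url_embedded(client):
    # Embed only when the named vector store is missing; idempotent on re-runs.
    if not client.vector_exist():
        client.knowledge_embedding()
        logger.info("url embedding success")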
@@ -11,11 +11,10 @@ from pilot.scene.chat_normal.out_parser import NormalChatOutputParser

 CFG = Config()

-_DEFAULT_TEMPLATE = """ 基于以下已知的信息, 专业、简要的回答用户的问题,
-如果无法从提供的内容中获取答案, 请说: "知识库中提供的内容不足以回答此问题" 禁止胡乱编造。
-已知内容:
+_DEFAULT_TEMPLATE = """ Based on the known information, provide professional and concise answers to the user's questions. If the answer cannot be obtained from the provided content, please say: 'The information provided in the knowledge base is not sufficient to answer this question.' Fabrication is prohibited.
+known information:
 {context}
-问题:
+question:
 {question}
 """
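The default knowledge prompt moves from Chinese to English; the removed lines say essentially the same thing (answer professionally and concisely from the known content, and refuse rather than fabricate when the content is insufficient). A sketch of how a {context}/{question} template is rendered; plain str.format is an assumption here, since the diff does not show DB-GPT's prompt plumbing:

template = """Based on the known information, answer the user's question.
known information:
{context}
question:
{question}
"""

# Hypothetical rendering step; the real code routes this through the
# prompt object imported above.
filled = template.format(context="retrieved passages...", question="example?")
print(filled)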
@@ -643,7 +643,7 @@ def knowledge_embedding_store(vs_id, files):
     knowledge_embedding_client.knowledge_embedding()

     logger.info("knowledge embedding success")
-    return os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, vs_id + ".vectordb")
+    return vs_id


 if __name__ == "__main__":
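knowledge_embedding_store now returns the logical store name rather than an on-disk .vectordb path, so callers no longer depend on the storage layout. Illustrative contrast only; the KNOWLEDGE_UPLOAD_ROOT_PATH value is assumed:

import os

KNOWLEDGE_UPLOAD_ROOT_PATH = "/data/knowledge"  # assumed for illustration
vs_id = "my_space"

# Old contract: callers received a concrete filesystem path.
old_result = os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, vs_id + ".vectordb")
# New contract: callers receive the id and resolve storage themselves if needed.
new_result = vs_id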
@@ -82,6 +82,9 @@ class KnowledgeEmbedding:
     def similar_search(self, text, topk):
         return self.knowledge_embedding_client.similar_search(text, topk)

+    def vector_exist(self):
+        return self.knowledge_embedding_client.vector_name_exist()
+
     def knowledge_persist_initialization(self, append_mode):
         documents = self._load_knownlege(self.file_path)
         self.vector_client = VectorStoreConnector(
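vector_exist() is the facade method the URL-chat guard above relies on: KnowledgeEmbedding simply forwards to the wrapped client's vector_name_exist(). A self-contained sketch of the delegation pattern (both classes here are hypothetical stand-ins, not the project's code):

class FakeClient:
    # Stand-in for a backend embedding client.
    def vector_name_exist(self):
        return True

class KnowledgeEmbeddingFacade:
    def __init__(self, client):
        self.knowledge_embedding_client = client

    def vector_exist(self):
        # Delegate; callers never touch the backend directly.
        return self.knowledge_embedding_client.vector_name_exist()

assert KnowledgeEmbeddingFacade(FakeClient()).vector_exist() is True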