fix: custom chat exception (#257)

Fix custom chat exception handling:
raise an error message that the user can understand.
This commit is contained in:
magic.chen 2023-06-20 20:00:36 +08:00 committed by GitHub
commit 31e7ea8df9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 16 additions and 9 deletions

View File

@ -1,3 +1,5 @@
from chromadb.errors import NoIndexException
from pilot.scene.base_chat import BaseChat, logger, headers from pilot.scene.base_chat import BaseChat, logger, headers
from pilot.scene.base import ChatScene from pilot.scene.base import ChatScene
from pilot.common.sql_database import Database from pilot.common.sql_database import Database
@ -50,6 +52,7 @@ class ChatNewKnowledge(BaseChat):
) )
def generate_input_values(self): def generate_input_values(self):
try:
docs = self.knowledge_embedding_client.similar_search( docs = self.knowledge_embedding_client.similar_search(
self.current_user_input, CFG.KNOWLEDGE_SEARCH_TOP_SIZE self.current_user_input, CFG.KNOWLEDGE_SEARCH_TOP_SIZE
) )
@ -57,6 +60,11 @@ class ChatNewKnowledge(BaseChat):
self.metadata = [d.metadata for d in docs] self.metadata = [d.metadata for d in docs]
context = context[:2000] context = context[:2000]
input_values = {"context": context, "question": self.current_user_input} input_values = {"context": context, "question": self.current_user_input}
except NoIndexException:
raise ValueError(
f"you have no {self.knowledge_name} knowledge store, please upload your knowledge"
)
return input_values return input_values
def do_with_prompt_response(self, prompt_response): def do_with_prompt_response(self, prompt_response):

View File

@ -25,7 +25,6 @@ from pilot.configs.config import Config
from pilot.configs.model_config import ( from pilot.configs.model_config import (
DATASETS_DIR, DATASETS_DIR,
KNOWLEDGE_UPLOAD_ROOT_PATH, KNOWLEDGE_UPLOAD_ROOT_PATH,
LLM_MODEL_CONFIG,
LOGDIR, LOGDIR,
) )
@ -632,7 +631,7 @@ def knowledge_embedding_store(vs_id, files):
) )
knowledge_embedding_client = KnowledgeEmbedding( knowledge_embedding_client = KnowledgeEmbedding(
file_path=os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename), file_path=os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename),
model_name=LLM_MODEL_CONFIG["text2vec"], model_name=CFG.EMBEDDING_MODEL,
vector_store_config={ vector_store_config={
"vector_store_name": vector_store_name["vs_name"], "vector_store_name": vector_store_name["vs_name"],
"vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH, "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,