fix(LLM extractor): handle empty response in LLM extractor (#2480)

Co-authored-by: tam <tanwe@fulan.com.cn>
Authored by Tam, 2025-03-18 11:53:41 +08:00, committed by GitHub
parent f209b4c643
commit 88bbd695d0
2 changed files with 5 additions and 2 deletions


@@ -99,7 +99,7 @@ class KnowledgeService:
         if request.vector_type == "VectorStore":
             request.vector_type = self.rag_config.storage.vector.get_type_value()
         if request.vector_type == "KnowledgeGraph":
-            knowledge_space_name_pattern = r"^[a-zA-Z0-9\u4e00-\u9fa5]+$"
+            knowledge_space_name_pattern = r"^[_a-zA-Z0-9\u4e00-\u9fa5]+$"
             if not re.match(knowledge_space_name_pattern, request.name):
                 raise Exception(f"space name:{request.name} invalid")
         spaces = knowledge_space_dao.get_knowledge_space(query)
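
The only functional change in this hunk is the leading underscore added to the character class: knowledge space names may now contain underscores alongside ASCII alphanumerics and CJK characters. A standalone sketch of what the relaxed pattern accepts (only the regex is taken from the diff; the sample names are illustrative):

import re

# Pattern after this commit: underscore, ASCII alphanumerics, and CJK characters.
knowledge_space_name_pattern = r"^[_a-zA-Z0-9\u4e00-\u9fa5]+$"

for name in ("my_space", "知识库1", "my space", "space-1"):
    ok = bool(re.match(knowledge_space_name_pattern, name))
    print(f"{name!r}: {'valid' if ok else 'invalid'}")
# 'my_space': valid   (rejected before this change)
# '知识库1': valid
# 'my space': invalid (whitespace still rejected)
# 'space-1': invalid  (hyphens still rejected)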


@@ -84,7 +84,10 @@ class LLMExtractor(ExtractorBase, ABC):
             logger.error(f"request llm failed ({code}) {reason}")
             return []
-        return self._parse_response(response.text, limit)
+        if response.has_text:
+            return self._parse_response(response.text, limit)
+        else:
+            return []
 
     def truncate(self):
         """Do nothing by default."""