Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-17 07:00:15 +00:00)
feat:embedding_engine add text_splitter param
@@ -2,6 +2,7 @@ from typing import Optional
 from chromadb.errors import NotEnoughElementsException
 from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.text_splitter import TextSplitter

 from pilot.embedding_engine.knowledge_type import get_knowledge_embedding, KnowledgeType
 from pilot.vector_store.connector import VectorStoreConnector
@@ -21,6 +22,7 @@ class EmbeddingEngine:
         vector_store_config,
         knowledge_type: Optional[str] = KnowledgeType.DOCUMENT.value,
         knowledge_source: Optional[str] = None,
+        text_splitter: Optional[TextSplitter] = None,
     ):
         """Initialize with knowledge embedding client, model_name, vector_store_config, knowledge_type, knowledge_source"""
         self.knowledge_source = knowledge_source
@@ -29,6 +31,7 @@ class EmbeddingEngine:
         self.knowledge_type = knowledge_type
         self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
         self.vector_store_config["embeddings"] = self.embeddings
+        self.text_splitter = text_splitter

     def knowledge_embedding(self):
         """source embedding is chain process.read->text_split->data_process->index_store"""
@@ -47,7 +50,10 @@ class EmbeddingEngine:

     def init_knowledge_embedding(self):
         return get_knowledge_embedding(
-            self.knowledge_type, self.knowledge_source, self.vector_store_config
+            self.knowledge_type,
+            self.knowledge_source,
+            self.vector_store_config,
+            self.text_splitter,
         )

     def similar_search(self, text, topk):
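With this change, a caller can hand EmbeddingEngine any LangChain TextSplitter instead of relying on the engine's built-in chunking. A minimal usage sketch under stated assumptions: the model name, file path, and vector_store_config values below are placeholders; only vector_store_type and chroma_persist_path are keys actually visible in this commit, and vector_store_name is assumed.

from langchain.text_splitter import RecursiveCharacterTextSplitter

from pilot.embedding_engine.embedding_engine import EmbeddingEngine
from pilot.embedding_engine.knowledge_type import KnowledgeType

# Custom chunking: the splitter is injected rather than created inside the engine.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,  # placeholder value
    chunk_overlap=50,
    length_function=len,
)

engine = EmbeddingEngine(
    model_name="sentence-transformers/all-MiniLM-L6-v2",  # placeholder; DB-GPT normally resolves this from LLM_MODEL_CONFIG
    vector_store_config={
        "vector_store_name": "demo_space",  # assumed key, for illustration only
        "vector_store_type": "Chroma",
        "chroma_persist_path": "/tmp/knowledge",
    },
    knowledge_type=KnowledgeType.DOCUMENT.value,
    knowledge_source="./docs/example.md",  # placeholder path
    text_splitter=text_splitter,  # the new optional parameter
)

# Runs the read -> text_split -> data_process -> index_store chain described in the docstring.
engine.knowledge_embedding()

The hunks below thread the same parameter through get_knowledge_embedding so that every knowledge source type receives it.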
@@ -40,7 +40,9 @@ class KnowledgeType(Enum):
     YOUTUBE = "YOUTUBE"


-def get_knowledge_embedding(knowledge_type, knowledge_source, vector_store_config):
+def get_knowledge_embedding(
+    knowledge_type, knowledge_source, vector_store_config, text_splitter
+):
     match knowledge_type:
         case KnowledgeType.DOCUMENT.value:
             extension = "." + knowledge_source.rsplit(".", 1)[-1]
@@ -49,6 +51,7 @@ def get_knowledge_embedding(knowledge_type, knowledge_source, vector_store_confi
             embedding = knowledge_class(
                 knowledge_source,
                 vector_store_config=vector_store_config,
+                text_splitter=text_splitter,
                 **knowledge_args,
             )
             return embedding
@@ -57,12 +60,14 @@ def get_knowledge_embedding(knowledge_type, knowledge_source, vector_store_confi
             embedding = URLEmbedding(
                 file_path=knowledge_source,
                 vector_store_config=vector_store_config,
+                text_splitter=text_splitter,
             )
             return embedding
         case KnowledgeType.TEXT.value:
             embedding = StringEmbedding(
                 file_path=knowledge_source,
                 vector_store_config=vector_store_config,
+                text_splitter=text_splitter,
             )
             return embedding
         case KnowledgeType.OSS.value:
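Because the new argument is typed as LangChain's TextSplitter, anything implementing split_text can be threaded through this dispatcher, not just the built-in splitters. A hypothetical sketch (ParagraphSplitter is an illustration, not part of this commit):

from typing import List

from langchain.text_splitter import TextSplitter


class ParagraphSplitter(TextSplitter):
    """Toy splitter: one chunk per blank-line-separated paragraph."""

    def split_text(self, text: str) -> List[str]:
        return [part.strip() for part in text.split("\n\n") if part.strip()]


splitter = ParagraphSplitter()
print(splitter.split_text("First paragraph.\n\nSecond paragraph."))
# -> ['First paragraph.', 'Second paragraph.']

An instance like this can be passed as text_splitter to EmbeddingEngine (or directly to get_knowledge_embedding) and reaches URLEmbedding, StringEmbedding, and the extension-specific document classes shown above. The final hunks wire a concrete splitter choice into KnowledgeService.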
@@ -1,6 +1,8 @@
 import threading
 from datetime import datetime

+from langchain.text_splitter import RecursiveCharacterTextSplitter, SpacyTextSplitter
+
 from pilot.configs.config import Config
 from pilot.configs.model_config import LLM_MODEL_CONFIG, KNOWLEDGE_UPLOAD_ROOT_PATH
 from pilot.embedding_engine.embedding_engine import EmbeddingEngine
@@ -122,6 +124,24 @@ class KnowledgeService:
            raise Exception(
                f" doc:{doc.doc_name} status is {doc.status}, can not sync"
            )
+
+        if CFG.LANGUAGE == "en":
+            text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            try:
+                text_splitter = SpacyTextSplitter(
+                    pipeline="zh_core_web_sm",
+                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_overlap=100,
+                )
+            except Exception:
+                text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                )
        client = EmbeddingEngine(
            knowledge_source=doc.content,
            knowledge_type=doc.doc_type.upper(),
@@ -131,6 +151,7 @@ class KnowledgeService:
                "vector_store_type": CFG.VECTOR_STORE_TYPE,
                "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
            },
+            text_splitter=text_splitter,
        )
        chunk_docs = client.read()
        # update document status
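KnowledgeService now chooses the splitter from the configured language: RecursiveCharacterTextSplitter for English, otherwise SpacyTextSplitter with the zh_core_web_sm pipeline, falling back to the recursive splitter if spaCy raises (for example when the pipeline is not installed; it can usually be fetched with python -m spacy download zh_core_web_sm). A standalone sketch of the same selection logic, with module-level constants standing in for the CFG values:

from langchain.text_splitter import RecursiveCharacterTextSplitter, SpacyTextSplitter

CHUNK_SIZE = 500   # stand-in for CFG.KNOWLEDGE_CHUNK_SIZE
LANGUAGE = "zh"    # stand-in for CFG.LANGUAGE


def build_text_splitter(language: str = LANGUAGE):
    """Pick a splitter the same way the sync path above does."""
    if language == "en":
        return RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=20,
            length_function=len,
        )
    try:
        # Needs the spaCy Chinese pipeline installed.
        return SpacyTextSplitter(
            pipeline="zh_core_web_sm",
            chunk_size=CHUNK_SIZE,
            chunk_overlap=100,
        )
    except Exception:
        # Fall back to character-based splitting when spaCy is unavailable.
        return RecursiveCharacterTextSplitter(chunk_size=CHUNK_SIZE, chunk_overlap=50)

The chosen splitter travels with the EmbeddingEngine constructed in the hunk above, alongside the vector store settings.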