feat:chunk split method replace

chunk split method replace
aries_ckt 2023-06-29 17:26:05 +08:00
parent a50694bf6e
commit aad45a6b70
4 changed files with 57 additions and 25 deletions
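
Summary: each affected read() method drops the unconditional SpacyTextSplitter and instead chooses a splitter based on CFG.LANGUAGE: langchain's CharacterTextSplitter for English, the project's CHNDocumentSplitter for everything else. Below is a minimal sketch of the selection logic the four files now share, factored into a standalone helper for illustration; the helper name pick_text_splitter is hypothetical and not part of this commit, and the configuration values are taken from the diffs that follow.

from langchain.text_splitter import CharacterTextSplitter
from pilot.configs.config import Config
from pilot.embedding_engine.chn_document_splitter import CHNDocumentSplitter

CFG = Config()

def pick_text_splitter():
    # Hypothetical helper, not in this commit: mirrors the branch each
    # read() method now contains.
    if CFG.LANGUAGE == "en":
        # English: separator-based splitting, sized by raw character count.
        return CharacterTextSplitter(
            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
            chunk_overlap=20,
            length_function=len,
        )
    # Any other language: the project's sentence-aware Chinese splitter.
    return CHNDocumentSplitter(pdf=True, sentence_size=1000)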

@@ -6,7 +6,7 @@ from typing import List
 import markdown
 from bs4 import BeautifulSoup
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter
+from langchain.text_splitter import SpacyTextSplitter, CharacterTextSplitter

 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
@@ -30,12 +30,20 @@ class MarkdownEmbedding(SourceEmbedding):
     def read(self):
         """Load from markdown path."""
         loader = EncodeTextLoader(self.file_path)
-        textsplitter = SpacyTextSplitter(
-            pipeline="zh_core_web_sm",
-            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-            chunk_overlap=100,
-        )
-        return loader.load_and_split(textsplitter)
+        # text_splitter = SpacyTextSplitter(
+        #     pipeline="zh_core_web_sm",
+        #     chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+        #     chunk_overlap=100,
+        # )
+        if CFG.LANGUAGE == "en":
+            text_splitter = CharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
+        return loader.load_and_split(text_splitter)

     @register
     def data_process(self, documents: List[Document]):
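
For context on the English branch above: CharacterTextSplitter splits on a separator ("\n\n" by default) and then merges the pieces into chunks of at most chunk_size as measured by length_function, with consecutive chunks overlapping by chunk_overlap characters. An illustrative check, with sample text invented here rather than taken from the repo:

from langchain.text_splitter import CharacterTextSplitter

splitter = CharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
)
# Splits on blank lines, then packs the pieces into chunks of at most
# ~100 characters; adjacent chunks share up to 20 characters.
chunks = splitter.split_text("First paragraph.\n\nSecond paragraph.\n\nThird paragraph.")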

@@ -4,10 +4,11 @@ from typing import List
 from langchain.document_loaders import PyPDFLoader
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter
+from langchain.text_splitter import SpacyTextSplitter, CharacterTextSplitter

 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
+from pilot.embedding_engine.chn_document_splitter import CHNDocumentSplitter

 CFG = Config()
@@ -28,12 +29,20 @@ class PDFEmbedding(SourceEmbedding):
         # textsplitter = CHNDocumentSplitter(
         #     pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
         # )
-        textsplitter = SpacyTextSplitter(
-            pipeline="zh_core_web_sm",
-            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-            chunk_overlap=100,
-        )
-        return loader.load_and_split(textsplitter)
+        # textsplitter = SpacyTextSplitter(
+        #     pipeline="zh_core_web_sm",
+        #     chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+        #     chunk_overlap=100,
+        # )
+        if CFG.LANGUAGE == "en":
+            text_splitter = CharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
+        return loader.load_and_split(text_splitter)

     @register
     def data_process(self, documents: List[Document]):
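
Note that the else branch hard-codes sentence_size=1000, where the old commented-out CHNDocumentSplitter call in this file used CFG.KNOWLEDGE_CHUNK_SIZE. A usage sketch, under the assumption that CHNDocumentSplitter follows langchain's TextSplitter interface (a split_text method), which loader.load_and_split requires of any splitter it is given:

from pilot.embedding_engine.chn_document_splitter import CHNDocumentSplitter

# Assumption: split_text exists on CHNDocumentSplitter, as the
# TextSplitter interface requires; the input string is illustrative.
splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
chunks = splitter.split_text("第一句。第二句。第三句。")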

@@ -4,10 +4,11 @@ from typing import List
 from langchain.document_loaders import UnstructuredPowerPointLoader
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter
+from langchain.text_splitter import SpacyTextSplitter, CharacterTextSplitter

 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
+from pilot.embedding_engine.chn_document_splitter import CHNDocumentSplitter

 CFG = Config()
@@ -25,12 +26,20 @@ class PPTEmbedding(SourceEmbedding):
     def read(self):
         """Load from ppt path."""
         loader = UnstructuredPowerPointLoader(self.file_path)
-        textsplitter = SpacyTextSplitter(
-            pipeline="zh_core_web_sm",
-            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-            chunk_overlap=200,
-        )
-        return loader.load_and_split(textsplitter)
+        # textsplitter = SpacyTextSplitter(
+        #     pipeline="zh_core_web_sm",
+        #     chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+        #     chunk_overlap=200,
+        # )
+        if CFG.LANGUAGE == "en":
+            text_splitter = CharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
+        return loader.load_and_split(text_splitter)

     @register
     def data_process(self, documents: List[Document]):

@@ -4,6 +4,7 @@ from typing import List
 from langchain.document_loaders import PyPDFLoader, UnstructuredWordDocumentLoader
 from langchain.schema import Document
+from langchain.text_splitter import CharacterTextSplitter

 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
@@ -25,10 +26,15 @@ class WordEmbedding(SourceEmbedding):
     def read(self):
         """Load from word path."""
         loader = UnstructuredWordDocumentLoader(self.file_path)
-        textsplitter = CHNDocumentSplitter(
-            pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
-        )
-        return loader.load_and_split(textsplitter)
+        if CFG.LANGUAGE == "en":
+            text_splitter = CharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
+        return loader.load_and_split(text_splitter)

     @register
     def data_process(self, documents: List[Document]):
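
In all four files the split is applied through the loader. langchain's BaseLoader.load_and_split is a thin convenience over split_documents, so the new return line is equivalent to the following sketch:

docs = loader.load()                          # List[Document] from the source file
chunks = text_splitter.split_documents(docs)  # same result as loader.load_and_split(text_splitter)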