DB-GPT/pilot/embedding_engine/url_embedding.py
aries_ckt e6aa46fc87 refactor: refactor knowledge api
1. delete CFG in embedding_engine api
2. add a text_splitter param in embedding_engine api
2023-07-11 16:33:48 +08:00


from typing import List

from bs4 import BeautifulSoup
from langchain.document_loaders import WebBaseLoader
from langchain.schema import Document
from langchain.text_splitter import SpacyTextSplitter, RecursiveCharacterTextSplitter

from pilot.embedding_engine import SourceEmbedding, register


class URLEmbedding(SourceEmbedding):
    """URL embedding for reading documents from a URL."""

    def __init__(self, file_path, vector_store_config, text_splitter=None):
        """Initialize with the URL path."""
        super().__init__(file_path, vector_store_config, text_splitter=text_splitter)
        self.file_path = file_path
        self.vector_store_config = vector_store_config
        self.text_splitter = text_splitter

    @register
    def read(self):
        """Load and split the document from the URL."""
        loader = WebBaseLoader(web_path=self.file_path)
        if self.text_splitter is None:
            try:
                # Prefer sentence-aware splitting; falls back below if the
                # spaCy pipeline (zh_core_web_sm) is not installed.
                self.text_splitter = SpacyTextSplitter(
                    pipeline="zh_core_web_sm",
                    chunk_size=100,
                    chunk_overlap=100,
                )
            except Exception:
                self.text_splitter = RecursiveCharacterTextSplitter(
                    chunk_size=100, chunk_overlap=50
                )
        return loader.load_and_split(self.text_splitter)

    @register
    def data_process(self, documents: List[Document]):
        """Strip residual markup and newlines from the fetched documents."""
        for i, d in enumerate(documents):
            content = d.page_content.replace("\n", "")
            soup = BeautifulSoup(content, "html.parser")
            for tag in soup(["!doctype", "meta"]):
                tag.extract()
            documents[i].page_content = soup.get_text()
        return documents
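
For reference, a minimal usage sketch based only on what this file defines: it constructs a URLEmbedding with an explicit RecursiveCharacterTextSplitter (the text_splitter parameter added in this refactor) and calls the read/data_process steps directly. The vector_store_config contents and the example URL are illustrative placeholders; the exact keys SourceEmbedding expects are defined elsewhere in pilot/embedding_engine.

from langchain.text_splitter import RecursiveCharacterTextSplitter

from pilot.embedding_engine.url_embedding import URLEmbedding

# Illustrative config only; real key names depend on SourceEmbedding.
vector_store_config = {"vector_store_name": "url_demo"}

splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
embedding = URLEmbedding(
    file_path="https://example.com/docs/index.html",
    vector_store_config=vector_store_config,
    text_splitter=splitter,
)

docs = embedding.read()               # fetch the page and split it into chunks
docs = embedding.data_process(docs)   # strip residual markup from each chunk
print(len(docs), docs[0].page_content[:80])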