Mirror of https://github.com/csunny/DB-GPT.git, synced 2025-09-10 13:29:35 +00:00
feature:knowledge embedding update
pilot/source_embedding/knowledge_embedding.py

@@ -1,20 +1,35 @@
 import os
 
+from bs4 import BeautifulSoup
+from langchain.document_loaders import PyPDFLoader, TextLoader, markdown
 from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import Chroma
+from pilot.configs.model_config import DATASETS_DIR
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 from pilot.source_embedding.csv_embedding import CSVEmbedding
 from pilot.source_embedding.markdown_embedding import MarkdownEmbedding
 from pilot.source_embedding.pdf_embedding import PDFEmbedding
+import markdown
 
 
 class KnowledgeEmbedding:
-    def __init__(self, file_path, model_name, vector_store_config):
+    def __init__(self, file_path, model_name, vector_store_config, local_persist=True):
         """Initialize with Loader url, model_name, vector_store_config"""
         self.file_path = file_path
         self.model_name = model_name
         self.vector_store_config = vector_store_config
         self.vector_store_type = "default"
-        self.knowledge_embedding_client = self.init_knowledge_embedding()
+        self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
+        self.local_persist = local_persist
+        if not self.local_persist:
+            self.knowledge_embedding_client = self.init_knowledge_embedding()
 
     def knowledge_embedding(self):
         self.knowledge_embedding_client.source_embedding()
 
+    def knowledge_embedding_batch(self):
+        self.knowledge_embedding_client.batch_embedding()
+
     def init_knowledge_embedding(self):
         if self.file_path.endswith(".pdf"):
             embedding = PDFEmbedding(file_path=self.file_path, model_name=self.model_name,
@@ -31,4 +46,65 @@ class KnowledgeEmbedding:
         return embedding
 
     def similar_search(self, text, topk):
         return self.knowledge_embedding_client.similar_search(text, topk)
 
+    def knowledge_persist_initialization(self, append_mode):
+        vector_name = self.vector_store_config["vector_store_name"]
+        persist_dir = os.path.join(self.vector_store_config["vector_store_path"], vector_name + ".vectordb")
+        print("vector db path: ", persist_dir)
+        if os.path.exists(persist_dir):
+            if append_mode:
+                print("append knowledge, return vector store")
+                new_documents = self._load_knownlege(self.file_path)
+                vector_store = Chroma.from_documents(documents=new_documents,
+                                                     embedding=self.embeddings,
+                                                     persist_directory=persist_dir)
+            else:
+                print("directly return vector store")
+                vector_store = Chroma(persist_directory=persist_dir, embedding_function=self.embeddings)
+        else:
+            print(vector_name + " is a new vector store, knowledge begins to load...")
+            documents = self._load_knownlege(self.file_path)
+            vector_store = Chroma.from_documents(documents=documents,
+                                                 embedding=self.embeddings,
+                                                 persist_directory=persist_dir)
+        vector_store.persist()
+        return vector_store
+
+    def _load_knownlege(self, path):
+        docments = []
+        for root, _, files in os.walk(path, topdown=False):
+            for file in files:
+                filename = os.path.join(root, file)
+                docs = self._load_file(filename)
+                new_docs = []
+                for doc in docs:
+                    doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
+                    print("doc is embedding...", doc.metadata)
+                    new_docs.append(doc)
+                docments += new_docs
+        return docments
+
+    def _load_file(self, filename):
+        if filename.lower().endswith(".md"):
+            loader = TextLoader(filename)
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+            docs = loader.load_and_split(text_splitter)
+            i = 0
+            for d in docs:
+                content = markdown.markdown(d.page_content)
+                soup = BeautifulSoup(content, 'html.parser')
+                for tag in soup(['!doctype', 'meta', 'i.fa']):
+                    tag.extract()
+                docs[i].page_content = soup.get_text()
+                docs[i].page_content = docs[i].page_content.replace("\n", " ")
+                i += 1
+        elif filename.lower().endswith(".pdf"):
+            loader = PyPDFLoader(filename)
+            textsplitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+            docs = loader.load_and_split(textsplitter)
+        else:
+            loader = TextLoader(filename)
+            text_splitor = CHNDocumentSplitter(sentence_size=100)
+            docs = loader.load_and_split(text_splitor)
+        return docs
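For context, a minimal usage sketch of the updated class (the model name, paths, and config values are illustrative, not part of this commit; it assumes the vector_store_name/vector_store_path keys read above):

# Hypothetical usage of KnowledgeEmbedding after this change.
client = KnowledgeEmbedding(
    file_path="./datasets",                 # directory walked by _load_knownlege
    model_name="sentence-transformers/all-MiniLM-L6-v2",  # illustrative model
    vector_store_config={
        "vector_store_name": "default",
        "vector_store_path": "./vectordb",
    },
    local_persist=True,                     # skip the per-file embedding client
)
vector_store = client.knowledge_persist_initialization(append_mode=False)
docs = vector_store.similarity_search("what is DB-GPT?", k=5)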
pilot/source_embedding/markdown_embedding.py

@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
+import os
 from typing import List
 
 from bs4 import BeautifulSoup
@@ -8,6 +9,7 @@ from langchain.schema import Document
 import markdown
 
 from pilot.source_embedding import SourceEmbedding, register
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 
 
 class MarkdownEmbedding(SourceEmbedding):
@@ -24,7 +26,28 @@ class MarkdownEmbedding(SourceEmbedding):
     def read(self):
         """Load from markdown path."""
         loader = TextLoader(self.file_path)
-        return loader.load()
+        text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+        return loader.load_and_split(text_splitter)
 
     @register
+    def read_batch(self):
+        """Load from markdown path."""
+        docments = []
+        for root, _, files in os.walk(self.file_path, topdown=False):
+            for file in files:
+                filename = os.path.join(root, file)
+                loader = TextLoader(filename)
+                # text_splitor = CHNDocumentSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
+                # docs = loader.load_and_split()
+                docs = loader.load()
+                # update the metadata
+                new_docs = []
+                for doc in docs:
+                    doc.metadata = {"source": doc.metadata["source"].replace(self.file_path, "")}
+                    print("doc is embedding ... ", doc.metadata)
+                    new_docs.append(doc)
+                docments += new_docs
+        return docments
+
+    @register
     def data_process(self, documents: List[Document]):
@@ -35,7 +58,7 @@ class MarkdownEmbedding(SourceEmbedding):
             for tag in soup(['!doctype', 'meta', 'i.fa']):
                 tag.extract()
             documents[i].page_content = soup.get_text()
-            documents[i].page_content = documents[i].page_content.replace(" ", "").replace("\n", " ")
+            documents[i].page_content = documents[i].page_content.replace("\n", " ")
             i += 1
         return documents
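As a standalone illustration of the cleanup data_process applies to each markdown document (the sample text is made up; only the markdown and bs4 packages are needed):

# Render markdown to HTML, strip non-content tags, and flatten newlines.
import markdown
from bs4 import BeautifulSoup

raw = "# Title\n\nSome *markdown* body text."
html = markdown.markdown(raw)              # markdown -> HTML
soup = BeautifulSoup(html, "html.parser")
for tag in soup(["!doctype", "meta"]):     # drop non-content tags, as above
    tag.extract()
text = soup.get_text().replace("\n", " ")  # one line of plain text
print(text)                                # Title Some markdown body text.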
pilot/source_embedding/pdf_embedding.py

@@ -6,7 +6,7 @@ from langchain.document_loaders import PyPDFLoader
 from langchain.schema import Document
 
 from pilot.source_embedding import SourceEmbedding, register
-from pilot.source_embedding.chinese_text_splitter import ChineseTextSplitter
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 
 
 class PDFEmbedding(SourceEmbedding):
@@ -23,7 +23,7 @@ class PDFEmbedding(SourceEmbedding):
     def read(self):
         """Load from pdf path."""
         loader = PyPDFLoader(self.file_path)
-        textsplitter = ChineseTextSplitter(pdf=True, sentence_size=100)
+        textsplitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
         return loader.load_and_split(textsplitter)
 
     @register
pilot/source_embedding/source_embedding.py

@@ -76,3 +76,15 @@ class SourceEmbedding(ABC):
             self.text_to_vector(text)
         if 'index_to_store' in registered_methods:
             self.index_to_store(text)
+
+    def batch_embedding(self):
+        if 'read_batch' in registered_methods:
+            text = self.read_batch()
+        if 'data_process' in registered_methods:
+            text = self.data_process(text)
+        if 'text_split' in registered_methods:
+            self.text_split(text)
+        if 'text_to_vector' in registered_methods:
+            self.text_to_vector(text)
+        if 'index_to_store' in registered_methods:
+            self.index_to_store(text)
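batch_embedding mirrors source_embedding above and assumes the @register decorator records method names in registered_methods, so only the steps a subclass actually implements are run. A self-contained sketch of that pattern (the real decorator lives elsewhere in pilot/source_embedding; this is a guess at its behavior, not the project's code):

registered_methods = []

def register(method):
    # Record the step name at class-definition time.
    registered_methods.append(method.__name__)
    return method

class MiniSource:
    @register
    def read_batch(self):
        return ["doc-1", "doc-2"]

    @register
    def data_process(self, text):
        return [t.upper() for t in text]

    def batch_embedding(self):
        text = None
        if 'read_batch' in registered_methods:
            text = self.read_batch()
        if 'data_process' in registered_methods:
            text = self.data_process(text)
        return text

print(MiniSource().batch_embedding())  # ['DOC-1', 'DOC-2']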
pilot/source_embedding/url_embedding.py

@@ -1,4 +1,7 @@
 from typing import List
+
+from langchain.text_splitter import CharacterTextSplitter
+
 from pilot.source_embedding import SourceEmbedding, register
 
 from bs4 import BeautifulSoup
@@ -20,7 +23,8 @@ class URLEmbedding(SourceEmbedding):
     def read(self):
         """Load from url path."""
         loader = WebBaseLoader(web_path=self.file_path)
-        return loader.load()
+        text_splitor = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
+        return loader.load_and_split(text_splitor)
 
     @register
     def data_process(self, documents: List[Document]):
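In isolation, the new URL read path behaves roughly as below (the URL is illustrative; WebBaseLoader fetches the page and CharacterTextSplitter chunks the text):

from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import CharacterTextSplitter

loader = WebBaseLoader(web_path="https://example.com")
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
chunks = loader.load_and_split(splitter)
print(len(chunks), chunks[0].metadata)  # chunk count and source metadata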