Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-02 00:28:00 +00:00)

fix: merge embedding

This commit is contained in commit 85cb42f5f0.

.gitignore (vendored): 3 changes
@@ -136,4 +136,5 @@ dmypy.json
 .DS_Store
 logs
 nltk_data
 .vectordb
+pilot/data/

@@ -20,8 +20,10 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 LLM_MODEL_CONFIG = {
     "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"),
     "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"),
+    "text2vec": os.path.join(MODEL_PATH, "text2vec-large-chinese"),
     "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2")
 }

 # Load model config
 ISLOAD_8BIT = True
 ISDEBUG = False
@@ -29,4 +31,4 @@ ISDEBUG = False

 VECTOR_SEARCH_TOP_K = 3
 VS_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "vs_store")
-KNOWLEDGE_UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "knowledge")
+KNOWLEDGE_UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")

@@ -247,6 +247,13 @@ conv_qa_prompt_template = """ 基于以下已知的信息, 专业、简要的回
 {question}
 """

+# conv_qa_prompt_template = """ Please provide the known information so that I can professionally and briefly answer the user's question. If the answer cannot be obtained from the provided content,
+# please say: "The information provided in the knowledge base is insufficient to answer this question." Fabrication is prohibited.。
+# known information:
+# {context}
+# question:
+# {question}
+# """
 default_conversation = conv_one_shot

 conversation_sql_mode ={

@@ -19,7 +19,7 @@ from langchain import PromptTemplate
 ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 sys.path.append(ROOT_PATH)

-from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG
+from pilot.configs.model_config import DB_SETTINGS, KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG, TOP_RETURN_SIZE
 from pilot.server.vectordb_qa import KnownLedgeBaseQA
 from pilot.connections.mysql import MySQLOperator
 from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding
@@ -256,11 +256,13 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re

     if mode == conversation_types["custome"] and not db_selector:
         persist_dir = os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vector_store_name["vs_name"] + ".vectordb")
-        print("向量数据库持久化地址: ", persist_dir)
+        print("vector store path: ", persist_dir)
-        knowledge_embedding_client = KnowledgeEmbedding(file_path="", model_name=LLM_MODEL_CONFIG["sentence-transforms"], vector_store_config={"vector_store_name": vector_store_name["vs_name"],
+        knowledge_embedding_client = KnowledgeEmbedding(file_path="", model_name=LLM_MODEL_CONFIG["text2vec"],
+                                                        local_persist=False,
+                                                        vector_store_config={"vector_store_name": vector_store_name["vs_name"],
                                                                              "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
         query = state.messages[-2][1]
-        docs = knowledge_embedding_client.similar_search(query, 1)
+        docs = knowledge_embedding_client.similar_search(query, TOP_RETURN_SIZE)
         context = [d.page_content for d in docs]
         prompt_template = PromptTemplate(
             template=conv_qa_prompt_template,
@@ -269,6 +271,20 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re
         result = prompt_template.format(context="\n".join(context), question=query)
         state.messages[-2][1] = result
         prompt = state.get_prompt()
+        print("prompt length:" + str(len(prompt)))
+
+        if len(prompt) > 4000:
+            logger.info("prompt length greater than 4000, rebuild")
+            context = context[:2000]
+            prompt_template = PromptTemplate(
+                template=conv_qa_prompt_template,
+                input_variables=["context", "question"]
+            )
+            result = prompt_template.format(context="\n".join(context), question=query)
+            state.messages[-2][1] = result
+            prompt = state.get_prompt()
+            print("new prompt length:" + str(len(prompt)))
+
         state.messages[-2][1] = query
     skip_echo_len = len(prompt.replace("</s>", " ")) + 1

@@ -435,7 +451,7 @@ def build_single_model_ui():
             max_output_tokens = gr.Slider(
                 minimum=0,
                 maximum=1024,
-                value=1024,
+                value=512,
                 step=64,
                 interactive=True,
                 label="最大输出Token数",
@@ -585,7 +601,8 @@ def knowledge_embedding_store(vs_id, files):
         shutil.move(file.name, os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename))
     knowledge_embedding_client = KnowledgeEmbedding(
         file_path=os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename),
-        model_name=LLM_MODEL_CONFIG["sentence-transforms"],
+        model_name=LLM_MODEL_CONFIG["text2vec"],
+        local_persist=False,
         vector_store_config={
             "vector_store_name": vector_store_name["vs_name"],
             "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
@@ -610,8 +627,7 @@ if __name__ == "__main__":
     # 配置初始化
     cfg = Config()

-    dbs = get_database_list()
+    # dbs = get_database_list()

     cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))

     # 加载插件可执行命令

pilot/source_embedding/chn_document_splitter.py: new file, 59 lines
@@ -0,0 +1,59 @@
+import re
+from typing import List
+from langchain.text_splitter import CharacterTextSplitter
+
+
+class CHNDocumentSplitter(CharacterTextSplitter):
+    def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
+        super().__init__(**kwargs)
+        self.pdf = pdf
+        self.sentence_size = sentence_size
+
+    # def split_text_version2(self, text: str) -> List[str]:
+    #     if self.pdf:
+    #         text = re.sub(r"\n{3,}", "\n", text)
+    #         text = re.sub('\s', ' ', text)
+    #         text = text.replace("\n\n", "")
+    #     sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
+    #     sent_list = []
+    #     for ele in sent_sep_pattern.split(text):
+    #         if sent_sep_pattern.match(ele) and sent_list:
+    #             sent_list[-1] += ele
+    #         elif ele:
+    #             sent_list.append(ele)
+    #     return sent_list
+
+    def split_text(self, text: str) -> List[str]:
+        if self.pdf:
+            text = re.sub(r"\n{3,}", r"\n", text)
+            text = re.sub('\s', " ", text)
+            text = re.sub("\n\n", "", text)
+
+        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # 单字符断句符
+        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # 英文省略号
+        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # 中文省略号
+        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
+        # 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号
+        text = text.rstrip()  # 段尾如果有多余的\n就去掉它
+        # 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。
+        ls = [i for i in text.split("\n") if i]
+        for ele in ls:
+            if len(ele) > self.sentence_size:
+                ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
+                ele1_ls = ele1.split("\n")
+                for ele_ele1 in ele1_ls:
+                    if len(ele_ele1) > self.sentence_size:
+                        ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
+                        ele2_ls = ele_ele2.split("\n")
+                        for ele_ele2 in ele2_ls:
+                            if len(ele_ele2) > self.sentence_size:
+                                ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
+                                ele2_id = ele2_ls.index(ele_ele2)
+                                ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
+                                                                                                       ele2_id + 1:]
+                        ele_id = ele1_ls.index(ele_ele1)
+                        ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
+
+                id = ls.index(ele)
+                ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
+        return ls

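For orientation, here is a minimal usage sketch of the new splitter; the sample text and parameter values below are invented for illustration and are not part of this commit:

    # Hypothetical example, assuming the module path introduced above.
    from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter

    splitter = CHNDocumentSplitter(pdf=False, sentence_size=100)
    # split_text() first cuts on Chinese/English sentence-ending punctuation,
    # then further splits any chunk longer than sentence_size characters.
    for chunk in splitter.split_text("第一句话。第二句话!第三句话?"):
        print(chunk)
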
@@ -1,20 +1,35 @@
+import os
+
+from bs4 import BeautifulSoup
+from langchain.document_loaders import PyPDFLoader, TextLoader, markdown
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import Chroma
+from pilot.configs.model_config import DATASETS_DIR
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 from pilot.source_embedding.csv_embedding import CSVEmbedding
 from pilot.source_embedding.markdown_embedding import MarkdownEmbedding
 from pilot.source_embedding.pdf_embedding import PDFEmbedding
+import markdown


 class KnowledgeEmbedding:
-    def __init__(self, file_path, model_name, vector_store_config):
+    def __init__(self, file_path, model_name, vector_store_config, local_persist=True):
         """Initialize with Loader url, model_name, vector_store_config"""
         self.file_path = file_path
         self.model_name = model_name
         self.vector_store_config = vector_store_config
         self.vector_store_type = "default"
-        self.knowledge_embedding_client = self.init_knowledge_embedding()
+        self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
+        self.local_persist = local_persist
+        if not self.local_persist:
+            self.knowledge_embedding_client = self.init_knowledge_embedding()

     def knowledge_embedding(self):
         self.knowledge_embedding_client.source_embedding()

+    def knowledge_embedding_batch(self):
+        self.knowledge_embedding_client.batch_embedding()
+
     def init_knowledge_embedding(self):
         if self.file_path.endswith(".pdf"):
             embedding = PDFEmbedding(file_path=self.file_path, model_name=self.model_name,
@@ -31,4 +46,65 @@ class KnowledgeEmbedding:
         return embedding

     def similar_search(self, text, topk):
         return self.knowledge_embedding_client.similar_search(text, topk)
+
+    def knowledge_persist_initialization(self, append_mode):
+        vector_name = self.vector_store_config["vector_store_name"]
+        persist_dir = os.path.join(self.vector_store_config["vector_store_path"], vector_name + ".vectordb")
+        print("vector db path: ", persist_dir)
+        if os.path.exists(persist_dir):
+            if append_mode:
+                print("append knowledge return vector store")
+                new_documents = self._load_knownlege(self.file_path)
+                vector_store = Chroma.from_documents(documents=new_documents,
+                                                     embedding=self.embeddings,
+                                                     persist_directory=persist_dir)
+            else:
+                print("directly return vector store")
+                vector_store = Chroma(persist_directory=persist_dir, embedding_function=self.embeddings)
+        else:
+            print(vector_name + "is new vector store, knowledge begin load...")
+            documents = self._load_knownlege(self.file_path)
+            vector_store = Chroma.from_documents(documents=documents,
+                                                 embedding=self.embeddings,
+                                                 persist_directory=persist_dir)
+        vector_store.persist()
+        return vector_store
+
+    def _load_knownlege(self, path):
+        docments = []
+        for root, _, files in os.walk(path, topdown=False):
+            for file in files:
+                filename = os.path.join(root, file)
+                docs = self._load_file(filename)
+                new_docs = []
+                for doc in docs:
+                    doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
+                    print("doc is embedding...", doc.metadata)
+                    new_docs.append(doc)
+                docments += new_docs
+        return docments
+
+    def _load_file(self, filename):
+        if filename.lower().endswith(".md"):
+            loader = TextLoader(filename)
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+            docs = loader.load_and_split(text_splitter)
+            i = 0
+            for d in docs:
+                content = markdown.markdown(d.page_content)
+                soup = BeautifulSoup(content, 'html.parser')
+                for tag in soup(['!doctype', 'meta', 'i.fa']):
+                    tag.extract()
+                docs[i].page_content = soup.get_text()
+                docs[i].page_content = docs[i].page_content.replace("\n", " ")
+                i += 1
+        elif filename.lower().endswith(".pdf"):
+            loader = PyPDFLoader(filename)
+            textsplitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+            docs = loader.load_and_split(textsplitter)
+        else:
+            loader = TextLoader(filename)
+            text_splitor = CHNDocumentSplitter(sentence_size=100)
+            docs = loader.load_and_split(text_splitor)
+        return docs

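A rough sketch of how the reworked KnowledgeEmbedding class might be driven in its two modes; the directory paths and the "default" store name here are invented placeholders, not taken from the commit:

    # Hypothetical example; "./datasets", "./datasets/sample.pdf" and "default" are assumptions.
    from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG
    from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding

    # local_persist=True (the default): build or load a Chroma store from a directory of files.
    client = KnowledgeEmbedding(
        file_path="./datasets",
        model_name=LLM_MODEL_CONFIG["text2vec"],
        vector_store_config={"vector_store_name": "default",
                             "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH},
    )
    vector_store = client.knowledge_persist_initialization(append_mode=False)

    # local_persist=False: delegate to a per-file embedding client, as webserver.py does above.
    client = KnowledgeEmbedding(
        file_path="./datasets/sample.pdf",
        model_name=LLM_MODEL_CONFIG["text2vec"],
        local_persist=False,
        vector_store_config={"vector_store_name": "default",
                             "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH},
    )
    client.knowledge_embedding()
    print(client.similar_search("example query", 3))
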
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
+import os
 from typing import List

 from bs4 import BeautifulSoup
@@ -8,6 +9,7 @@ from langchain.schema import Document
 import markdown

 from pilot.source_embedding import SourceEmbedding, register
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter


 class MarkdownEmbedding(SourceEmbedding):
@@ -24,7 +26,28 @@ class MarkdownEmbedding(SourceEmbedding):
     def read(self):
         """Load from markdown path."""
         loader = TextLoader(self.file_path)
-        return loader.load()
+        text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+        return loader.load_and_split(text_splitter)
+
+    @register
+    def read_batch(self):
+        """Load from markdown path."""
+        docments = []
+        for root, _, files in os.walk(self.file_path, topdown=False):
+            for file in files:
+                filename = os.path.join(root, file)
+                loader = TextLoader(filename)
+                # text_splitor = CHNDocumentSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
+                # docs = loader.load_and_split()
+                docs = loader.load()
+                # 更新metadata数据
+                new_docs = []
+                for doc in docs:
+                    doc.metadata = {"source": doc.metadata["source"].replace(self.file_path, "")}
+                    print("doc is embedding ... ", doc.metadata)
+                    new_docs.append(doc)
+                docments += new_docs
+        return docments

     @register
     def data_process(self, documents: List[Document]):
@@ -35,7 +58,7 @@ class MarkdownEmbedding(SourceEmbedding):
             for tag in soup(['!doctype', 'meta', 'i.fa']):
                 tag.extract()
             documents[i].page_content = soup.get_text()
-            documents[i].page_content = documents[i].page_content.replace(" ", "").replace("\n", " ")
+            documents[i].page_content = documents[i].page_content.replace("\n", " ")
             i += 1
         return documents

@@ -6,6 +6,7 @@ from langchain.document_loaders import PyPDFLoader
 from langchain.schema import Document

 from pilot.source_embedding import SourceEmbedding, register
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter


 class PDFEmbedding(SourceEmbedding):
@@ -17,20 +18,19 @@ class PDFEmbedding(SourceEmbedding):
         self.file_path = file_path
         self.model_name = model_name
         self.vector_store_config = vector_store_config
-        # SourceEmbedding(file_path =file_path, );
-        SourceEmbedding(file_path, model_name, vector_store_config)

     @register
     def read(self):
         """Load from pdf path."""
         loader = PyPDFLoader(self.file_path)
-        return loader.load()
+        textsplitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+        return loader.load_and_split(textsplitter)

     @register
     def data_process(self, documents: List[Document]):
         i = 0
         for d in documents:
-            documents[i].page_content = d.page_content.replace(" ", "").replace("\n", "")
+            documents[i].page_content = d.page_content.replace("\n", "")
             i += 1
         return documents

pilot/source_embedding/pdf_loader.py: new file, 53 lines
@@ -0,0 +1,53 @@
+"""Loader that loads image files."""
+from typing import List
+
+from langchain.document_loaders.unstructured import UnstructuredFileLoader
+from paddleocr import PaddleOCR
+import os
+import fitz
+
+
+class UnstructuredPaddlePDFLoader(UnstructuredFileLoader):
+    """Loader that uses unstructured to load image files, such as PNGs and JPGs."""
+
+    def _get_elements(self) -> List:
+        def pdf_ocr_txt(filepath, dir_path="tmp_files"):
+            full_dir_path = os.path.join(os.path.dirname(filepath), dir_path)
+            if not os.path.exists(full_dir_path):
+                os.makedirs(full_dir_path)
+            filename = os.path.split(filepath)[-1]
+            ocr = PaddleOCR(lang="ch", use_gpu=False, show_log=False)
+            doc = fitz.open(filepath)
+            txt_file_path = os.path.join(full_dir_path, "%s.txt" % (filename))
+            img_name = os.path.join(full_dir_path, '.tmp.png')
+            with open(txt_file_path, 'w', encoding='utf-8') as fout:
+
+                for i in range(doc.page_count):
+                    page = doc[i]
+                    text = page.get_text("")
+                    fout.write(text)
+                    fout.write("\n")
+
+                    img_list = page.get_images()
+                    for img in img_list:
+                        pix = fitz.Pixmap(doc, img[0])
+
+                        pix.save(img_name)
+
+                        result = ocr.ocr(img_name)
+                        ocr_result = [i[1][0] for line in result for i in line]
+                        fout.write("\n".join(ocr_result))
+            os.remove(img_name)
+            return txt_file_path
+
+        txt_file_path = pdf_ocr_txt(self.file_path)
+        from unstructured.partition.text import partition_text
+        return partition_text(filename=txt_file_path, **self.unstructured_kwargs)
+
+
+if __name__ == "__main__":
+    filepath = os.path.join(os.path.dirname(os.path.dirname(__file__)), "content", "samples", "test.pdf")
+    loader = UnstructuredPaddlePDFLoader(filepath, mode="elements")
+    docs = loader.load()
+    for doc in docs:
+        print(doc)

@@ -76,3 +76,15 @@ class SourceEmbedding(ABC):
             self.text_to_vector(text)
         if 'index_to_store' in registered_methods:
             self.index_to_store(text)
+
+    def batch_embedding(self):
+        if 'read_batch' in registered_methods:
+            text = self.read_batch()
+        if 'data_process' in registered_methods:
+            text = self.data_process(text)
+        if 'text_split' in registered_methods:
+            self.text_split(text)
+        if 'text_to_vector' in registered_methods:
+            self.text_to_vector(text)
+        if 'index_to_store' in registered_methods:
+            self.index_to_store(text)

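The new batch_embedding() relies on the same registered_methods bookkeeping as source_embedding(); the diff does not show that bookkeeping, so the sketch below assumes @register simply records method names in a module-level list:

    # Hypothetical, self-contained sketch of the assumed registration pattern; not code from this diff.
    registered_methods = []

    def register(method):
        registered_methods.append(method.__name__)
        return method

    class DemoSource:
        @register
        def read_batch(self):
            return ["doc-1", "doc-2"]      # stand-in for a list of loaded Documents

        @register
        def index_to_store(self, text):
            print("indexing", len(text), "items")

        def batch_embedding(self):
            # Each pipeline step runs only if a method of that name was registered.
            if 'read_batch' in registered_methods:
                text = self.read_batch()
            if 'index_to_store' in registered_methods:
                self.index_to_store(text)

    DemoSource().batch_embedding()          # prints: indexing 2 items
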
|
@ -1,4 +1,7 @@
|
|||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
|
from langchain.text_splitter import CharacterTextSplitter
|
||||||
|
|
||||||
from pilot.source_embedding import SourceEmbedding, register
|
from pilot.source_embedding import SourceEmbedding, register
|
||||||
|
|
||||||
from bs4 import BeautifulSoup
|
from bs4 import BeautifulSoup
|
||||||
@ -20,7 +23,8 @@ class URLEmbedding(SourceEmbedding):
|
|||||||
def read(self):
|
def read(self):
|
||||||
"""Load from url path."""
|
"""Load from url path."""
|
||||||
loader = WebBaseLoader(web_path=self.file_path)
|
loader = WebBaseLoader(web_path=self.file_path)
|
||||||
return loader.load()
|
text_splitor = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
|
||||||
|
return loader.load_and_split(text_splitor)
|
||||||
|
|
||||||
@register
|
@register
|
||||||
def data_process(self, documents: List[Document]):
|
def data_process(self, documents: List[Document]):
|
||||||
|
@ -48,12 +48,16 @@ class KnownLedge2Vector:
|
|||||||
# vector_store.add_documents(documents=documents)
|
# vector_store.add_documents(documents=documents)
|
||||||
else:
|
else:
|
||||||
documents = self.load_knownlege()
|
documents = self.load_knownlege()
|
||||||
|
<<<<<<< HEAD
|
||||||
# reinit
|
# reinit
|
||||||
|
=======
|
||||||
|
# reinit
|
||||||
|
>>>>>>> 31797ecdb53eff76cceb52454888c91c97572851
|
||||||
vector_store = Chroma.from_documents(documents=documents,
|
vector_store = Chroma.from_documents(documents=documents,
|
||||||
embedding=self.embeddings,
|
embedding=self.embeddings,
|
||||||
persist_directory=persist_dir)
|
persist_directory=persist_dir)
|
||||||
vector_store.persist()
|
vector_store.persist()
|
||||||
return vector_store
|
return vector_store
|
||||||
|
|
||||||
def load_knownlege(self):
|
def load_knownlege(self):
|
||||||
docments = []
|
docments = []
|
||||||
@ -61,7 +65,11 @@ class KnownLedge2Vector:
|
|||||||
for file in files:
|
for file in files:
|
||||||
filename = os.path.join(root, file)
|
filename = os.path.join(root, file)
|
||||||
docs = self._load_file(filename)
|
docs = self._load_file(filename)
|
||||||
|
<<<<<<< HEAD
|
||||||
# update metadata.
|
# update metadata.
|
||||||
|
=======
|
||||||
|
# update metadata.
|
||||||
|
>>>>>>> 31797ecdb53eff76cceb52454888c91c97572851
|
||||||
new_docs = []
|
new_docs = []
|
||||||
for doc in docs:
|
for doc in docs:
|
||||||
doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
|
doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
|
||||||
|
@ -75,4 +75,5 @@ chromadb
|
|||||||
markdown2
|
markdown2
|
||||||
colorama
|
colorama
|
||||||
playsound
|
playsound
|
||||||
distro
|
distro
|
||||||
|
pypdf
|