diff --git a/.gitignore b/.gitignore
index cb21ee557..5043f7db0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,4 +136,5 @@ dmypy.json
 .DS_Store
 logs
 nltk_data
-.vectordb
\ No newline at end of file
+.vectordb
+pilot/data/
\ No newline at end of file
diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py
index ad81a6e79..2a4f5eac4 100644
--- a/pilot/configs/model_config.py
+++ b/pilot/configs/model_config.py
@@ -20,8 +20,10 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 LLM_MODEL_CONFIG = {
     "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"),
     "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"),
+    "text2vec": os.path.join(MODEL_PATH, "text2vec-large-chinese"),
     "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2")
 }
+
 # Load model config
 ISLOAD_8BIT = True
 ISDEBUG = False
@@ -29,4 +31,4 @@ ISDEBUG = False
 
 VECTOR_SEARCH_TOP_K = 3
 VS_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "vs_store")
-KNOWLEDGE_UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "knowledge")
\ No newline at end of file
+KNOWLEDGE_UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
\ No newline at end of file
diff --git a/pilot/conversation.py b/pilot/conversation.py
index 7054fb453..7f526fb89 100644
--- a/pilot/conversation.py
+++ b/pilot/conversation.py
@@ -247,6 +247,13 @@ conv_qa_prompt_template = """ 基于以下已知的信息, 专业、简要的回
 {question}
 """
 
+# conv_qa_prompt_template = """ Based on the following known information, answer the user's question professionally and concisely.
+# If the answer cannot be found in the provided content, say: "The information provided in the knowledge base is insufficient to answer this question." Fabrication is prohibited.
+# known information:
+# {context}
+# question:
+# {question}
+# """
 default_conversation = conv_one_shot
 
 conversation_sql_mode ={
diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py
index 233655381..66f375d0d 100644
--- a/pilot/server/webserver.py
+++ b/pilot/server/webserver.py
@@ -19,7 +19,7 @@ from langchain import PromptTemplate
 ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 sys.path.append(ROOT_PATH)
 
-from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG
+from pilot.configs.model_config import DB_SETTINGS, KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG, TOP_RETURN_SIZE
 from pilot.server.vectordb_qa import KnownLedgeBaseQA
 from pilot.connections.mysql import MySQLOperator
 from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding
@@ -256,11 +256,13 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re
     if mode == conversation_types["custome"] and not db_selector:
         persist_dir = os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vector_store_name["vs_name"] + ".vectordb")
-        print("向量数据库持久化地址: ", persist_dir)
-        knowledge_embedding_client = KnowledgeEmbedding(file_path="", model_name=LLM_MODEL_CONFIG["sentence-transforms"], vector_store_config={"vector_store_name": vector_store_name["vs_name"],
+        print("vector store path: ", persist_dir)
+        knowledge_embedding_client = KnowledgeEmbedding(file_path="", model_name=LLM_MODEL_CONFIG["text2vec"],
+                                                        local_persist=False,
+                                                        vector_store_config={"vector_store_name": vector_store_name["vs_name"],
                                                                              "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
         query = state.messages[-2][1]
-        docs = knowledge_embedding_client.similar_search(query, 1)
+        docs = knowledge_embedding_client.similar_search(query, TOP_RETURN_SIZE)
         context = [d.page_content for d in docs]
         prompt_template = PromptTemplate(
             template=conv_qa_prompt_template,
             input_variables=["context", "question"]
@@ -269,6 +271,20 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re
         result = prompt_template.format(context="\n".join(context), question=query)
         state.messages[-2][1] = result
         prompt = state.get_prompt()
+        print("prompt length:" + str(len(prompt)))
+
+        if len(prompt) > 4000:
+            logger.info("prompt length greater than 4000, rebuild")
+            context = context[:2000]
+            prompt_template = PromptTemplate(
+                template=conv_qa_prompt_template,
+                input_variables=["context", "question"]
+            )
+            result = prompt_template.format(context="\n".join(context), question=query)
+            state.messages[-2][1] = result
+            prompt = state.get_prompt()
+            print("new prompt length:" + str(len(prompt)))
+
         state.messages[-2][1] = query
 
     skip_echo_len = len(prompt.replace("", " ")) + 1
@@ -435,7 +451,7 @@ def build_single_model_ui():
                 max_output_tokens = gr.Slider(
                     minimum=0,
                     maximum=1024,
-                    value=1024,
+                    value=512,
                     step=64,
                     interactive=True,
                     label="最大输出Token数",
@@ -585,7 +601,8 @@ def knowledge_embedding_store(vs_id, files):
         shutil.move(file.name, os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename))
         knowledge_embedding_client = KnowledgeEmbedding(
             file_path=os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename),
-            model_name=LLM_MODEL_CONFIG["sentence-transforms"],
+            model_name=LLM_MODEL_CONFIG["text2vec"],
+            local_persist=False,
             vector_store_config={
                 "vector_store_name": vector_store_name["vs_name"],
                 "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
@@ -610,8 +627,7 @@ if __name__ == "__main__":
 
     # 配置初始化
     cfg = Config()
-    dbs = get_database_list()
-
+    # dbs = get_database_list()
     cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
 
     # 加载插件可执行命令
diff --git a/pilot/source_embedding/chn_document_splitter.py b/pilot/source_embedding/chn_document_splitter.py
new file mode 100644
index 000000000..090a6af56
--- /dev/null
+++ b/pilot/source_embedding/chn_document_splitter.py
@@ -0,0 +1,59 @@
+import re
+from typing import List
+from langchain.text_splitter import CharacterTextSplitter
+
+
+class CHNDocumentSplitter(CharacterTextSplitter):
+    def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
+        super().__init__(**kwargs)
+        self.pdf = pdf
+        self.sentence_size = sentence_size
+
+    # def split_text_version2(self, text: str) -> List[str]:
+    #     if self.pdf:
+    #         text = re.sub(r"\n{3,}", "\n", text)
+    #         text = re.sub('\s', ' ', text)
+    #         text = text.replace("\n\n", "")
+    #     sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
+    #     sent_list = []
+    #     for ele in sent_sep_pattern.split(text):
+    #         if sent_sep_pattern.match(ele) and sent_list:
+    #             sent_list[-1] += ele
+    #         elif ele:
+    #             sent_list.append(ele)
+    #     return sent_list
+
+    def split_text(self, text: str) -> List[str]:
+        if self.pdf:
+            text = re.sub(r"\n{3,}", r"\n", text)
+            text = re.sub('\s', " ", text)
+            text = re.sub("\n\n", "", text)
+
+        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence terminators
+        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
+        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
+        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
+        # If a closing quote follows a terminator, the quote ends the sentence, so the \n goes after the quote; the rules above deliberately keep quotes attached.
+        text = text.rstrip()  # drop any extra \n at the end of the paragraph
+        # Many rule sets also split on semicolons, dashes and English double quotes; they are ignored here and can be added back with small adjustments if needed.
+        ls = [i for i in text.split("\n") if i]
+        for ele in ls:
+            if len(ele) > self.sentence_size:
+                ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
+                ele1_ls = ele1.split("\n")
+                for ele_ele1 in ele1_ls:
+                    if len(ele_ele1) > self.sentence_size:
+                        ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
+                        ele2_ls = ele_ele2.split("\n")
+                        for ele_ele2 in ele2_ls:
+                            if len(ele_ele2) > self.sentence_size:
+                                ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
+                                ele2_id = ele2_ls.index(ele_ele2)
+                                ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
+                                                                                                       ele2_id + 1:]
+                        ele_id = ele1_ls.index(ele_ele1)
+                        ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
+
+                id = ls.index(ele)
+                ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
+        return ls
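
For reference, a minimal usage sketch of the new splitter, mirroring how the embedders below wire it in; the input path is a placeholder, not a file added by this change:

    # Sketch only: split a Chinese PDF into sentence-sized chunks, as PDFEmbedding.read does below.
    from langchain.document_loaders import PyPDFLoader
    from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter

    splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
    docs = PyPDFLoader("sample.pdf").load_and_split(splitter)  # "sample.pdf" is a placeholder path
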
diff --git a/pilot/source_embedding/knowledge_embedding.py b/pilot/source_embedding/knowledge_embedding.py
index a9e4d4e4e..594723b6e 100644
--- a/pilot/source_embedding/knowledge_embedding.py
+++ b/pilot/source_embedding/knowledge_embedding.py
@@ -1,20 +1,35 @@
+import os
+
+from bs4 import BeautifulSoup
+from langchain.document_loaders import PyPDFLoader, TextLoader, markdown
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import Chroma
+from pilot.configs.model_config import DATASETS_DIR
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 from pilot.source_embedding.csv_embedding import CSVEmbedding
 from pilot.source_embedding.markdown_embedding import MarkdownEmbedding
 from pilot.source_embedding.pdf_embedding import PDFEmbedding
+import markdown
 
 
 class KnowledgeEmbedding:
-    def __init__(self, file_path, model_name, vector_store_config):
+    def __init__(self, file_path, model_name, vector_store_config, local_persist=True):
         """Initialize with Loader url, model_name, vector_store_config"""
         self.file_path = file_path
         self.model_name = model_name
         self.vector_store_config = vector_store_config
         self.vector_store_type = "default"
-        self.knowledge_embedding_client = self.init_knowledge_embedding()
+        self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
+        self.local_persist = local_persist
+        if not self.local_persist:
+            self.knowledge_embedding_client = self.init_knowledge_embedding()
 
     def knowledge_embedding(self):
         self.knowledge_embedding_client.source_embedding()
 
+    def knowledge_embedding_batch(self):
+        self.knowledge_embedding_client.batch_embedding()
+
     def init_knowledge_embedding(self):
         if self.file_path.endswith(".pdf"):
             embedding = PDFEmbedding(file_path=self.file_path, model_name=self.model_name,
@@ -31,4 +46,65 @@ class KnowledgeEmbedding:
         return embedding
 
     def similar_search(self, text, topk):
-        return self.knowledge_embedding_client.similar_search(text, topk)
\ No newline at end of file
+        return self.knowledge_embedding_client.similar_search(text, topk)
+
+    def knowledge_persist_initialization(self, append_mode):
+        vector_name = self.vector_store_config["vector_store_name"]
+        persist_dir = os.path.join(self.vector_store_config["vector_store_path"], vector_name + ".vectordb")
+        print("vector db path: ", persist_dir)
+        if os.path.exists(persist_dir):
+            if append_mode:
+                print("append knowledge return vector store")
+                new_documents = self._load_knownlege(self.file_path)
+                vector_store = Chroma.from_documents(documents=new_documents,
+                                                     embedding=self.embeddings,
+                                                     persist_directory=persist_dir)
+            else:
+                print("directly return vector store")
+                vector_store = Chroma(persist_directory=persist_dir, embedding_function=self.embeddings)
+        else:
+            print(vector_name + " is a new vector store, loading knowledge...")
+            documents = self._load_knownlege(self.file_path)
+            vector_store = Chroma.from_documents(documents=documents,
+                                                 embedding=self.embeddings,
+                                                 persist_directory=persist_dir)
+            vector_store.persist()
+        return vector_store
+
+    def _load_knownlege(self, path):
+        docments = []
+        for root, _, files in os.walk(path, topdown=False):
+            for file in files:
+                filename = os.path.join(root, file)
+                docs = self._load_file(filename)
+                new_docs = []
+                for doc in docs:
+                    doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
+                    print("doc is embedding...", doc.metadata)
+                    new_docs.append(doc)
+                docments += new_docs
+        return docments
+
+    def _load_file(self, filename):
+        if filename.lower().endswith(".md"):
+            loader = TextLoader(filename)
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+            docs = loader.load_and_split(text_splitter)
+            i = 0
+            for d in docs:
+                content = markdown.markdown(d.page_content)
+                soup = BeautifulSoup(content, 'html.parser')
+                for tag in soup(['!doctype', 'meta', 'i.fa']):
+                    tag.extract()
+                docs[i].page_content = soup.get_text()
+                docs[i].page_content = docs[i].page_content.replace("\n", " ")
+                i += 1
+        elif filename.lower().endswith(".pdf"):
+            loader = PyPDFLoader(filename)
+            textsplitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+            docs = loader.load_and_split(textsplitter)
+        else:
+            loader = TextLoader(filename)
+            text_splitor = CHNDocumentSplitter(sentence_size=100)
+            docs = loader.load_and_split(text_splitor)
+        return docs
\ No newline at end of file
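
For orientation, this is roughly how the webserver above consumes the class for retrieval; the store name and query string are placeholders:

    # Sketch of the retrieval path used by http_bot above; "my_kb" is a placeholder store name.
    from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG
    from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding

    client = KnowledgeEmbedding(
        file_path="",                              # not needed for pure retrieval
        model_name=LLM_MODEL_CONFIG["text2vec"],
        local_persist=False,                       # build the per-store embedding client
        vector_store_config={"vector_store_name": "my_kb",
                             "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
    docs = client.similar_search("your question", 3)   # top-3 matching chunks
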
diff --git a/pilot/source_embedding/markdown_embedding.py b/pilot/source_embedding/markdown_embedding.py
index 622011006..fee9504b6 100644
--- a/pilot/source_embedding/markdown_embedding.py
+++ b/pilot/source_embedding/markdown_embedding.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
+import os
 from typing import List
 
 from bs4 import BeautifulSoup
@@ -8,6 +9,7 @@ from langchain.schema import Document
 import markdown
 
 from pilot.source_embedding import SourceEmbedding, register
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 
 
 class MarkdownEmbedding(SourceEmbedding):
@@ -24,7 +26,28 @@ class MarkdownEmbedding(SourceEmbedding):
     def read(self):
         """Load from markdown path."""
         loader = TextLoader(self.file_path)
-        return loader.load()
+        text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+        return loader.load_and_split(text_splitter)
+
+    @register
+    def read_batch(self):
+        """Load from markdown path."""
+        docments = []
+        for root, _, files in os.walk(self.file_path, topdown=False):
+            for file in files:
+                filename = os.path.join(root, file)
+                loader = TextLoader(filename)
+                # text_splitor = CHNDocumentSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
+                # docs = loader.load_and_split()
+                docs = loader.load()
+                # update metadata
+                new_docs = []
+                for doc in docs:
+                    doc.metadata = {"source": doc.metadata["source"].replace(self.file_path, "")}
+                    print("doc is embedding ... ", doc.metadata)
+                    new_docs.append(doc)
+                docments += new_docs
+        return docments
 
     @register
     def data_process(self, documents: List[Document]):
@@ -35,7 +58,7 @@ class MarkdownEmbedding(SourceEmbedding):
             soup = BeautifulSoup(content, 'html.parser')
             for tag in soup(['!doctype', 'meta', 'i.fa']):
                 tag.extract()
             documents[i].page_content = soup.get_text()
-            documents[i].page_content = documents[i].page_content.replace(" ", "").replace("\n", " ")
+            documents[i].page_content = documents[i].page_content.replace("\n", " ")
             i += 1
         return documents
diff --git a/pilot/source_embedding/pdf_embedding.py b/pilot/source_embedding/pdf_embedding.py
index 617190fe5..bd0ae3aba 100644
--- a/pilot/source_embedding/pdf_embedding.py
+++ b/pilot/source_embedding/pdf_embedding.py
@@ -6,6 +6,7 @@ from langchain.document_loaders import PyPDFLoader
 from langchain.schema import Document
 
 from pilot.source_embedding import SourceEmbedding, register
+from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
 
 
 class PDFEmbedding(SourceEmbedding):
@@ -17,20 +18,19 @@ class PDFEmbedding(SourceEmbedding):
         self.file_path = file_path
         self.model_name = model_name
         self.vector_store_config = vector_store_config
-        # SourceEmbedding(file_path =file_path, );
-        SourceEmbedding(file_path, model_name, vector_store_config)
 
     @register
     def read(self):
         """Load from pdf path."""
         loader = PyPDFLoader(self.file_path)
-        return loader.load()
+        textsplitter = CHNDocumentSplitter(pdf=True, sentence_size=100)
+        return loader.load_and_split(textsplitter)
 
     @register
     def data_process(self, documents: List[Document]):
         i = 0
         for d in documents:
-            documents[i].page_content = d.page_content.replace(" ", "").replace("\n", "")
+            documents[i].page_content = d.page_content.replace("\n", "")
             i += 1
         return documents
 
diff --git a/pilot/source_embedding/pdf_loader.py b/pilot/source_embedding/pdf_loader.py
new file mode 100644
index 000000000..aa7cf4da5
--- /dev/null
+++ b/pilot/source_embedding/pdf_loader.py
@@ -0,0 +1,53 @@
+"""Loader that loads pdf files."""
+from typing import List
+
+from langchain.document_loaders.unstructured import UnstructuredFileLoader
+from paddleocr import PaddleOCR
+import os
+import fitz
+
+
+class UnstructuredPaddlePDFLoader(UnstructuredFileLoader):
+    """Loader that uses unstructured and PaddleOCR to load PDF files, including scanned pages."""
+
+    def _get_elements(self) -> List:
+        def pdf_ocr_txt(filepath, dir_path="tmp_files"):
+            full_dir_path = os.path.join(os.path.dirname(filepath), dir_path)
+            if not os.path.exists(full_dir_path):
+                os.makedirs(full_dir_path)
+            filename = os.path.split(filepath)[-1]
+            ocr = PaddleOCR(lang="ch", use_gpu=False, show_log=False)
+            doc = fitz.open(filepath)
+            txt_file_path = os.path.join(full_dir_path, "%s.txt" % (filename))
+            img_name = os.path.join(full_dir_path, '.tmp.png')
+            with open(txt_file_path, 'w', encoding='utf-8') as fout:
+
+                for i in range(doc.page_count):
+                    page = doc[i]
+                    text = page.get_text("")
+                    fout.write(text)
+                    fout.write("\n")
+
+                    img_list = page.get_images()
+                    for img in img_list:
+                        pix = fitz.Pixmap(doc, img[0])
+
+                        pix.save(img_name)
+
+                        result = ocr.ocr(img_name)
+                        ocr_result = [i[1][0] for line in result for i in line]
+                        fout.write("\n".join(ocr_result))
+            os.remove(img_name)
+            return txt_file_path
+
+        txt_file_path = pdf_ocr_txt(self.file_path)
+        from unstructured.partition.text import partition_text
+        return partition_text(filename=txt_file_path, **self.unstructured_kwargs)
+
+
+if __name__ == "__main__":
+    filepath = os.path.join(os.path.dirname(os.path.dirname(__file__)), "content", "samples", "test.pdf")
+    loader = UnstructuredPaddlePDFLoader(filepath, mode="elements")
+    docs = loader.load()
+    for doc in docs:
+        print(doc)
diff --git a/pilot/source_embedding/source_embedding.py b/pilot/source_embedding/source_embedding.py
index 656d24eaf..66bc97b6d 100644
--- a/pilot/source_embedding/source_embedding.py
+++ b/pilot/source_embedding/source_embedding.py
@@ -76,3 +76,15 @@ class SourceEmbedding(ABC):
             self.text_to_vector(text)
         if 'index_to_store' in registered_methods:
             self.index_to_store(text)
+
+    def batch_embedding(self):
+        if 'read_batch' in registered_methods:
+            text = self.read_batch()
+        if 'data_process' in registered_methods:
+            text = self.data_process(text)
+        if 'text_split' in registered_methods:
+            self.text_split(text)
+        if 'text_to_vector' in registered_methods:
+            self.text_to_vector(text)
+        if 'index_to_store' in registered_methods:
+            self.index_to_store(text)
diff --git a/pilot/source_embedding/url_embedding.py b/pilot/source_embedding/url_embedding.py
index 5fa29e0d2..68fbdd5e4 100644
--- a/pilot/source_embedding/url_embedding.py
+++ b/pilot/source_embedding/url_embedding.py
@@ -1,4 +1,7 @@
 from typing import List
+
+from langchain.text_splitter import CharacterTextSplitter
+
 from pilot.source_embedding import SourceEmbedding, register
 
 from bs4 import BeautifulSoup
@@ -20,7 +23,8 @@ class URLEmbedding(SourceEmbedding):
     def read(self):
         """Load from url path."""
         loader = WebBaseLoader(web_path=self.file_path)
-        return loader.load()
+        text_splitor = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20, length_function=len)
+        return loader.load_and_split(text_splitor)
 
     @register
     def data_process(self, documents: List[Document]):
diff --git a/pilot/vector_store/file_loader.py b/pilot/vector_store/file_loader.py
index 8f668f60e..b94b45ba2 100644
--- a/pilot/vector_store/file_loader.py
+++ b/pilot/vector_store/file_loader.py
@@ -48,12 +48,16 @@ class KnownLedge2Vector:
             # vector_store.add_documents(documents=documents)
         else:
             documents = self.load_knownlege()
+<<<<<<< HEAD
             # reinit
+=======
+            # reinit
+>>>>>>> 31797ecdb53eff76cceb52454888c91c97572851
             vector_store = Chroma.from_documents(documents=documents,
                                                  embedding=self.embeddings,
                                                  persist_directory=persist_dir)
             vector_store.persist()
-        return vector_store
+        return vector_store
 
     def load_knownlege(self):
         docments = []
@@ -61,7 +65,11 @@ class KnownLedge2Vector:
             for file in files:
                 filename = os.path.join(root, file)
                 docs = self._load_file(filename)
+<<<<<<< HEAD
                 # update metadata.
+=======
+                # update metadata.
+>>>>>>> 31797ecdb53eff76cceb52454888c91c97572851
                 new_docs = []
                 for doc in docs:
                     doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
diff --git a/requirements.txt b/requirements.txt
index 5bbc34f4c..eac927c3d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,4 +75,5 @@ chromadb
 markdown2
 colorama
 playsound
-distro
\ No newline at end of file
+distro
+pypdf
\ No newline at end of file
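
The new OCR-backed loader above imports paddleocr, fitz (PyMuPDF) and unstructured; this change only adds pypdf, which PyPDFLoader needs, to requirements.txt. Assuming those extra packages are installed, the loader can be paired with the Chinese splitter; the pairing below is illustrative only, is not wired up anywhere in this patch, and "scanned.pdf" is a placeholder path:

    # Sketch only: OCR a scanned Chinese PDF and split it into sentence-sized chunks.
    from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
    from pilot.source_embedding.pdf_loader import UnstructuredPaddlePDFLoader

    loader = UnstructuredPaddlePDFLoader("scanned.pdf", mode="elements")
    docs = loader.load_and_split(CHNDocumentSplitter(pdf=True, sentence_size=100))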