"""
|
|
Class for loading document type data
|
|
"""
|
|
|
|
import glob
|
|
from typing import List
|
|
|
|
from colossalqa.mylogging import get_logger
|
|
from langchain.document_loaders import (
|
|
JSONLoader,
|
|
PyPDFLoader,
|
|
TextLoader,
|
|
UnstructuredHTMLLoader,
|
|
UnstructuredMarkdownLoader,
|
|
)
|
|
from langchain.document_loaders.csv_loader import CSVLoader
|
|
|
|
logger = get_logger()
|
|
|
|
SUPPORTED_DATA_FORMAT = [".csv", ".json", ".html", ".md", ".pdf", ".txt", ".jsonl"]
|
|
|
|
|
|
class DocumentLoader:
    """
    Load documents from different files into a list of langchain Documents
    """

    def __init__(self, files: List, **kwargs) -> None:
        """
        Args:
            files: list of files; each item is either a path string or a [file path, name] pair
            **kwargs: keyword arguments, useful for certain document types
        """
        self.data = {}
        self.kwargs = kwargs

        for item in files:
            path = item[0] if isinstance(item, list) else item
            logger.info(f"Loading data from {path}")
            self.load_data(path)
            logger.info("Data loaded")

        # Flatten the per-file document lists into a single list
        self.all_data = []
        for key in self.data:
            if isinstance(self.data[key], list):
                for item in self.data[key]:
                    if isinstance(item, list):
                        self.all_data.extend(item)
                    else:
                        self.all_data.append(item)
    def load_data(self, path: str) -> None:
        """
        Load data. Please refer to https://python.langchain.com/docs/modules/data_connection/document_loaders/
        for specific format requirements.
        Args:
            path: path to a file
            To load files with a glob path, here are some examples.
                Load all files from a directory: folder1/folder2/*
                Load all pdf files from a directory: folder1/folder2/*.pdf
        """
        files = []

        # Handle glob expression
        try:
            files = glob.glob(path)
        except Exception as e:
            logger.error(e)
        if len(files) == 0:
            raise ValueError("Unsupported file/directory format. For directories, please use glob expression")
        elif len(files) == 1:
            path = files[0]
        else:
            for file in files:
                self.load_data(file)
            return
        # Load data if the path is a file
        logger.info(f"load {path}", verbose=True)
        if path.endswith(".csv"):
            # Load csv
            loader = CSVLoader(file_path=path, encoding="utf8")
            data = loader.load()
            self.data[path] = data
        elif path.endswith(".txt"):
            # Load txt
            loader = TextLoader(path, encoding="utf8")
            data = loader.load()
            self.data[path] = data
        elif path.endswith(".html"):
            # Load html
            loader = UnstructuredHTMLLoader(path, encoding="utf8")
            data = loader.load()
            self.data[path] = data
        elif path.endswith(".json"):
            # Load json
            loader = JSONLoader(
                file_path=path,
                jq_schema=self.kwargs.get("jq_schema", ".data[]"),
                content_key=self.kwargs.get("content_key", "content"),
                metadata_func=self.kwargs.get("metadata_func", None),
            )
            data = loader.load()
            self.data[path] = data
        elif path.endswith(".jsonl"):
            # Load jsonl
            loader = JSONLoader(
                file_path=path, jq_schema=self.kwargs.get("jq_schema", ".data[].content"), json_lines=True
            )
            data = loader.load()
            self.data[path] = data
        elif path.endswith(".md"):
            # Load markdown
            loader = UnstructuredMarkdownLoader(path)
            data = loader.load()
            self.data[path] = data
        elif path.endswith(".pdf"):
            # Load pdf
            loader = PyPDFLoader(path)
            data = loader.load_and_split()
            self.data[path] = data
        else:
            if "." in path.split("/")[-1]:
                raise ValueError(f"Unsupported file format {path}. Supported formats: {SUPPORTED_DATA_FORMAT}")
            else:
                # May be a directory; we strictly follow the glob path and do not load files in subdirectories
                pass
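
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module). The
# file paths below are hypothetical placeholders; adjust them to real files
# before running. Keyword arguments such as jq_schema/content_key are only
# consumed when .json/.jsonl files are loaded, as defined above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Plain path strings and [file path, name] pairs can be mixed; glob
    # patterns are expanded inside load_data, so "docs/*.md" loads every
    # markdown file in docs/.
    example_files = [
        "data/faq.csv",                      # a single csv file
        ["data/manual.pdf", "user manual"],  # [file path, name] pair
        "docs/*.md",                         # glob expression
    ]

    document_loader = DocumentLoader(example_files, jq_schema=".data[]", content_key="content")

    # all_data is a flat list of langchain Document objects across all loaded files
    for doc in document_loader.all_data[:3]:
        print(doc.metadata, doc.page_content[:80])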