Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-03 15:55:44 +00:00)
Compare commits: bagatur/us...eugene/add (8 commits)

| SHA1 |
|---|
| f7aaf26fb5 |
| 8fe8ee5f80 |
| 2c37babfdb |
| 2d7b567c9c |
| fb597def2d |
| eb78265318 |
| 4417b4f75e |
| 7da6ef2390 |
langchain/document_loaders/base.py
@@ -1,4 +1,5 @@
"""Abstract interface for document loader implementations."""

from abc import ABC, abstractmethod
from typing import Iterator, List, Optional
langchain/document_loaders/generic.py (new file, 131 lines)
@@ -0,0 +1,131 @@
from __future__ import annotations

from pathlib import Path
from typing import Iterator, List, Optional, Sequence, Union

from langchain.document_loaders.base import BaseBlobParser, BaseLoader
from langchain.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader
from langchain.document_loaders.parsers.registry import get_parser
from langchain.schema import Document
from langchain.text_splitter import TextSplitter

_PathLike = Union[str, Path]


class GenericLoader(BaseLoader):
    """A generic document loader.

    A generic document loader that allows combining an arbitrary blob loader with
    a blob parser.

    Examples:

    .. code-block:: python

        from langchain.document_loaders import GenericLoader
        from langchain.document_loaders.blob_loaders import FileSystemBlobLoader

        loader = GenericLoader.from_filesystem(
            path="path/to/directory",
            glob="**/[!.]*",
            suffixes=[".pdf"],
            show_progress=True,
        )

        docs = loader.lazy_load()
        next(docs)

    Example instantiations to change which files are loaded:

    .. code-block:: python

        # Recursively load all text files in a directory.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt")

        # Recursively load all non-hidden files in a directory.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*")

        # Load all files in a directory without recursion.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="*")

    Example instantiations to change which parser is used:

    .. code-block:: python

        from langchain.document_loaders.parsers.pdf import PyPDFParser

        # Recursively load all PDF files in a directory.
        loader = GenericLoader.from_filesystem(
            "/path/to/dir",
            glob="**/*.pdf",
            parser=PyPDFParser(),
        )
    """

    def __init__(
        self,
        blob_loader: BlobLoader,
        blob_parser: BaseBlobParser,
    ) -> None:
        """A generic document loader.

        Args:
            blob_loader: A blob loader which knows how to yield blobs.
            blob_parser: A blob parser which knows how to parse blobs into documents.
        """
        self.blob_loader = blob_loader
        self.blob_parser = blob_parser

    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Load documents lazily. Use this when working at a large scale."""
        for blob in self.blob_loader.yield_blobs():
            yield from self.blob_parser.lazy_parse(blob)

    def load(self) -> List[Document]:
        """Load all documents."""
        return list(self.lazy_load())

    def load_and_split(
        self, text_splitter: Optional[TextSplitter] = None
    ) -> List[Document]:
        """Load all documents and split them into chunks."""
        raise NotImplementedError(
            "Loading and splitting is not yet implemented for generic loaders. "
            "When it is implemented, it will be configured via the initializer. "
            "This method should not be used going forward."
        )

    @classmethod
    def from_filesystem(
        cls,
        path: _PathLike,
        *,
        glob: str = "**/[!.]*",
        suffixes: Optional[Sequence[str]] = None,
        show_progress: bool = False,
        parser: Union[str, BaseBlobParser] = "default",
    ) -> GenericLoader:
        """Create a generic document loader using a filesystem blob loader.

        Args:
            path: The path to the directory to load documents from.
            glob: The glob pattern to use to find documents.
            suffixes: The suffixes to use to filter documents. If None, all files
                matching the glob will be loaded.
            show_progress: Whether to show a progress bar or not (requires tqdm).
                Proxies to the file system loader.
            parser: A blob parser which knows how to parse blobs into documents.

        Returns:
            A generic document loader.
        """
        blob_loader = FileSystemBlobLoader(
            path, glob=glob, suffixes=suffixes, show_progress=show_progress
        )
        if isinstance(parser, str):
            blob_parser = get_parser(parser)
        else:
            blob_parser = parser
        return cls(blob_loader, blob_parser)
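For illustration, a minimal end-to-end sketch of how the new loader composes (the directory path is hypothetical; the imports match the modules added in this diff):

from langchain.document_loaders.generic import GenericLoader

# Stream documents lazily from all text files under a directory; the
# string "default" resolves a parser from the new registry by mime type.
loader = GenericLoader.from_filesystem("/tmp/my_docs", glob="**/*.txt")
for doc in loader.lazy_load():
    print(doc.metadata["source"], len(doc.page_content))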
langchain/document_loaders/parsers/__init__.py
@@ -1,3 +1,4 @@
from langchain.document_loaders.parsers.html import BS4HTMLParser
from langchain.document_loaders.parsers.pdf import (
    PDFMinerParser,
    PDFPlumberParser,
@@ -7,9 +8,10 @@ from langchain.document_loaders.parsers.pdf import (
)

__all__ = [
    "BS4HTMLParser",
    "PDFMinerParser",
    "PDFPlumberParser",
    "PyMuPDFParser",
    "PyPDFium2Parser",
    "PyPDFParser",
]
langchain/document_loaders/parsers/generic.py
@@ -34,6 +34,7 @@ class MimeTypeBasedParser(BaseBlobParser):
    def __init__(
        self,
        handlers: Mapping[str, BaseBlobParser],
        *,
        fallback_parser: Optional[BaseBlobParser] = None,
    ) -> None:
        """Define a parser that uses mime-types to determine how to parse a blob.
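The new fallback_parser keyword lets a mime-type router handle unknown types instead of failing on them. A small sketch, assuming the handler classes added elsewhere in this diff:

from langchain.document_loaders.parsers.generic import MimeTypeBasedParser
from langchain.document_loaders.parsers.pdf import PyMuPDFParser
from langchain.document_loaders.parsers.txt import TextParser

# Route PDFs to PyMuPDF; blobs with an unrecognized mime type fall
# back to being parsed as plain text.
parser = MimeTypeBasedParser(
    handlers={"application/pdf": PyMuPDFParser()},
    fallback_parser=TextParser(),
)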
langchain/document_loaders/parsers/html/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from langchain.document_loaders.parsers.html.bs4 import BS4HTMLParser
from langchain.document_loaders.parsers.html.markdownify import MarkdownifyHTMLParser

__all__ = ["MarkdownifyHTMLParser", "BS4HTMLParser"]
langchain/document_loaders/parsers/html/bs4.py (new file, 56 lines)
@@ -0,0 +1,56 @@
"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""

import logging
from typing import Any, Dict, Iterator, Mapping, Optional, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob

logger = logging.getLogger(__name__)


class BS4HTMLParser(BaseBlobParser):
    """Parser that uses Beautiful Soup to parse HTML files."""

    def __init__(
        self,
        features: str = "lxml",
        bs_kwargs: Optional[Mapping[str, Any]] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialize a bs4-based HTML parser."""
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        if bs_kwargs and "features" in bs_kwargs:
            raise ValueError("features cannot be set in bs_kwargs")

        _bs_kwargs = bs_kwargs or {}
        self.bs_kwargs = {"features": features, **_bs_kwargs}
        self.get_text_separator = get_text_separator

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Load HTML document into document objects."""
        from bs4 import BeautifulSoup

        with blob.as_bytes_io() as f:
            soup = BeautifulSoup(f, **self.bs_kwargs)

        text = soup.get_text(self.get_text_separator)

        if soup.title:
            title = str(soup.title.string)
        else:
            title = ""

        metadata: Dict[str, Union[str, None]] = {
            "source": blob.source,
            "title": title,
        }
        yield Document(page_content=text, metadata=metadata)
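A quick sketch of parsing a single HTML blob with the new parser (the file path is hypothetical; Blob.from_path is the constructor this diff's tests use):

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.html import BS4HTMLParser

# Requires beautifulsoup4 (and lxml for the default parser features).
# Metadata carries the source path and the page <title>.
blob = Blob.from_path("/tmp/page.html")
doc = next(BS4HTMLParser().lazy_parse(blob))
print(doc.metadata["title"])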
langchain/document_loaders/parsers/html/markdownify.py (new file, 74 lines)
@@ -0,0 +1,74 @@
"""Load and chunk HTML with optional pre-processing to clean it up."""

import re
from typing import Iterator, Tuple

from bs4 import BeautifulSoup

from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document

# Regular expression pattern to detect multiple new lines in a row with optional
# whitespace in between
CONSECUTIVE_NEW_LINES = re.compile(r"\n(\s*\n)+", flags=re.UNICODE)


def _get_mini_html(html: str, *, tags_to_remove: Tuple[str, ...] = tuple()) -> str:
    """Clean up HTML tags."""
    # Parse the HTML document using BeautifulSoup
    soup = BeautifulSoup(html, "html.parser")

    # Remove all CSS stylesheets
    for stylesheet in soup.find_all("link", rel="stylesheet"):
        stylesheet.extract()

    for tag_to_remove in tags_to_remove:
        # Remove all matching tags
        for tag in soup.find_all(tag_to_remove):
            tag.extract()

    new_html = repr(soup)
    return new_html


def _clean_html(html: str, *, tags_to_remove: Tuple[str, ...] = tuple()) -> str:
    """Clean up HTML and convert to markdown using markdownify."""
    try:
        import markdownify
    except ImportError:
        raise ImportError(
            "The markdownify package is required to parse HTML files. "
            "Please install it with `pip install markdownify`."
        )
    html = _get_mini_html(html, tags_to_remove=tags_to_remove)
    md = markdownify.markdownify(html)
    return CONSECUTIVE_NEW_LINES.sub("\n\n", md).strip()


## PUBLIC API


class MarkdownifyHTMLParser(BaseBlobParser):
    """A blob parser for HTML content."""

    def __init__(
        self,
        tags_to_remove: Tuple[str, ...] = ("svg", "img", "script", "style"),
    ) -> None:
        """Initialize the preprocessor.

        Args:
            tags_to_remove: A tuple of tags to remove from the HTML
        """
        self.tags_to_remove = tags_to_remove

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Lazily parse the blob."""
        yield Document(
            page_content=_clean_html(
                blob.as_string(), tags_to_remove=self.tags_to_remove
            ),
            metadata={"source": blob.source},
        )
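And the markdownify-based counterpart, which strips noisy tags and emits markdown rather than plain text. A sketch assuming Blob.from_data(data, path=...) is available alongside Blob.from_path; the inline HTML is purely illustrative:

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.html.markdownify import MarkdownifyHTMLParser

html = "<html><body><script>x()</script><h1>Title</h1><p>Body text.</p></body></html>"
blob = Blob.from_data(html, path="inline.html")

# Requires markdownify. The <script> tag is removed and the
# remaining markup is converted to markdown.
doc = next(MarkdownifyHTMLParser().lazy_parse(blob))
print(doc.page_content)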
langchain/document_loaders/parsers/registry.py (new file, 30 lines)
@@ -0,0 +1,30 @@
"""Module that includes a registry of default parser configurations."""
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.parsers.generic import MimeTypeBasedParser
from langchain.document_loaders.parsers.pdf import PyMuPDFParser
from langchain.document_loaders.parsers.txt import TextParser


def _get_default_parser() -> BaseBlobParser:
    """Get the default mime-type based parser."""
    return MimeTypeBasedParser(
        handlers={
            "application/pdf": PyMuPDFParser(),
            "text/plain": TextParser(),
        },
        fallback_parser=None,
    )


_REGISTRY = {
    "default": _get_default_parser,
}

# PUBLIC API


def get_parser(parser_name: str) -> BaseBlobParser:
    """Get a parser by parser name."""
    if parser_name not in _REGISTRY:
        raise ValueError(f"Unknown parser name: {parser_name}")
    return _REGISTRY[parser_name]()
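The registry is what makes the string form of GenericLoader's parser argument work. A short sketch of using it directly (the PDF path is hypothetical; PyMuPDFParser needs pymupdf installed):

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.registry import get_parser

# "default" resolves to the mime-type router defined above.
parser = get_parser("default")
for doc in parser.lazy_parse(Blob.from_path("/tmp/report.pdf")):
    print(doc.metadata)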
langchain/document_loaders/parsers/txt.py (new file, 12 lines)
@@ -0,0 +1,12 @@
"""Module for parsing text files."""
from typing import Iterator

from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document


class TextParser(BaseBlobParser):
    """Parser for text blobs."""

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Lazily parse the blob."""
        yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
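TextParser yields a single Document per blob with the contents unchanged, which is the behavior the generic-loader tests below rely on. For example (Blob.from_data with an inline string is assumed here for brevity):

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.txt import TextParser

# One Document per blob; the source metadata comes from the blob's path.
blob = Blob.from_data("hello world", path="hello.txt")
docs = list(TextParser().lazy_parse(blob))
assert docs[0].page_content == "hello world"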
@@ -1,12 +0,0 @@
from langchain.document_loaders.parsers import __all__


def test_parsers_public_api_correct() -> None:
    """Test public API of parsers for breaking changes."""
    assert set(__all__) == {
        "PyPDFParser",
        "PDFMinerParser",
        "PyMuPDFParser",
        "PyPDFium2Parser",
        "PDFPlumberParser",
    }
@@ -0,0 +1,27 @@
"""Tests for the HTML parsers."""
from pathlib import Path

import pytest

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.html import BS4HTMLParser

HERE = Path(__file__).parent
EXAMPLES = HERE.parent.parent.parent / "integration_tests" / "examples"


@pytest.mark.requires("bs4")
def test_bs_html_loader() -> None:
    """Test the BS4HTML parser."""
    file_path = EXAMPLES / "example.html"
    blob = Blob.from_path(file_path)
    parser = BS4HTMLParser(get_text_separator="|")
    docs = list(parser.lazy_parse(blob))
    assert isinstance(docs, list)
    assert len(docs) == 1

    metadata = docs[0].metadata
    content = docs[0].page_content

    assert metadata["title"] == "Chew dad's slippers"
    assert metadata["source"] == str(file_path)
    assert content[:2] == "\n|"
@@ -4,6 +4,7 @@ from langchain.document_loaders.parsers import __all__
def test_parsers_public_api_correct() -> None:
    """Test public API of parsers for breaking changes."""
    assert set(__all__) == {
+        "BS4HTMLParser",
        "PyPDFParser",
        "PDFMinerParser",
        "PyMuPDFParser",
@@ -5,10 +5,13 @@ import pytest

from langchain.document_loaders.html_bs import BSHTMLLoader

+HERE = Path(__file__).parent
+EXAMPLES = HERE.parent.parent / "integration_tests" / "examples"
+

def test_bs_html_loader() -> None:
    """Test unstructured loader."""
-    file_path = Path(__file__).parent.parent / "examples/example.html"
+    file_path = EXAMPLES / "example.html"
    loader = BSHTMLLoader(str(file_path), get_text_separator="|")
    docs = loader.load()

@@ -28,7 +31,7 @@ def test_bs_html_loader() -> None:
)
def test_bs_html_loader_non_utf8() -> None:
    """Test providing encoding to BSHTMLLoader."""
-    file_path = Path(__file__).parent.parent / "examples/example-utf8.html"
+    file_path = EXAMPLES / "example-utf8.html"

    with pytest.raises(UnicodeDecodeError):
        BSHTMLLoader(str(file_path)).load()
tests/unit_tests/document_loaders/test_generic_loader.py (new file, 114 lines)
@@ -0,0 +1,114 @@
"""Test generic loader."""
import os
import tempfile
from pathlib import Path
from typing import Generator, Iterator

import pytest

from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob, FileSystemBlobLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.schema import Document


@pytest.fixture
def toy_dir() -> Generator[Path, None, None]:
    """Yield a pre-populated directory to test the blob loader."""
    with tempfile.TemporaryDirectory() as temp_dir:
        # Create test.txt
        with open(os.path.join(temp_dir, "test.txt"), "w") as test_txt:
            test_txt.write("This is a test.txt file.")

        # Create test.html
        with open(os.path.join(temp_dir, "test.html"), "w") as test_html:
            test_html.write(
                "<html><body><h1>This is a test.html file.</h1></body></html>"
            )

        # Create .hidden_file
        with open(os.path.join(temp_dir, ".hidden_file"), "w") as hidden_file:
            hidden_file.write("This is a hidden file.")

        # Create some_dir/nested_file.txt
        some_dir = os.path.join(temp_dir, "some_dir")
        os.makedirs(some_dir)
        with open(os.path.join(some_dir, "nested_file.txt"), "w") as nested_file:
            nested_file.write("This is a nested_file.txt file.")

        # Create some_dir/other_dir/more_nested.txt
        other_dir = os.path.join(some_dir, "other_dir")
        os.makedirs(other_dir)
        with open(os.path.join(other_dir, "more_nested.txt"), "w") as nested_file:
            nested_file.write("This is a more_nested.txt file.")

        yield Path(temp_dir)


class AsIsParser(BaseBlobParser):
    """Parser created for testing purposes."""

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Yield the blob's content as a single document, unchanged."""
        yield Document(page_content=blob.as_string())


def test__init__(toy_dir: Path) -> None:
    """Test initialization from init."""
    loader = GenericLoader(
        FileSystemBlobLoader(toy_dir, suffixes=[".txt"]),
        AsIsParser(),
    )
    docs = loader.load()
    assert len(docs) == 3
    # Glob order seems to be deterministic with recursion. If this test becomes
    # flaky, we can sort the docs by page content.
    assert docs[0].page_content == "This is a test.txt file."


def test_from_filesystem_classmethod(toy_dir: Path) -> None:
    """Test generic loader."""
    loader = GenericLoader.from_filesystem(
        toy_dir, suffixes=[".txt"], parser=AsIsParser()
    )
    docs = loader.load()
    assert len(docs) == 3
    # Glob order seems to be deterministic with recursion. If this test becomes
    # flaky, we can sort the docs by page content.
    assert docs[0].page_content == "This is a test.txt file."


def test_from_filesystem_classmethod_with_glob(toy_dir: Path) -> None:
    """Test that the glob parameter is taken into account."""
    loader = GenericLoader.from_filesystem(toy_dir, glob="*.txt", parser=AsIsParser())
    docs = loader.load()
    assert len(docs) == 1
    # Glob order seems to be deterministic with recursion. If this test becomes
    # flaky, we can sort the docs by page content.
    assert docs[0].page_content == "This is a test.txt file."


@pytest.mark.requires("tqdm")
def test_from_filesystem_classmethod_show_progress(toy_dir: Path) -> None:
    """Test that the show_progress parameter is taken into account."""
    loader = GenericLoader.from_filesystem(
        toy_dir, glob="*.txt", parser=AsIsParser(), show_progress=True
    )
    docs = loader.load()
    assert len(docs) == 1
    # Glob order seems to be deterministic with recursion. If this test becomes
    # flaky, we can sort the docs by page content.
    assert docs[0].page_content == "This is a test.txt file."


def test_from_filesystem_using_default_parser(toy_dir: Path) -> None:
    """Use the default generic parser."""
    loader = GenericLoader.from_filesystem(
        toy_dir,
        suffixes=[".txt", ".html"],
    )
    docs = loader.load()
    assert len(docs) == 1
    # Glob order seems to be deterministic with recursion. If this test becomes
    # flaky, we can sort the docs by page content.
    assert docs[0].page_content == "This is a test.txt file."