mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-04 04:07:54 +00:00
Bibtex integration for document loader and retriever (#5137)
# Bibtex integration Wrap bibtexparser to retrieve a list of docs from a bibtex file. * Get the metadata from the bibtex entries * `page_content` get from the local pdf referenced in the `file` field of the bibtex entry using `pymupdf` * If no valid pdf file, `page_content` set to the `abstract` field of the bibtex entry * Support Zotero flavour using regex to get the file path * Added usage example in `docs/modules/indexes/document_loaders/examples/bibtex.ipynb` --------- Co-authored-by: Sébastien M. Popoff <sebastien.popoff@espci.fr> Co-authored-by: Dev 2049 <dev.dev2049@gmail.com>
This commit is contained in:
parent
40b086d6e8
commit
5cfa72a130
190
docs/modules/indexes/document_loaders/examples/bibtex.ipynb
Normal file
190
docs/modules/indexes/document_loaders/examples/bibtex.ipynb
Normal file
@ -0,0 +1,190 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bda1f3f5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# BibTeX\n",
|
||||
"\n",
|
||||
"> BibTeX is a file format and reference management system commonly used in conjunction with LaTeX typesetting. It serves as a way to organize and store bibliographic information for academic and research documents.\n",
|
||||
"\n",
|
||||
"BibTeX files have a .bib extension and consist of plain text entries representing references to various publications, such as books, articles, conference papers, theses, and more. Each BibTeX entry follows a specific structure and contains fields for different bibliographic details like author names, publication title, journal or book title, year of publication, page numbers, and more.\n",
|
||||
"\n",
|
||||
"Bibtex files can also store the path to documents, such as `.pdf` files that can be retrieved."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b7a1eef-7bf7-4e7d-8bfc-c4e27c9488cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"First, you need to install `bibtexparser` and `PyMuPDF`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "b674aaea-ed3a-4541-8414-260a8f67f623",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install bibtexparser pymupdf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "95f05e1c-195e-4e2b-ae8e-8d6637f15be6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Examples"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e29b954c-1407-4797-ae21-6ba8937156be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`BibtexLoader` has these arguments:\n",
|
||||
"- `file_path`: the path to the `.bib` bibtex file\n",
|
||||
"- optional `max_docs`: default=None, i.e. no limit. Use it to limit the number of retrieved documents.\n",
|
||||
"- optional `max_content_chars`: default=4000. Use it to limit the number of characters in a single document.\n",
|
||||
"- optional `load_extra_metadata`: default=False. By default only the most important fields from the bibtex entries are loaded: `Published` (publication year), `Title`, `Authors`, `Summary`, `Journal`, `Keywords`, and `URL`. If True, it will also try to load the `entry_id`, `note`, `doi`, and `links` fields. \n",
|
||||
"- optional `file_pattern`: default=`r'[^:]+\\.pdf'`. Regex pattern to find files in the `file` entry. Default pattern supports `Zotero` flavour bibtex style and bare file path."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "9bfd5e46",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import BibtexLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "01971b53",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a dummy bibtex file and download a pdf.\n",
|
||||
"import urllib.request\n",
|
||||
"\n",
|
||||
"urllib.request.urlretrieve(\"https://www.fourmilab.ch/etexts/einstein/specrel/specrel.pdf\", \"einstein1905.pdf\")\n",
|
||||
"\n",
|
||||
"bibtex_text = \"\"\"\n",
|
||||
" @article{einstein1915,\n",
|
||||
" title={Die Feldgleichungen der Gravitation},\n",
|
||||
" abstract={Die Grundgleichungen der Gravitation, die ich hier entwickeln werde, wurden von mir in einer Abhandlung: ,,Die formale Grundlage der allgemeinen Relativit{\\\"a}tstheorie`` in den Sitzungsberichten der Preu{\\ss}ischen Akademie der Wissenschaften 1915 ver{\\\"o}ffentlicht.},\n",
|
||||
" author={Einstein, Albert},\n",
|
||||
" journal={Sitzungsberichte der K{\\\"o}niglich Preu{\\ss}ischen Akademie der Wissenschaften},\n",
|
||||
" volume={1915},\n",
|
||||
" number={1},\n",
|
||||
" pages={844--847},\n",
|
||||
" year={1915},\n",
|
||||
" doi={10.1002/andp.19163540702},\n",
|
||||
" link={https://onlinelibrary.wiley.com/doi/abs/10.1002/andp.19163540702},\n",
|
||||
" file={einstein1905.pdf}\n",
|
||||
" }\n",
|
||||
" \"\"\"\n",
|
||||
"# save bibtex_text to biblio.bib file\n",
|
||||
"with open(\"./biblio.bib\", \"w\") as file:\n",
|
||||
" file.write(bibtex_text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "2631f46b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = BibtexLoader(\"./biblio.bib\").load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "33ef1fb2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'id': 'einstein1915',\n",
|
||||
" 'published_year': '1915',\n",
|
||||
" 'title': 'Die Feldgleichungen der Gravitation',\n",
|
||||
" 'publication': 'Sitzungsberichte der K{\"o}niglich Preu{\\\\ss}ischen Akademie der Wissenschaften',\n",
|
||||
" 'authors': 'Einstein, Albert',\n",
|
||||
" 'abstract': 'Die Grundgleichungen der Gravitation, die ich hier entwickeln werde, wurden von mir in einer Abhandlung: ,,Die formale Grundlage der allgemeinen Relativit{\"a}tstheorie`` in den Sitzungsberichten der Preu{\\\\ss}ischen Akademie der Wissenschaften 1915 ver{\"o}ffentlicht.',\n",
|
||||
" 'url': 'https://doi.org/10.1002/andp.19163540702'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "46969806-45a9-4c4d-a61b-cfb9658fc9de",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ON THE ELECTRODYNAMICS OF MOVING\n",
|
||||
"BODIES\n",
|
||||
"By A. EINSTEIN\n",
|
||||
"June 30, 1905\n",
|
||||
"It is known that Maxwell’s electrodynamics—as usually understood at the\n",
|
||||
"present time—when applied to moving bodies, leads to asymmetries which do\n",
|
||||
"not appear to be inherent in the phenomena. Take, for example, the recipro-\n",
|
||||
"cal electrodynamic action of a magnet and a conductor. The observable phe-\n",
|
||||
"nomenon here depends only on the r\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content[:400]) # all pages of the pdf content"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -10,6 +10,7 @@ from langchain.document_loaders.azure_blob_storage_container import (
|
||||
from langchain.document_loaders.azure_blob_storage_file import (
|
||||
AzureBlobStorageFileLoader,
|
||||
)
|
||||
from langchain.document_loaders.bibtex import BibtexLoader
|
||||
from langchain.document_loaders.bigquery import BigQueryLoader
|
||||
from langchain.document_loaders.bilibili import BiliBiliLoader
|
||||
from langchain.document_loaders.blackboard import BlackboardLoader
|
||||
@ -129,6 +130,7 @@ __all__ = [
|
||||
"AzureBlobStorageContainerLoader",
|
||||
"AzureBlobStorageFileLoader",
|
||||
"BSHTMLLoader",
|
||||
"BibtexLoader",
|
||||
"BigQueryLoader",
|
||||
"BiliBiliLoader",
|
||||
"BlackboardLoader",
|
||||
|
108
langchain/document_loaders/bibtex.py
Normal file
108
langchain/document_loaders/bibtex.py
Normal file
@ -0,0 +1,108 @@
|
||||
import logging
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Iterator, List, Mapping, Optional
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.base import BaseLoader
|
||||
from langchain.utilities.bibtex import BibtexparserWrapper
|
||||
|
||||
logger = logging.getLogger(__name__)  # module-level logger used for non-fatal PDF load errors
|
||||
|
||||
|
||||
class BibtexLoader(BaseLoader):
    """Loads a bibtex file into a list of Documents.

    Each document represents one entry from the bibtex file.

    If a PDF file is present in the `file` bibtex field, the original PDF
    is loaded into the document text. If no such file entry is present,
    the `abstract` field is used instead.
    """

    def __init__(
        self,
        file_path: str,
        *,
        parser: Optional[BibtexparserWrapper] = None,
        max_docs: Optional[int] = None,
        max_content_chars: Optional[int] = 4_000,
        load_extra_metadata: bool = False,
        file_pattern: str = r"[^:]+\.pdf",
    ):
        """Initialize the BibtexLoader.

        Args:
            file_path: Path to the bibtex file.
            parser: Optional pre-configured ``BibtexparserWrapper``; a default
                instance is created when omitted.
            max_docs: Max number of associated documents to load. ``None``
                (the default) or a negative value means no limit.
            max_content_chars: Truncate each document's page_content to this
                many characters; ``None`` (or 0) disables truncation.
            load_extra_metadata: If True, also copy the optional bibtex fields
                (doi, note, editor, ...) into the document metadata.
            file_pattern: Regex used to extract file names from the ``file``
                field. The default supports Zotero-flavour entries and bare
                file paths.
        """
        self.file_path = file_path
        self.parser = parser or BibtexparserWrapper()
        # Earlier docs described "-1" as meaning "no limit", but a negative
        # slice bound (entries[:-1]) would silently drop trailing entries.
        # Normalize any negative value to None so both spellings are safe.
        self.max_docs = max_docs if max_docs is None or max_docs >= 0 else None
        self.max_content_chars = max_content_chars
        self.load_extra_metadata = load_extra_metadata
        self.file_regex = re.compile(file_pattern)

    def _load_entry(self, entry: Mapping[str, Any]) -> Optional[Document]:
        """Build a Document for one bibtex entry.

        Returns None when the entry's ``file`` field contains no file name
        matching ``file_pattern``, so the caller can skip the entry.
        """
        import fitz

        parent_dir = Path(self.file_path).parent
        # regex is useful for Zotero flavor bibtex files
        file_names = self.file_regex.findall(entry.get("file", ""))
        if not file_names:
            return None
        texts: List[str] = []
        for file_name in file_names:
            try:
                with fitz.open(parent_dir / file_name) as f:
                    texts.extend(page.get_text() for page in f)
            except FileNotFoundError as e:
                # Missing PDFs are tolerated: fall back to the abstract below.
                logger.debug(e)
        content = "\n".join(texts) or entry.get("abstract", "")
        if self.max_content_chars:
            content = content[: self.max_content_chars]
        metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata)
        return Document(
            page_content=content,
            metadata=metadata,
        )

    def lazy_load(self) -> Iterator[Document]:
        """Load bibtex file using bibtexparser and get the article texts plus the

        article metadata.

        See https://bibtexparser.readthedocs.io/en/master/

        Returns:
            a list of documents with the document.page_content in text format

        Raises:
            ImportError: If the ``pymupdf`` package is not installed.
        """
        try:
            import fitz  # noqa: F401
        except ImportError:
            raise ImportError(
                "PyMuPDF package not found, please install it with "
                "`pip install pymupdf`"
            )

        entries = self.parser.load_bibtex_entries(self.file_path)
        if self.max_docs:
            entries = entries[: self.max_docs]
        for entry in entries:
            doc = self._load_entry(entry)
            if doc:
                yield doc

    def load(self) -> List[Document]:
        """Load bibtex file documents from the given bibtex file path.

        See https://bibtexparser.readthedocs.io/en/master/

        Returns:
            a list of documents with the document.page_content in text format
        """
        return list(self.lazy_load())
|
87
langchain/utilities/bibtex.py
Normal file
87
langchain/utilities/bibtex.py
Normal file
@ -0,0 +1,87 @@
|
||||
"""Util that calls bibtexparser."""
|
||||
import logging
|
||||
from typing import Any, Dict, List, Mapping
|
||||
|
||||
from pydantic import BaseModel, Extra, root_validator
|
||||
|
||||
logger = logging.getLogger(__name__)  # module-level logger for this utility
|
||||
|
||||
# Bibtex fields that are only copied into the metadata dict when the caller
# asks for them (``load_extra=True`` in ``BibtexparserWrapper.get_metadata``).
# Order matters: it determines insertion order of the extra metadata keys.
OPTIONAL_FIELDS = [
    "annotate",
    "booktitle",
    "editor",
    "howpublished",
    "journal",
    "keywords",
    "note",
    "organization",
    "publisher",
    "school",
    "series",
    "type",
    "doi",
    "issn",
    "isbn",
]
|
||||
|
||||
|
||||
class BibtexparserWrapper(BaseModel):
    """Wrapper around bibtexparser.

    To use, you should have the ``bibtexparser`` python package installed.
    https://bibtexparser.readthedocs.io/en/master/

    This wrapper will use bibtexparser to load a collection of references from
    a bibtex file and fetch document summaries.
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        try:
            import bibtexparser  # noqa
        except ImportError:
            raise ImportError(
                "Could not import bibtexparser python package. "
                "Please install it with `pip install bibtexparser`."
            )

        return values

    def load_bibtex_entries(self, path: str) -> List[Dict[str, Any]]:
        """Load bibtex entries from the bibtex file at the given path."""
        import bibtexparser

        with open(path) as bib_file:
            database = bibtexparser.load(bib_file)
        return database.entries

    def get_metadata(
        self, entry: Mapping[str, Any], load_extra: bool = False
    ) -> Dict[str, Any]:
        """Get metadata for the given entry."""
        # Prefer an explicit URL; otherwise derive one from the DOI.
        if "url" in entry:
            url = entry["url"]
        elif "doi" in entry:
            url = f'https://doi.org/{entry["doi"]}'
        else:
            url = None
        fields = {
            "id": entry.get("ID"),
            "published_year": entry.get("year"),
            "title": entry.get("title"),
            "publication": entry.get("journal") or entry.get("booktitle"),
            "authors": entry.get("author"),
            "abstract": entry.get("abstract"),
            "url": url,
        }
        if load_extra:
            fields.update({name: entry.get(name) for name in OPTIONAL_FIELDS})
        # Drop fields the entry does not define.
        return {key: value for key, value in fields.items() if value is not None}
|
18
poetry.lock
generated
18
poetry.lock
generated
@ -765,6 +765,20 @@ soupsieve = ">1.2"
|
||||
html5lib = ["html5lib"]
|
||||
lxml = ["lxml"]
|
||||
|
||||
[[package]]
|
||||
name = "bibtexparser"
|
||||
version = "1.4.0"
|
||||
description = "Bibtex parser for python 3"
|
||||
category = "main"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "bibtexparser-1.4.0.tar.gz", hash = "sha256:ca7ce2bc34e7c48a678dd49416429bb567441f26dbb13b3609082d8cd109ace6"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
pyparsing = ">=2.0.3"
|
||||
|
||||
[[package]]
|
||||
name = "black"
|
||||
version = "23.3.0"
|
||||
@ -10856,7 +10870,7 @@ azure = ["azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-
|
||||
cohere = ["cohere"]
|
||||
docarray = ["docarray"]
|
||||
embeddings = ["sentence-transformers"]
|
||||
extended-testing = ["atlassian-python-api", "beautifulsoup4", "beautifulsoup4", "chardet", "gql", "html2text", "jq", "lxml", "pandas", "pdfminer-six", "psychicapi", "pymupdf", "pypdf", "pypdfium2", "requests-toolbelt", "scikit-learn", "telethon", "tqdm", "zep-python"]
|
||||
extended-testing = ["atlassian-python-api", "beautifulsoup4", "beautifulsoup4", "bibtexparser", "chardet", "gql", "html2text", "jq", "lxml", "pandas", "pdfminer-six", "psychicapi", "pymupdf", "pypdf", "pypdfium2", "requests-toolbelt", "scikit-learn", "telethon", "tqdm", "zep-python"]
|
||||
llms = ["anthropic", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"]
|
||||
openai = ["openai", "tiktoken"]
|
||||
qdrant = ["qdrant-client"]
|
||||
@ -10865,4 +10879,4 @@ text-helpers = ["chardet"]
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = ">=3.8.1,<4.0"
|
||||
content-hash = "640f7e8102328d7ec3f56778d7cdb76b4846fc407c99606e0aec31833bc3933e"
|
||||
content-hash = "6eefd001a66f12c5b9978fcb46aaaece7a4f1cd9887d85446d773c884816b104"
|
||||
|
@ -97,6 +97,7 @@ scikit-learn = {version = "^1.2.2", optional = true}
|
||||
azure-ai-formrecognizer = {version = "^3.2.1", optional = true}
|
||||
azure-ai-vision = {version = "^0.11.1b1", optional = true}
|
||||
azure-cognitiveservices-speech = {version = "^1.28.0", optional = true}
|
||||
bibtexparser = {version = "^1.4.0", optional = true}
|
||||
|
||||
[tool.poetry.group.docs.dependencies]
|
||||
autodoc_pydantic = "^1.8.0"
|
||||
@ -259,6 +260,7 @@ all = [
|
||||
# merge-conflicts
|
||||
extended_testing = [
|
||||
"beautifulsoup4",
|
||||
"bibtexparser",
|
||||
"chardet",
|
||||
"jq",
|
||||
"pdfminer.six",
|
||||
|
@ -0,0 +1,14 @@
|
||||
@inproceedings{shen2021layoutparser,
|
||||
title = {LayoutParser: A unified toolkit for deep learning based document image analysis},
|
||||
author = {Shen, Zejiang and Zhang, Ruochen and Dell, Melissa and Lee, Benjamin Charles Germain and Carlson, Jacob and Li, Weining},
|
||||
booktitle = {Document Analysis and Recognition--ICDAR 2021: 16th International Conference, Lausanne, Switzerland, September 5--10, 2021, Proceedings, Part I 16},
|
||||
pages = {131--146},
|
||||
year = {2021},
|
||||
organization = {Springer},
|
||||
editor = {Llad{\'o}s, Josep
|
||||
and Lopresti, Daniel
|
||||
and Uchida, Seiichi},
|
||||
file = {layout-parser-paper.pdf},
|
||||
abstract = {{Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.",
|
||||
isbn="978-3-030-86549-8}},
|
||||
}
|
Binary file not shown.
61
tests/unit_tests/document_loaders/test_bibtex.py
Normal file
61
tests/unit_tests/document_loaders/test_bibtex.py
Normal file
@ -0,0 +1,61 @@
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.document_loaders.bibtex import BibtexLoader
|
||||
|
||||
# Path to the sample bibtex fixture shipped alongside this test module.
BIBTEX_EXAMPLE_FILE = Path(__file__).parent / "sample_documents" / "bibtex.bib"
|
||||
|
||||
|
||||
@pytest.mark.requires("fitz", "bibtexparser")
def test_load_success() -> None:
    """Test that returns one document"""
    bib_loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE))
    documents = bib_loader.load()
    assert len(documents) == 1
    (document,) = documents
    assert document.page_content
    expected_keys = {
        "abstract",
        "authors",
        "id",
        "publication",
        "published_year",
        "title",
    }
    assert set(document.metadata) == expected_keys
|
||||
|
||||
|
||||
@pytest.mark.requires("fitz", "bibtexparser")
def test_load_max_content_chars() -> None:
    """Test that cuts off document contents at max_content_chars."""
    bib_loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), max_content_chars=10)
    first_doc = bib_loader.load()[0]
    assert len(first_doc.page_content) == 10
|
||||
|
||||
|
||||
@pytest.mark.requires("fitz", "bibtexparser")
def test_load_load_extra_metadata() -> None:
    """Test that returns extra metadata fields."""
    bib_loader = BibtexLoader(
        file_path=str(BIBTEX_EXAMPLE_FILE), load_extra_metadata=True
    )
    first_doc = bib_loader.load()[0]
    expected_keys = {
        "abstract",
        "authors",
        "booktitle",
        "editor",
        "id",
        "organization",
        "publication",
        "published_year",
        "title",
    }
    assert set(first_doc.metadata) == expected_keys
|
||||
|
||||
|
||||
@pytest.mark.requires("fitz", "bibtexparser")
def test_load_file_pattern() -> None:
    """Test that returns no documents when json file pattern specified."""
    bib_loader = BibtexLoader(
        file_path=str(BIBTEX_EXAMPLE_FILE), file_pattern=r"[^:]+\.json"
    )
    assert bib_loader.load() == []
|
Loading…
Reference in New Issue
Block a user