mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-16 23:13:31 +00:00
Arxiv
document loader (#3627)
It makes sense to use `arxiv` as another source of documents for downloading. - Added the `arxiv` document loader, based on `utilities/arxiv.py:ArxivAPIWrapper` - added tests - added an example notebook - sorted `__all__` in `__init__.py` (otherwise it is hard to find a class in the very long list)
This commit is contained in:
55
tests/integration_tests/document_loaders/test_arxiv.py
Normal file
55
tests/integration_tests/document_loaders/test_arxiv.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from typing import List
|
||||
|
||||
from langchain.document_loaders.arxiv import ArxivLoader
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
def assert_docs(docs: List[Document]) -> None:
    """Check that every document has content and exactly the default metadata keys."""
    expected_keys = {"Published", "Title", "Authors", "Summary"}
    for document in docs:
        assert document.page_content
        assert document.metadata
        assert set(document.metadata) == expected_keys
|
||||
|
||||
|
||||
def test_load_success() -> None:
    """Test that a query for a specific arXiv id returns exactly one document.

    ``load_max_docs=2`` allows up to two results, but the id "1605.08386"
    identifies a single paper, so exactly one document is expected.
    """
    loader = ArxivLoader(query="1605.08386", load_max_docs=2)

    docs = loader.load()
    assert len(docs) == 1
    assert_docs(docs)
|
||||
|
||||
|
||||
def test_load_returns_no_result() -> None:
    """Test that a nonsensical arXiv id yields no documents."""
    docs = ArxivLoader(query="1605.08386WWW", load_max_docs=2).load()

    assert len(docs) == 0
|
||||
|
||||
|
||||
def test_load_returns_limited_docs() -> None:
    """Test that ``load_max_docs`` caps how many documents are returned."""
    max_docs = 2
    docs = ArxivLoader(query="ChatGPT", load_max_docs=max_docs).load()

    assert len(docs) == max_docs
    assert_docs(docs)
|
||||
|
||||
|
||||
def test_load_returns_full_set_of_metadata() -> None:
    """Test that ``load_all_available_meta=True`` returns extra metadata keys."""
    loader = ArxivLoader(query="ChatGPT", load_max_docs=1, load_all_available_meta=True)
    docs = loader.load()
    assert len(docs) == 1
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        # The four default keys must always be present.
        assert set(doc.metadata).issuperset(
            {"Published", "Title", "Authors", "Summary"}
        )
        # With full metadata enabled, at least one extra key is expected.
        assert len(set(doc.metadata)) > 4
|
@@ -1,6 +1,9 @@
|
||||
"""Integration test for Arxiv API Wrapper."""
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.schema import Document
|
||||
from langchain.utilities import ArxivAPIWrapper
|
||||
|
||||
|
||||
@@ -9,22 +12,68 @@ def api_client() -> ArxivAPIWrapper:
|
||||
return ArxivAPIWrapper()
|
||||
|
||||
|
||||
def test_run_success(api_client: ArxivAPIWrapper) -> None:
    """Test that ``run`` returns the title of the matching paper."""
    result = api_client.run("1605.08386")
    assert "Heat-bath random walks with Markov bases" in result
||||
|
||||
|
||||
def test_run_returns_several_docs(api_client: ArxivAPIWrapper) -> None:
    """Test that an author query returns results containing a known title."""
    result = api_client.run("Caprice Stanley")
    assert "On Mixing Behavior of a Family of Random Walks" in result
|
||||
|
||||
|
||||
def test_run_returns_no_result(api_client: ArxivAPIWrapper) -> None:
    """Test that a bogus query produces the "no result" message."""
    assert api_client.run("1605.08386WWW") == "No good Arxiv Result was found"
|
||||
|
||||
|
||||
def assert_docs(docs: List[Document]) -> None:
    """Verify content and the exact default metadata keys on each document."""
    default_meta = {"Published", "Title", "Authors", "Summary"}
    for item in docs:
        assert item.page_content
        assert item.metadata
        assert set(item.metadata) == default_meta
|
||||
|
||||
|
||||
def test_load_success(api_client: ArxivAPIWrapper) -> None:
    """Test that loading a specific arXiv id yields exactly one document."""
    documents = api_client.load("1605.08386")
    assert len(documents) == 1
    assert_docs(documents)
|
||||
|
||||
|
||||
def test_load_returns_no_result(api_client: ArxivAPIWrapper) -> None:
    """Test that loading a bogus id yields no documents."""
    documents = api_client.load("1605.08386WWW")
    assert len(documents) == 0
|
||||
|
||||
|
||||
def test_load_returns_limited_docs() -> None:
    """Test that ``load_max_docs`` caps how many documents ``load`` returns."""
    max_docs = 2
    wrapper = ArxivAPIWrapper(load_max_docs=max_docs)
    documents = wrapper.load("ChatGPT")
    assert len(documents) == max_docs
    assert_docs(documents)
|
||||
|
||||
|
||||
def test_load_returns_full_set_of_metadata() -> None:
    """Test that ``load_all_available_meta=True`` exposes extra metadata keys."""
    api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True)
    docs = api_client.load("ChatGPT")
    assert len(docs) == 1
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        # The four default keys must always be present.
        assert set(doc.metadata).issuperset(
            {"Published", "Title", "Authors", "Summary"}
        )
        # With full metadata enabled, at least one extra key is expected.
        assert len(set(doc.metadata)) > 4
|
||||
|
Reference in New Issue
Block a user