Newspaper (#8647)
- Description: Adds a newspaper3k-based news article loader; provide a list of URLs to load.
- Issue: N/A
- Dependencies: newspaper3k
- Tag maintainer: @rlancemartin, @eyurtsev
- Twitter handle: @ruze

Co-authored-by: Bagatur <baskaryan@gmail.com>
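For quick review, a minimal usage sketch of the new loader (the URL is a placeholder; assumes `pip install newspaper3k`):

from langchain.document_loaders import NewsURLLoader

# Placeholder URL; any publicly reachable news article works.
loader = NewsURLLoader(urls=["<news-article-url>"])
docs = loader.load()
print(docs[0].metadata["title"])
print(docs[0].page_content[:200])  # article text, since text_mode defaults to True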
libs/langchain/langchain/document_loaders/__init__.py

@@ -95,6 +95,7 @@ from langchain.document_loaders.mediawikidump import MWDumpLoader
 from langchain.document_loaders.merge import MergedDataLoader
 from langchain.document_loaders.mhtml import MHTMLLoader
 from langchain.document_loaders.modern_treasury import ModernTreasuryLoader
+from langchain.document_loaders.news import NewsURLLoader
 from langchain.document_loaders.notebook import NotebookLoader
 from langchain.document_loaders.notion import NotionDirectoryLoader
 from langchain.document_loaders.notiondb import NotionDBLoader
@@ -250,6 +251,7 @@ __all__ = [
     "MergedDataLoader",
     "MHTMLLoader",
     "ModernTreasuryLoader",
+    "NewsURLLoader",
     "NotebookLoader",
     "NotionDBLoader",
     "NotionDirectoryLoader",
libs/langchain/langchain/document_loaders/news.py (new file, 124 lines)

@@ -0,0 +1,124 @@
"""Loader that uses newspaper3k to load news articles from URLs."""
import logging
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class NewsURLLoader(BaseLoader):
    """Loader that uses newspaper to load news articles from URLs.

    Args:
        urls: URLs to load. Each is loaded into its own document.
        text_mode: If True, extract text from the URL and use that for page content.
            Otherwise, extract raw HTML.
        nlp: If True, perform NLP on the extracted contents, such as providing a
            summary and extracting keywords.
        continue_on_failure: If True, continue loading documents even if
            loading fails for a particular URL.
        show_progress_bar: If True, use tqdm to show a loading progress bar. Requires
            tqdm to be installed, ``pip install tqdm``.
        **newspaper_kwargs: Any additional named arguments to pass to
            newspaper.Article().

    Example:
        .. code-block:: python

            from langchain.document_loaders import NewsURLLoader

            loader = NewsURLLoader(
                urls=["<url-1>", "<url-2>"],
            )
            docs = loader.load()

    Newspaper reference:
        https://newspaper.readthedocs.io/en/latest/
    """

    def __init__(
        self,
        urls: List[str],
        text_mode: bool = True,
        nlp: bool = False,
        continue_on_failure: bool = True,
        show_progress_bar: bool = False,
        **newspaper_kwargs: Any,
    ) -> None:
        """Initialize with the URLs to load."""
        try:
            import newspaper  # noqa:F401

            self.__version = newspaper.__version__
        except ImportError:
            raise ImportError(
                "newspaper package not found, please install it with "
                "`pip install newspaper3k`"
            )

        self.urls = urls
        self.text_mode = text_mode
        self.nlp = nlp
        self.continue_on_failure = continue_on_failure
        self.newspaper_kwargs = newspaper_kwargs
        self.show_progress_bar = show_progress_bar

    def load(self) -> List[Document]:
        iterator = self.lazy_load()
        if self.show_progress_bar:
            try:
                from tqdm import tqdm
            except ImportError as e:
                raise ImportError(
                    "Package tqdm must be installed if show_progress_bar=True. "
                    "Please install with 'pip install tqdm' or set "
                    "show_progress_bar=False."
                ) from e
            iterator = tqdm(iterator)
        return list(iterator)

    def lazy_load(self) -> Iterator[Document]:
        try:
            from newspaper import Article
        except ImportError as e:
            raise ImportError(
                "Cannot import newspaper, please install with `pip install newspaper3k`"
            ) from e

        for url in self.urls:
            try:
                # Fetch and parse each article; NLP (keywords/summary) is optional.
                article = Article(url, **self.newspaper_kwargs)
                article.download()
                article.parse()

                if self.nlp:
                    article.nlp()

            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                    continue
                else:
                    raise e

            metadata = {
                "title": getattr(article, "title", ""),
                "link": getattr(article, "url", getattr(article, "canonical_link", "")),
                "authors": getattr(article, "authors", []),
                "language": getattr(article, "meta_lang", ""),
                "description": getattr(article, "meta_description", ""),
                "publish_date": getattr(article, "publish_date", ""),
            }

            if self.text_mode:
                content = article.text
            else:
                content = article.html

            if self.nlp:
                metadata["keywords"] = getattr(article, "keywords", [])
                metadata["summary"] = getattr(article, "summary", "")

            yield Document(page_content=content, metadata=metadata)
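To illustrate the lazy_load() path above, a sketch of streaming documents with NLP enabled (the URLs are placeholders; nlp=True may additionally require the nltk tokenizer data that newspaper3k uses for summaries):

from langchain.document_loaders import NewsURLLoader

loader = NewsURLLoader(
    urls=["<url-1>", "<url-2>"],  # placeholder URLs
    nlp=True,  # adds "keywords" and "summary" to each document's metadata
)
# lazy_load() yields one Document per URL as it is fetched and parsed.
for doc in loader.lazy_load():
    print(doc.metadata["title"], doc.metadata.get("summary", "")[:80])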
libs/langchain/tests/integration_tests/document_loaders/test_news.py (new file, 68 lines)

@@ -0,0 +1,68 @@
import random

import pytest
import requests
from bs4 import BeautifulSoup

from langchain.document_loaders import NewsURLLoader


def get_random_news_url() -> str:
    response = requests.get("https://news.google.com")
    soup = BeautifulSoup(response.text, "html.parser")

    article_links = [
        a["href"] for a in soup.find_all("a", href=True) if "/articles/" in a["href"]
    ]
    random_article_link = random.choice(article_links)

    return "https://news.google.com" + random_article_link


def test_news_loader() -> None:
    loader = NewsURLLoader([get_random_news_url()])
    docs = loader.load()

    assert docs[0] is not None
    assert hasattr(docs[0], "page_content")
    assert hasattr(docs[0], "metadata")

    metadata = docs[0].metadata
    assert "title" in metadata
    assert "link" in metadata
    assert "authors" in metadata
    assert "language" in metadata
    assert "description" in metadata
    assert "publish_date" in metadata


def test_news_loader_with_nlp() -> None:
    loader = NewsURLLoader([get_random_news_url()], nlp=True)
    docs = loader.load()

    assert docs[0] is not None
    assert hasattr(docs[0], "page_content")
    assert hasattr(docs[0], "metadata")

    metadata = docs[0].metadata
    assert "title" in metadata
    assert "link" in metadata
    assert "authors" in metadata
    assert "language" in metadata
    assert "description" in metadata
    assert "publish_date" in metadata
    assert "keywords" in metadata
    assert "summary" in metadata


def test_continue_on_failure_true() -> None:
    """Test exception is not raised when continue_on_failure=True."""
    loader = NewsURLLoader(["badurl.foobar"])
    loader.load()


def test_continue_on_failure_false() -> None:
    """Test exception is raised when continue_on_failure=False."""
    loader = NewsURLLoader(["badurl.foobar"], continue_on_failure=False)
    with pytest.raises(Exception):
        loader.load()