community[minor]: add user agent for web scraping loaders (#22480)

**Description:** This PR adds a `USER_AGENT` env variable that is to be
used for web scraping. It creates a util to get that user agent and uses
it in the classes used for scraping in [this piece of
doc](https://python.langchain.com/v0.1/docs/use_cases/web_scraping/).
Identifying your scraper is considered good politeness practice; this PR aims to make that easier.
**Issue:** `None`
**Dependencies:** `None`
**Twitter handle:** `None`
This commit is contained in:
Emilien Chauvet 2024-06-05 17:20:34 +02:00 committed by GitHub
parent 8250c177de
commit c3d4126eb1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 34 additions and 6 deletions

View File

@ -48,7 +48,7 @@
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
"\n",
"urls = [\"https://www.wsj.com\"]\n",
"loader = AsyncChromiumLoader(urls)\n",
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
"docs = loader.load()\n",
"docs[0].page_content[0:100]"
]

View File

@ -19,11 +19,12 @@ import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utils.user_agent import get_user_agent
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": "",
"User-Agent": get_user_agent(),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",

View File

@ -1,10 +1,11 @@
import asyncio
import logging
from typing import AsyncIterator, Iterator, List
from typing import AsyncIterator, Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utils.user_agent import get_user_agent
logger = logging.getLogger(__name__)
@ -13,18 +14,26 @@ class AsyncChromiumLoader(BaseLoader):
"""Scrape HTML pages from URLs using a
headless instance of the Chromium."""
def __init__(self, urls: List[str], *, headless: bool = True):
def __init__(
self,
urls: List[str],
*,
headless: bool = True,
user_agent: Optional[str] = None,
):
"""Initialize the loader with a list of URL paths.
Args:
urls: A list of URLs to scrape content from.
headless: Whether to run browser in headless mode.
user_agent: The user agent to use for the browser
Raises:
ImportError: If the required 'playwright' package is not installed.
"""
self.urls = urls
self.headless = headless
self.user_agent = user_agent or get_user_agent()
try:
import playwright # noqa: F401
@ -52,7 +61,7 @@ class AsyncChromiumLoader(BaseLoader):
async with async_playwright() as p:
browser = await p.chromium.launch(headless=self.headless)
try:
page = await browser.new_page()
page = await browser.new_page(user_agent=self.user_agent)
await page.goto(url)
results = await page.content() # Simply get the HTML content
logger.info("Content scraped")

View File

@ -1,4 +1,5 @@
"""Web base loader class."""
import asyncio
import logging
import warnings
@ -9,11 +10,12 @@ import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utils.user_agent import get_user_agent
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": "",
"User-Agent": get_user_agent(),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",

View File

@ -0,0 +1,16 @@
import logging
import os

log = logging.getLogger(__name__)


def get_user_agent() -> str:
    """Return the user agent string to use for web scraping requests.

    Reads the ``USER_AGENT`` environment variable; if it is unset or
    empty, logs a warning and falls back to a default identifier.

    Returns:
        The value of ``USER_AGENT`` if set, otherwise
        ``"DefaultLangchainUserAgent"``.
    """
    env_user_agent = os.environ.get("USER_AGENT")
    if not env_user_agent:
        # Use the module-level logger (``log``) rather than the root
        # logger via ``logging.warning`` so the message is attributed to
        # this module and honors its logging configuration.
        log.warning(
            "USER_AGENT environment variable not set, "
            "consider setting it to identify your requests."
        )
        return "DefaultLangchainUserAgent"
    return env_user_agent