mirror of
https://github.com/hwchase17/langchain.git
synced 2025-06-22 06:39:52 +00:00
community[minor]: add user agent for web scraping loaders (#22480)
**Description:** This PR adds a `USER_AGENT` env variable that is to be used for web scraping. It creates a util to get that user agent and uses it in the classes used for scraping in [this piece of doc](https://python.langchain.com/v0.1/docs/use_cases/web_scraping/). Identifying your scraper is considered a good politeness practice, this PR aims at easing it. **Issue:** `None` **Dependencies:** `None` **Twitter handle:** `None`
This commit is contained in:
parent
8250c177de
commit
c3d4126eb1
@ -48,7 +48,7 @@
|
|||||||
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
|
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
|
||||||
"\n",
|
"\n",
|
||||||
"urls = [\"https://www.wsj.com\"]\n",
|
"urls = [\"https://www.wsj.com\"]\n",
|
||||||
"loader = AsyncChromiumLoader(urls)\n",
|
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
|
||||||
"docs = loader.load()\n",
|
"docs = loader.load()\n",
|
||||||
"docs[0].page_content[0:100]"
|
"docs[0].page_content[0:100]"
|
||||||
]
|
]
|
||||||
|
@ -19,11 +19,12 @@ import requests
|
|||||||
from langchain_core.documents import Document
|
from langchain_core.documents import Document
|
||||||
|
|
||||||
from langchain_community.document_loaders.base import BaseLoader
|
from langchain_community.document_loaders.base import BaseLoader
|
||||||
|
from langchain_community.utils.user_agent import get_user_agent
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
default_header_template = {
|
default_header_template = {
|
||||||
"User-Agent": "",
|
"User-Agent": get_user_agent(),
|
||||||
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
|
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
|
||||||
";q=0.8",
|
";q=0.8",
|
||||||
"Accept-Language": "en-US,en;q=0.5",
|
"Accept-Language": "en-US,en;q=0.5",
|
||||||
|
@ -1,10 +1,11 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
from typing import AsyncIterator, Iterator, List
|
from typing import AsyncIterator, Iterator, List, Optional
|
||||||
|
|
||||||
from langchain_core.documents import Document
|
from langchain_core.documents import Document
|
||||||
|
|
||||||
from langchain_community.document_loaders.base import BaseLoader
|
from langchain_community.document_loaders.base import BaseLoader
|
||||||
|
from langchain_community.utils.user_agent import get_user_agent
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -13,18 +14,26 @@ class AsyncChromiumLoader(BaseLoader):
|
|||||||
"""Scrape HTML pages from URLs using a
|
"""Scrape HTML pages from URLs using a
|
||||||
headless instance of the Chromium."""
|
headless instance of the Chromium."""
|
||||||
|
|
||||||
def __init__(self, urls: List[str], *, headless: bool = True):
|
def __init__(
|
||||||
|
self,
|
||||||
|
urls: List[str],
|
||||||
|
*,
|
||||||
|
headless: bool = True,
|
||||||
|
user_agent: Optional[str] = None,
|
||||||
|
):
|
||||||
"""Initialize the loader with a list of URL paths.
|
"""Initialize the loader with a list of URL paths.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
urls: A list of URLs to scrape content from.
|
urls: A list of URLs to scrape content from.
|
||||||
headless: Whether to run browser in headless mode.
|
headless: Whether to run browser in headless mode.
|
||||||
|
user_agent: The user agent to use for the browser
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
ImportError: If the required 'playwright' package is not installed.
|
ImportError: If the required 'playwright' package is not installed.
|
||||||
"""
|
"""
|
||||||
self.urls = urls
|
self.urls = urls
|
||||||
self.headless = headless
|
self.headless = headless
|
||||||
|
self.user_agent = user_agent or get_user_agent()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import playwright # noqa: F401
|
import playwright # noqa: F401
|
||||||
@ -52,7 +61,7 @@ class AsyncChromiumLoader(BaseLoader):
|
|||||||
async with async_playwright() as p:
|
async with async_playwright() as p:
|
||||||
browser = await p.chromium.launch(headless=self.headless)
|
browser = await p.chromium.launch(headless=self.headless)
|
||||||
try:
|
try:
|
||||||
page = await browser.new_page()
|
page = await browser.new_page(user_agent=self.user_agent)
|
||||||
await page.goto(url)
|
await page.goto(url)
|
||||||
results = await page.content() # Simply get the HTML content
|
results = await page.content() # Simply get the HTML content
|
||||||
logger.info("Content scraped")
|
logger.info("Content scraped")
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
"""Web base loader class."""
|
"""Web base loader class."""
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
import warnings
|
import warnings
|
||||||
@ -9,11 +10,12 @@ import requests
|
|||||||
from langchain_core.documents import Document
|
from langchain_core.documents import Document
|
||||||
|
|
||||||
from langchain_community.document_loaders.base import BaseLoader
|
from langchain_community.document_loaders.base import BaseLoader
|
||||||
|
from langchain_community.utils.user_agent import get_user_agent
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
default_header_template = {
|
default_header_template = {
|
||||||
"User-Agent": "",
|
"User-Agent": get_user_agent(),
|
||||||
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
|
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
|
||||||
";q=0.8",
|
";q=0.8",
|
||||||
"Accept-Language": "en-US,en;q=0.5",
|
"Accept-Language": "en-US,en;q=0.5",
|
||||||
|
16
libs/community/langchain_community/utils/user_agent.py
Normal file
16
libs/community/langchain_community/utils/user_agent.py
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)


def get_user_agent() -> str:
    """Return the user agent string to use for web scraping requests.

    Reads the ``USER_AGENT`` environment variable. If it is unset or
    empty, logs a warning and falls back to a default identifier.

    Returns:
        The value of ``USER_AGENT`` if set, otherwise
        ``"DefaultLangchainUserAgent"``.
    """
    env_user_agent = os.environ.get("USER_AGENT")
    if not env_user_agent:
        # Use the module logger (not the root logger via `logging.warning`)
        # so the message is attributable to this module and honors any
        # per-module logging configuration.
        log.warning(
            "USER_AGENT environment variable not set, "
            "consider setting it to identify your requests."
        )
        return "DefaultLangchainUserAgent"
    return env_user_agent
|
Loading…
Reference in New Issue
Block a user