langchain/libs/community/langchain_community/document_loaders/scrapfly.py
Bagatur a0c2281540
infra: update mypy 1.10, ruff 0.5 (#23721)
```python
"""python scripts/update_mypy_ruff.py"""
import glob
import re
import subprocess
import tomllib
from pathlib import Path

import toml

ROOT_DIR = Path(__file__).parents[1]


def main():
    # Bump the pinned mypy and ruff versions in every libs/**/pyproject.toml.
    for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
        print(path)
        with open(path, "rb") as f:
            pyproject = tomllib.load(f)
        try:
            pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
                "^1.10"
            )
            pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
                "^0.5"
            )
        except KeyError:
            continue
        with open(path, "w") as f:
            toml.dump(pyproject, f)
        cwd = str(Path(path).parent)
        # Re-lock and reinstall, then run mypy to capture the errors
        # introduced by the upgrade.
        completed = subprocess.run(
            "poetry lock --no-update; poetry install --with typing; poetry run mypy . --no-color",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )
        logs = completed.stdout.split("\n")

        to_ignore = {}
        # Group mypy error codes by (file, line) so each flagged line gets a
        # single consolidated "# type: ignore[...]" comment. The pattern is a
        # raw string to avoid invalid escape sequences, compiled and matched
        # once per log line.
        error_re = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")
        for log_line in logs:
            match = error_re.match(log_line)
            if match:
                error_path, line_no, error_type = match.groups()
                to_ignore.setdefault((error_path, line_no), []).append(error_type)
        print(len(to_ignore))
        for (error_path, line_no), error_types in to_ignore.items():
            all_errors = ", ".join(error_types)
            full_path = f"{cwd}/{error_path}"
            try:
                with open(full_path, "r") as f:
                    file_lines = f.readlines()
            except FileNotFoundError:
                continue
            # Append the consolidated ignore comment to the offending line.
            file_lines[int(line_no) - 1] = (
                file_lines[int(line_no) - 1].rstrip("\n")
                + f"  # type: ignore[{all_errors}]\n"
            )
            with open(full_path, "w") as f:
                f.write("".join(file_lines))

        # Reformat and re-sort imports with the upgraded ruff (0.5 drops the
        # bare `ruff .` invocation in favor of `ruff check`).
        subprocess.run(
            "poetry run ruff format .; poetry run ruff check --select I --fix .",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )


if __name__ == "__main__":
    main()

```
2024-07-03 10:33:27 -07:00


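For reference, a minimal sketch (not part of the commit) of the transformation the update script applies: it matches mypy's `path:line: error: ... [code]` output and appends one consolidated `# type: ignore[...]` comment per flagged line. The sample log line and flagged source line below are hypothetical.

```python
import re

# The script's pattern, written as a raw string.
error_re = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")

# Hypothetical mypy output line.
sample = "langchain_community/x.py:7: error: Incompatible return value  [return-value]"
match = error_re.match(sample)
assert match is not None
error_path, line_no, error_type = match.groups()
# -> ("langchain_community/x.py", "7", "return-value")

# One consolidated comment gets appended to the flagged source line:
flagged_line = "def f() -> int: ...\n"
patched = flagged_line.rstrip("\n") + f"  # type: ignore[{error_type}]\n"
print(patched, end="")  # def f() -> int: ...  # type: ignore[return-value]
```
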
"""Scrapfly Web Reader."""
import logging
from typing import Iterator, List, Literal, Optional
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.utils import get_from_env
logger = logging.getLogger(__file__)
class ScrapflyLoader(BaseLoader):
"""Turn a url to llm accessible markdown with `Scrapfly.io`.
For further details, visit: https://scrapfly.io/docs/sdk/python
"""
def __init__(
self,
urls: List[str],
*,
api_key: Optional[str] = None,
scrape_format: Literal["markdown", "text"] = "markdown",
scrape_config: Optional[dict] = None,
continue_on_failure: bool = True,
) -> None:
"""Initialize client.
Args:
urls: List of urls to scrape.
api_key: The Scrapfly API key. If not specified must have env var
SCRAPFLY_API_KEY set.
scrape_format: Scrape result format, one or "markdown" or "text".
scrape_config: Dictionary of ScrapFly scrape config object.
continue_on_failure: Whether to continue if scraping a url fails.
"""
try:
from scrapfly import ScrapflyClient
except ImportError:
raise ImportError(
"`scrapfly` package not found, please run `pip install scrapfly-sdk`"
)
if not urls:
raise ValueError("URLs must be provided.")
api_key = api_key or get_from_env("api_key", "SCRAPFLY_API_KEY")
self.scrapfly = ScrapflyClient(key=api_key)
self.urls = urls
self.scrape_format = scrape_format
self.scrape_config = scrape_config
self.continue_on_failure = continue_on_failure
def lazy_load(self) -> Iterator[Document]:
from scrapfly import ScrapeConfig
scrape_config = self.scrape_config if self.scrape_config is not None else {}
for url in self.urls:
try:
response = self.scrapfly.scrape(
ScrapeConfig(url, format=self.scrape_format, **scrape_config)
)
yield Document(
page_content=response.scrape_result["content"],
metadata={"url": url},
)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e
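
A minimal usage sketch, not from the source: it assumes `scrapfly-sdk` is installed and `SCRAPFLY_API_KEY` is exported, and the target URL and `scrape_config` keys are illustrative. `load()` is inherited from `BaseLoader` and simply materializes `lazy_load()` into a list.

```python
from langchain_community.document_loaders import ScrapflyLoader

# Assumes SCRAPFLY_API_KEY is set in the environment; pass api_key= otherwise.
loader = ScrapflyLoader(
    ["https://example.com"],  # placeholder URL
    scrape_format="markdown",
    # Illustrative ScrapFly options; the dict is forwarded verbatim to
    # ScrapeConfig (see https://scrapfly.io/docs/sdk/python).
    scrape_config={"asp": True, "render_js": True},
    continue_on_failure=True,
)

docs = loader.load()  # BaseLoader.load() drains lazy_load()
for doc in docs:
    print(doc.metadata["url"], doc.page_content[:80])
```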