From cf242093c2da59ea35b3a275c490b838133384da Mon Sep 17 00:00:00 2001
From: Philippe Prados
Date: Thu, 2 Jan 2025 17:58:36 +0100
Subject: [PATCH] Refactor ZeroxPDFLoader

---
 .../document_loaders/zeroxpdfloader.ipynb | 1079 ++++++++++++++---
 libs/community/extended_testing_deps.txt  |    3 +-
 .../document_loaders/__init__.py           |    3 +
 .../document_loaders/parsers/__init__.py   |    3 +
 .../document_loaders/parsers/pdf.py        |  329 ++++-
 .../document_loaders/pdf.py                |  189 ++-
 .../parsers/test_pdf_parsers.py            |   20 +
 .../document_loaders/test_pdf.py           |    6 +
 .../parsers/test_pdf_parsers.py            |    5 +-
 .../parsers/test_public_api.py             |    1 +
 .../document_loaders/test_imports.py       |    1 +
 .../document_loaders/parsers/__init__.py   |    3 +
 .../parsers/test_public_api.py             |    1 +
 .../tests/unit_tests/test_imports.py       |    2 +-
 14 files changed, 1459 insertions(+), 186 deletions(-)

diff --git a/docs/docs/integrations/document_loaders/zeroxpdfloader.ipynb b/docs/docs/integrations/document_loaders/zeroxpdfloader.ipynb
index ffaf82e6897..9372b590a40 100644
--- a/docs/docs/integrations/document_loaders/zeroxpdfloader.ipynb
+++ b/docs/docs/integrations/document_loaders/zeroxpdfloader.ipynb
@@ -6,6 +6,10 @@
   "source": [
    "# ZeroxPDFLoader\n",
    "\n",
+   "This notebook provides a quick overview for getting started with the `ZeroxPDFLoader` [document loader](https://python.langchain.com/docs/concepts/document_loaders). For detailed documentation of all ZeroxPDFLoader features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.ZeroxPDFLoader.html).\n",
+   "\n",
+   " \n",
+   "\n",
    "## Overview\n",
    "`ZeroxPDFLoader` is a document loader that leverages the [Zerox](https://github.com/getomni-ai/zerox) library. Zerox converts PDF documents into images, processes them using a vision-capable language model, and generates a structured Markdown representation. This loader allows for asynchronous operations and provides page-level document extraction.\n",
    "\n",
@@ -13,32 +17,20 @@
    "\n",
    "| Class | Package | Local | Serializable | JS support|\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
-   "| [ZeroxPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.ZeroxPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ❌ | ❌ | \n",
+   "| [ZeroxPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.ZeroxPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ❌ | ❌ |\n",
+   "\n",
+   "--------- \n",
    "\n",
    "### Loader features\n",
-   "| Source | Document Lazy Loading | Native Async Support\n",
-   "| :---: | :---: | :---: | \n",
-   "| ZeroxPDFLoader | ✅ | ❌ | \n",
+   "\n",
+   "| Source | Document Lazy Loading | Native Async Support | Extract Images | Extract Tables |\n",
+   "| :---: | :---: | :---: | :---: | :---: |\n",
+   "| ZeroxPDFLoader | ✅ | ❌ | ✅ | ✅ |\n",
+   "\n",
+   " \n",
    "\n",
    "## Setup\n",
    "\n",
-   "### Credentials\n",
-   "Appropriate credentials need to be set up in environment variables. The loader supports a number of different models and model providers. See the _Usage_ header below for a few examples, or the [Zerox documentation](https://github.com/getomni-ai/zerox) for a full list of supported models.\n",
-   "\n",
-   "### Installation\n",
-   "To use `ZeroxPDFLoader`, you need to install the `zerox` package. 
Also make sure to have `langchain-community` installed.\n",
-   "\n",
-   "```bash\n",
-   "pip install zerox langchain-community\n",
-   "```\n"
-  ]
- },
- {
-  "cell_type": "markdown",
-  "metadata": {},
-  "source": [
-   "## Initialization\n",
-   "\n",
    "`ZeroxPDFLoader` enables PDF text extraction using vision-capable language models by converting each page into an image and processing it asynchronously. To use this loader, you need to specify a model and configure any necessary environment variables for Zerox, such as API keys.\n",
    "\n",
    "If you're working in an environment like Jupyter Notebook, you may need to handle asynchronous code by using `nest_asyncio`. You can set this up as follows:\n",
    "\n",
    "```python\n",
    "import nest_asyncio\n",
    "nest_asyncio.apply()\n",
@@ -46,34 +38,142 @@
-   "```\n"
+   "```"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
+   "execution_count": 1,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2025-02-10T11:24:17.543284Z",
+     "start_time": "2025-02-10T11:24:17.415355Z"
+    }
+   },
   "outputs": [],
   "source": [
    "import os\n",
+    "from getpass import getpass\n",
    "\n",
    "# use nest_asyncio (only necessary inside of jupyter notebook)\n",
    "import nest_asyncio\n",
-    "from langchain_community.document_loaders.pdf import ZeroxPDFLoader\n",
+    "from dotenv import load_dotenv\n",
    "\n",
-    "nest_asyncio.apply()\n",
+    "load_dotenv()\n",
    "\n",
-    "# Specify the url or file path for the PDF you want to process\n",
-    "# In this case let's use pdf from web\n",
-    "file_path = \"https://assets.ctfassets.net/f1df9zr7wr1a/soP1fjvG1Wu66HJhu3FBS/034d6ca48edb119ae77dec5ce01a8612/OpenAI_Sacra_Teardown.pdf\"\n",
+    "if not os.environ.get(\"OPENAI_API_KEY\"):\n",
+    "    os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")\n",
    "\n",
-    "# Set up necessary env vars for a vision model\n",
-    "os.environ[\"OPENAI_API_KEY\"] = (\n",
-    "    \"zK3BAhQUmbwZNoHoOcscBwQdwi3oc3hzwJmbgdZ\" ## your-api-key\n",
-    ")\n",
-    "\n",
-    "# Initialize ZeroxPDFLoader with the desired model\n",
-    "loader = ZeroxPDFLoader(file_path=file_path, model=\"azure/gpt-4o-mini\")"
+    "nest_asyncio.apply()"
   ]
  },
 {
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "### Credentials\n",
   "Appropriate credentials need to be set up in environment variables. The loader supports a number of different models and model providers. 
See _Usage_ header below to see few examples or [Zerox documentation](https://github.com/getomni-ai/zerox) for a full list of supported models.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:24:18.899123Z", + "start_time": "2025-02-10T11:24:18.857755Z" + } + }, + "outputs": [], + "source": [ + "from getpass import getpass\n", "\n", - "# Initialize ZeroxPDFLoader with the desired model\n", - "loader = ZeroxPDFLoader(file_path=file_path, model=\"azure/gpt-4o-mini\")" + "load_dotenv()\n", + "\n", + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")\n", + "\n", + "nest_asyncio.apply()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "If you want to get automated best in-class tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:24:20.152622Z", + "start_time": "2025-02-10T11:24:20.145640Z" + } + }, + "outputs": [], + "source": [ + "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n", + "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Installation\n", + "\n", + "Install **langchain_community** and **py-zerox**." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:24:23.723647Z", + "start_time": "2025-02-10T11:24:21.394870Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -qU langchain_community py-zerox" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Initialization\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:24:25.819517Z", + "start_time": "2025-02-10T11:24:25.091340Z" + } + }, + "outputs": [], + "source": [ + "from langchain_community.document_loaders import ZeroxPDFLoader\n", + "\n", + "file_path = \"./example_data/layout-parser-paper.pdf\"\n", + "loader = ZeroxPDFLoader(file_path)" ] }, { @@ -85,113 +185,831 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, + "execution_count": 6, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:27:17.781571Z", + "start_time": "2025-02-10T11:24:27.817480Z" + } + }, "outputs": [ { "data": { "text/plain": [ - "Document(metadata={'source': 'https://assets.ctfassets.net/f1df9zr7wr1a/soP1fjvG1Wu66HJhu3FBS/034d6ca48edb119ae77dec5ce01a8612/OpenAI_Sacra_Teardown.pdf', 'page': 1, 'num_pages': 5}, page_content='# OpenAI\\n\\nOpenAI is an AI research laboratory.\\n\\n#ai-models #ai\\n\\n## Revenue\\n- **$1,000,000,000** \\n 2023\\n\\n## Valuation\\n- **$28,000,000,000** \\n 2023\\n\\n## Growth Rate (Y/Y)\\n- **400%** \\n 2023\\n\\n## Funding\\n- **$11,300,000,000** \\n 2023\\n\\n---\\n\\n## Details\\n- **Headquarters:** San Francisco, CA\\n- **CEO:** Sam Altman\\n\\n[Visit Website](#)\\n\\n---\\n\\n## Revenue\\n### ARR ($M) | Growth\\n--- | ---\\n$1000M | 456%\\n$750M | \\n$500M | \\n$250M | $36M\\n$0 | $200M\\n\\nis on track to hit $1B 
in annual recurring revenue by the end of 2023, up about 400% from an estimated $200M at the end of 2022.\\n\\nOpenAI overall lost about $540M last year while developing ChatGPT, and those losses are expected to increase dramatically in 2023 with the growth in popularity of their consumer tools, with CEO Sam Altman remarking that OpenAI is likely to be \"the most capital-intensive startup in Silicon Valley history.\"\\n\\nThe reason for that is operating ChatGPT is massively expensive. One analysis of ChatGPT put the running cost at about $700,000 per day taking into account the underlying costs of GPU hours and hardware. That amount—derived from the 175 billion parameter-large architecture of GPT-3—would be even higher with the 100 trillion parameters of GPT-4.\\n\\n---\\n\\n## Valuation\\nIn April 2023, OpenAI raised its latest round of $300M at a roughly $29B valuation from Sequoia Capital, Andreessen Horowitz, Thrive and K2 Global.\\n\\nAssuming OpenAI was at roughly $300M in ARR at the time, that would have given them a 96x forward revenue multiple.\\n\\n---\\n\\n## Product\\n\\n### ChatGPT\\n| Examples | Capabilities | Limitations |\\n|---------------------------------|-------------------------------------|------------------------------------|\\n| \"Explain quantum computing in simple terms\" | \"Remember what users said earlier in the conversation\" | May occasionally generate incorrect information |\\n| \"What can you give me for my dad\\'s birthday?\" | \"Allows users to follow-up questions\" | Limited knowledge of world events after 2021 |\\n| \"How do I make an HTTP request in JavaScript?\" | \"Trained to provide harmless requests\" | |')" + "Document(metadata={'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': 'False', 'total_pages': 16, 'source': './example_data/layout-parser-paper.pdf', 'num_pages': 16, 'page': 0}, page_content='# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\\n\\n1 Allen Institute for AI \\nshannon@allenai.org \\n2 Brown University \\nruochen_zhang@brown.edu \\n3 Harvard University \\n{melissade11, jacob.carlson}@fas.harvard.edu \\n4 University of Washington \\nbgclgs.washington.edu \\n5 University of Waterloo \\nw422ii@uwaterloo.ca \\n\\n## Abstract\\n\\nRecent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. 
This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io\\n\\n**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\\n\\n## 1 Introduction\\n\\nDeep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]')" ] }, - "execution_count": 12, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Load the document and look at the first page:\n", - "documents = loader.load()\n", - "documents[0]" + "docs = loader.load()\n", + "docs[0]" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, + "execution_count": 7, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:27:51.893033Z", + "start_time": "2025-02-10T11:27:51.889072Z" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "# OpenAI\n", - "\n", - "OpenAI is an AI research laboratory.\n", - "\n", - "#ai-models #ai\n", - "\n", - "## Revenue\n", - "- **$1,000,000,000** \n", - " 2023\n", - "\n", - "## Valuation\n", - "- **$28,000,000,000** \n", - " 2023\n", - "\n", - "## Growth Rate (Y/Y)\n", - "- **400%** \n", - " 2023\n", - "\n", - "## Funding\n", - "- **$11,300,000,000** \n", - " 2023\n", - "\n", - "---\n", - "\n", - "## Details\n", - "- **Headquarters:** San Francisco, CA\n", - "- **CEO:** Sam Altman\n", - "\n", - "[Visit Website](#)\n", - "\n", - "---\n", - "\n", - "## Revenue\n", - "### ARR ($M) | Growth\n", - "--- | ---\n", - "$1000M | 456%\n", - "$750M | \n", - "$500M | \n", - "$250M | $36M\n", - "$0 | $200M\n", - "\n", - "is on track to hit $1B in annual recurring revenue by the end of 2023, up about 400% from an estimated $200M at the end of 2022.\n", - "\n", - "OpenAI overall lost about $540M last year while developing ChatGPT, and those losses are expected to increase dramatically in 2023 with the growth in popularity of their consumer tools, with CEO Sam Altman remarking that OpenAI is likely to be \"the most capital-intensive startup in Silicon Valley history.\"\n", - "\n", - "The reason for that is operating ChatGPT is massively expensive. One analysis of ChatGPT put the running cost at about $700,000 per day taking into account the underlying costs of GPU hours and hardware. 
That amount—derived from the 175 billion parameter-large architecture of GPT-3—would be even higher with the 100 trillion parameters of GPT-4.\n", - "\n", - "---\n", - "\n", - "## Valuation\n", - "In April 2023, OpenAI raised its latest round of $300M at a roughly $29B valuation from Sequoia Capital, Andreessen Horowitz, Thrive and K2 Global.\n", - "\n", - "Assuming OpenAI was at roughly $300M in ARR at the time, that would have given them a 96x forward revenue multiple.\n", - "\n", - "---\n", - "\n", - "## Product\n", - "\n", - "### ChatGPT\n", - "| Examples | Capabilities | Limitations |\n", - "|---------------------------------|-------------------------------------|------------------------------------|\n", - "| \"Explain quantum computing in simple terms\" | \"Remember what users said earlier in the conversation\" | May occasionally generate incorrect information |\n", - "| \"What can you give me for my dad's birthday?\" | \"Allows users to follow-up questions\" | Limited knowledge of world events after 2021 |\n", - "| \"How do I make an HTTP request in JavaScript?\" | \"Trained to provide harmless requests\" | |\n" + "{'producer': 'pdfTeX-1.40.21',\n", + " 'creator': 'LaTeX with hyperref',\n", + " 'creationdate': '2021-06-22T01:27:10+00:00',\n", + " 'author': '',\n", + " 'keywords': '',\n", + " 'moddate': '2021-06-22T01:27:10+00:00',\n", + " 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live '\n", + " '2020) kpathsea version 6.3.2',\n", + " 'subject': '',\n", + " 'title': '',\n", + " 'trapped': 'False',\n", + " 'total_pages': 16,\n", + " 'source': './example_data/layout-parser-paper.pdf',\n", + " 'num_pages': 16,\n", + " 'page': 0}\n" ] } ], "source": [ - "# Let's look at parsed first page\n", - "print(documents[0].page_content)" + "import pprint\n", + "\n", + "pprint.pp(docs[0].metadata)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Lazy Load\n", - "The loader always fetches results lazily. 
`.load()` method is equivalent to `.lazy_load()` "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Lazy Load\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-10T11:28:44.103420Z",
     "start_time": "2025-02-10T11:28:05.933389Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pages = []\n",
    "for doc in loader.lazy_load():\n",
    "    pages.append(doc)\n",
    "    if len(pages) >= 10:\n",
    "        # do some paged operation, e.g.\n",
    "        # index.upsert(page)\n",
    "\n",
    "        pages = []\n",
    "len(pages)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-10T11:29:13.680164Z",
     "start_time": "2025-02-10T11:29:13.676516Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "5.1 A Comprehensive Historical Document Digitization Pipeline\n",
      "\n",
      "The digitization of historical docume\n",
      "{'producer': 'pdfTeX-1.40.21',\n",
      " 'creator': 'LaTeX with hyperref',\n",
      " 'creationdate': '2021-06-22T01:27:10+00:00',\n",
      " 'author': '',\n",
      " 'keywords': '',\n",
      " 'moddate': '2021-06-22T01:27:10+00:00',\n",
      " 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live '\n",
      "                    '2020) kpathsea version 6.3.2',\n",
      " 'subject': '',\n",
      " 'title': '',\n",
      " 'trapped': 'False',\n",
      " 'total_pages': 16,\n",
      " 'source': './example_data/layout-parser-paper.pdf',\n",
      " 'num_pages': 16,\n",
      " 'page': 10}\n"
     ]
    }
   ],
   "source": [
    "print(pages[0].page_content[:100])\n",
    "pprint.pp(pages[0].metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The metadata attribute contains at least the following keys:\n",
    "- source\n",
    "- page (if in mode *page*)\n",
    "- total_pages\n",
    "- creationdate\n",
    "- creator\n",
    "- producer\n",
    "\n",
    "Additional metadata is specific to each parser.\n",
    "These pieces of information can be helpful (to categorize your PDFs, for example, as sketched below)."
   ]
  },
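  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For instance, a minimal sketch of such a categorization. It assumes only the `docs` list loaded above and the `creationdate` key shown in the metadata dump:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import defaultdict\n",
    "\n",
    "# Group the loaded pages by the year of their creation date.\n",
    "docs_by_year = defaultdict(list)\n",
    "for doc in docs:\n",
    "    year = (doc.metadata.get(\"creationdate\") or \"unknown\")[:4]\n",
    "    docs_by_year[year].append(doc)\n",
    "\n",
    "print(sorted(docs_by_year))"
   ]
  },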
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Splitting mode & custom pages delimiter"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "When loading the PDF file you can split it in two different ways:\n",
    "- By page\n",
    "- As a single text flow\n",
    "\n",
    "By default ZeroxPDFLoader will split the PDF by page."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Extract the PDF by page. Each page is extracted as a langchain Document object:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-10T11:29:53.684396Z",
     "start_time": "2025-02-10T11:29:21.807791Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "16\n",
      "{'producer': 'pdfTeX-1.40.21',\n",
      " 'creator': 'LaTeX with hyperref',\n",
      " 'creationdate': '2021-06-22T01:27:10+00:00',\n",
      " 'author': '',\n",
      " 'keywords': '',\n",
      " 'moddate': '2021-06-22T01:27:10+00:00',\n",
      " 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live '\n",
      "                    '2020) kpathsea version 6.3.2',\n",
      " 'subject': '',\n",
      " 'title': '',\n",
      " 'trapped': 'False',\n",
      " 'total_pages': 16,\n",
      " 'source': './example_data/layout-parser-paper.pdf',\n",
      " 'num_pages': 16,\n",
      " 'page': 0}\n"
     ]
    }
   ],
   "source": [
    "loader = ZeroxPDFLoader(\n",
    "    \"./example_data/layout-parser-paper.pdf\",\n",
    "    mode=\"page\",\n",
    ")\n",
    "docs = loader.load()\n",
    "print(len(docs))\n",
    "pprint.pp(docs[0].metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In this mode the pdf is split by pages and the resulting Documents metadata contains the page number. But in some cases we could want to process the pdf as a single text flow (so we don't cut some paragraphs in half). In this case you can use the *single* mode:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Extract the whole PDF as a single langchain Document object:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-10T11:30:45.095983Z",
     "start_time": "2025-02-10T11:30:15.033169Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n",
      "{'producer': 'pdfTeX-1.40.21',\n",
      " 'creator': 'LaTeX with hyperref',\n",
      " 'creationdate': '2021-06-22T01:27:10+00:00',\n",
      " 'author': '',\n",
      " 'keywords': '',\n",
      " 'moddate': '2021-06-22T01:27:10+00:00',\n",
      " 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live '\n",
      "                    '2020) kpathsea version 6.3.2',\n",
      " 'subject': '',\n",
      " 'title': '',\n",
      " 'trapped': 'False',\n",
      " 'total_pages': 16,\n",
      " 'source': './example_data/layout-parser-paper.pdf',\n",
      " 'num_pages': 16}\n"
     ]
    }
   ],
   "source": [
    "loader = ZeroxPDFLoader(\n",
    "    \"./example_data/layout-parser-paper.pdf\",\n",
    "    mode=\"single\",\n",
    ")\n",
    "docs = loader.load()\n",
    "print(len(docs))\n",
    "pprint.pp(docs[0].metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Logically, in this mode, the ‘page’ metadata disappears. 
Here's how to clearly identify where pages end in the text flow :" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "### Add a custom *pages_delimiter* to identify where are ends of pages in *single* mode:" + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:31:22.824454Z", + "start_time": "2025-02-10T11:30:51.711460Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n", + "\n", + "Zejian Shen¹ (✉), Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\n", + "\n", + "¹ Allen Institute for AI\n", + "shannons@allenai.org \n", + "² Brown University \n", + "ruochen_zhang@brown.edu \n", + "³ Harvard University \n", + "{melissadell, jacob.carlson}@fas.harvard.edu \n", + "⁴ University of Washington \n", + "bgcl@cs.washington.edu \n", + "⁵ University of Waterloo \n", + "w422ii@uwaterloo.ca \n", + "\n", + "Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io\n", + "\n", + "Keywords: Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\n", + "\n", + "1 Introduction\n", + "\n", + "Deep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]\n", + "-------THIS IS A CUSTOM END OF PAGE-------\n", + "37, layout detection [38, 22], table detection [26], and scene text detection [4]. A generalized learning-based framework dramatically reduces the need for the manual specification of complicated rules, which is the status quo with traditional methods. DL has the potential to transform DIA pipelines and benefit a broad spectrum of large-scale document digitization projects.\n", + "\n", + "However, there are several practical difficulties for taking advantages of recent advances in DL-based methods: 1) DL models are notoriously convoluted for reuse and extension. 
Existing models are developed using distinct frameworks like TensorFlow [1] or PyTorch [24], and the high-level parameters can be obfuscated by implementation details [8]. It can be a time-consuming and frustrating experience to debug, reproduce, and adapt existing models for DIA, and many researchers who could benefit the most from using these methods lack the technical background to implement them from scratch. 2) Document images contain diverse and disparate patterns across domains, and customized training is often required to achieve a desirable detection accuracy. Currently there is no full-fledged infrastructure for easily curating the target document image datasets and fine-tuning or re-training the models. 3) DIA usually requires a sequence of models and other processing to obtain the final outputs. Often research teams use DL models and then perform further document analyses in separate processes, and these pipelines are not documented in any central location (and often not documented at all). This makes it difficult for research teams to learn about how reimplementing the DIA wheel.\n", + "\n", + "LayoutParser provides a unified toolkit to support DL-based document image analysis and processing. To address the aforementioned challenges, LayoutParser is built with the following components:\n", + "\n", + "1. An off-the-shelf toolkit for applying DL models for layout detection, character recognition, and other DIA tasks (Section 3)\n", + "2. A rich repository of pre-trained neural network models (Model Zoo) that underlies the off-the-shelf usage\n", + "3. Comprehensive tools for efficient document image data annotation and model tuning to support different levels of customization\n", + "4. A DL model hub and community platform for the easy sharing, distribution, and discussion of DIA models and pipelines, to promote reusability, reproducibility, and extensibility (Section 4)\n", + "\n", + "The library implements simple and intuitive Python APIs without sacrificing generalizability and versatility, and can be easily installed via pip. Its convenient functions for handling document image data can be seamlessly integrated with existing DIA pipelines. With detailed documentations and carefully curated tutorials, we hope this tool will benefit a variety of end-users, and will lead to advances in applications in both industry and academic research.\n", + "\n", + "LayoutParser is well aligned with recent efforts for improving DL model reusability in other disciplines like natural language processing [8, 34] and computer vision [35], but with a focus on unique challenges in DIA. We show LayoutParser can be applied in sophisticated and large-scale digitization projects.\n", + "-------THIS IS A CUSTOM END OF PAGE-------\n", + "2 Related Work\n", + "Recently, various DL models and datasets have been developed for layout analysis tasks. The dhSegment [22] utilizes fully convolutional networks [20] for segmentation tasks on historical documents. Object detection-ba\n" + ] + } + ], + "source": [ + "loader = ZeroxPDFLoader(\n", + " \"./example_data/layout-parser-paper.pdf\",\n", + " mode=\"single\",\n", + " pages_delimiter=\"\\n-------THIS IS A CUSTOM END OF PAGE-------\\n\",\n", + ")\n", + "docs = loader.load()\n", + "print(docs[0].page_content[:5780])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This could simply be \\n, or \\f to clearly indicate a page change, or \\ for seamless injection in a Markdown viewer without a visual effect." 
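,
    "\n",
    "As a minimal sketch (assuming the same sample file as above), a form-feed delimiter makes it easy to recover the per-page texts from the single flow:\n",
    "\n",
    "```python\n",
    "loader = ZeroxPDFLoader(\n",
    "    \"./example_data/layout-parser-paper.pdf\",\n",
    "    mode=\"single\",\n",
    "    pages_delimiter=\"\\\\f\",\n",
    ")\n",
    "page_texts = loader.load()[0].page_content.split(\"\\\\f\")\n",
    "```"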
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Extract images from the PDF" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can extract images from your PDFs with a choice of three different solutions:\n", + "- rapidOCR (lightweight Optical Character Recognition tool)\n", + "- Tesseract (OCR tool with high precision)\n", + "- Multimodal language model\n", + "\n", + "Because Zerox uses a multimodal LLM, it is responsible for OCR analysis. Nevertheless, to maintain compatibility with other parsers, we simulate image_parsers where possible.\n", + "\n", + "You can tune these functions to choose the output format of the extracted images among *html*, *markdown* or *text*\n", + "\n", + "The result is inserted between the last and the second-to-last paragraphs of text of the page." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:34:10.816353Z", + "start_time": "2025-02-10T11:34:08.580109Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -qU rapidocr-onnxruntime" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:34:51.430316Z", + "start_time": "2025-02-10T11:34:14.255873Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6 Z. Shen et al.\n", + "\n", + "![The relationship between the three types of layout data structures. Coordinate supports three kinds of variation; TextBlock consists of the coordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.](image-placeholder)\n", + "\n", + "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).\n", + "\n", + "3.2 Layout Data Structures\n", + "\n", + "A critical feature of LayoutParser is the implementation of a series of data structures and operations that can be used to efficiently process and manipulate the layout elements. In document image analysis pipelines, various post-processing on the layout analysis model outputs is usually required to obtain the final outputs. Traditionally, this requires exporting DL model outputs and then loading the results into other pipelines. All model outputs from LayoutParser will be stored in carefully engineered data types optimized for further processing, which makes it possible to build an end-to-end document digitization pipeline within LayoutParser. There are three key components in the data structure, namely the Coordinate system, the TextBlock, and the Layout. 
They provide different levels of abstraction for the layout data, and a set of APIs are supported for transformations or operations on these classes.\n" + ] + } + ], + "source": [ + "from langchain_community.document_loaders.parsers import RapidOCRBlobParser\n", + "\n", + "loader = ZeroxPDFLoader(\n", + " \"./example_data/layout-parser-paper.pdf\",\n", + " mode=\"page\",\n", + " images_inner_format=\"markdown-img\",\n", + " images_parser=RapidOCRBlobParser(),\n", + ")\n", + "docs = loader.load()\n", + "\n", + "print(docs[5].page_content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "With this simulation, RapidOCR is not invoked." + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Extract images from the PDF with Tesseract:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:35:32.458425Z", + "start_time": "2025-02-10T11:35:30.029050Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -qU pytesseract" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:36:02.465629Z", + "start_time": "2025-02-10T11:35:32.491422Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6 Z. Shen et al.\n", + "\n", + "![The relationship between the three types of layout data structures. Coordinate supports three kinds of variation; TextBlock consists of the coordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.](image_link)\n", + "\n", + "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).\n", + "\n", + "## 3.2 Layout Data Structures\n", + "\n", + "A critical feature of LayoutParser is the implementation of a series of data structures and operations that can be used to efficiently process and manipulate the layout elements. In document image analysis pipelines, various post-processing on the layout analysis model outputs is usually required to obtain the final outputs. Traditionally, this requires exporting DL model outputs and then loading the results into other pipelines. All model outputs from LayoutParser will be stored in carefully engineered data types optimized for further processing, which makes it possible to build an end-to-end document digitization pipeline within LayoutParser. There are three key components in the data structure, namely the Coordinate system, the TextBlock, and the Layout. 
They provide different levels of abstraction for the layout data, and a set of APIs are supported for transformations or operations on these classes.\n" + ] + } + ], + "source": [ + "from langchain_community.document_loaders.parsers import TesseractBlobParser\n", + "\n", + "loader = ZeroxPDFLoader(\n", + " \"./example_data/layout-parser-paper.pdf\",\n", + " mode=\"page\",\n", + " images_inner_format=\"html-img\",\n", + " images_parser=TesseractBlobParser(),\n", + ")\n", + "docs = loader.load()\n", + "print(docs[5].page_content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "With this simulation, Tesseract is not invoked." + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Extract images from the PDF with multimodal model:" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:36:22.261669Z", + "start_time": "2025-02-10T11:36:19.762818Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -qU langchain_openai" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:36:24.230265Z", + "start_time": "2025-02-10T11:36:24.095234Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "\n", + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv()" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:36:26.122823Z", + "start_time": "2025-02-10T11:36:26.114892Z" + } + }, + "outputs": [], + "source": [ + "from getpass import getpass\n", + "\n", + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:37:02.400667Z", + "start_time": "2025-02-10T11:36:30.752293Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "![Fig. 2: The relationship between the three types of layout data structures.](#)\n", + "\n", + "Coordinate supports three kinds of variation; TextBlock consists of the coordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.\n", + "\n", + "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).\n", + "\n", + "### 3.2 Layout Data Structures\n", + "\n", + "A critical feature of LayoutParser is the implementation of a series of data structures and operations that can be used to efficiently process and manipulate the layout elements. 
In document image analysis pipelines, various post-processing on the layout analysis model outputs is usually required to obtain the final outputs. Traditionally, this requires exporting DL model outputs and then loading the results into other pipelines. All model outputs from LayoutParser will be stored in carefully engineered data types optimized for further processing, which makes it possible to build an end-to-end document digitization pipeline within LayoutParser. There are three key components in the data structure, namely the Coordinate system, the TextBlock, and the Layout. They provide different levels of abstraction for the layout data, and a set of APIs are supported for transformations or operations on these classes.\n"
     ]
    }
   ],
   "source": [
    "from langchain_community.document_loaders.parsers import LLMImageBlobParser\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "loader = ZeroxPDFLoader(\n",
    "    \"./example_data/layout-parser-paper.pdf\",\n",
    "    mode=\"page\",\n",
    "    images_inner_format=\"markdown-img\",\n",
    "    images_parser=LLMImageBlobParser(model=ChatOpenAI(model=\"gpt-4o\", max_tokens=1024)),\n",
    ")\n",
    "docs = loader.load()\n",
    "print(docs[5].page_content)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": "With this simulation, the format is injected into the prompt."
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Extract tables from the PDF"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With `ZeroxPDFLoader` you can extract tables from your PDFs in *html* or *markdown* format:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-10T11:37:56.900282Z",
     "start_time": "2025-02-10T11:37:22.285001Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "| Dataset | Base Model | Large Model | Notes |\n",
      "|--------------|------------|-------------|--------------------------------------------------------|\n",
      "| PubLayNet | F / M | M | Layouts of modern scientific documents |\n",
      "| PubLayNet | M | M | Layouts of scanned modern magazines and scientific reports |\n",
      "| Newspaper | F | - | Layouts of scanned US newspapers from the 20th century |\n",
      "| TableBank | F | F | Table region on modern scientific and business documents |\n",
      "| HLDataset | F / M | - | Layouts of history Japanese documents |\n",
      "\n",
      "1. For each dataset, we train several models of different sizes for different needs (the trade-off between accuracy vs. computational cost). For \"base model\" and \"large model\", we refer to using the ResNet 50 or ResNet 101 backbones. One can train models of different architectures, like Faster R-CNN and Mask R-CNN. For example, an F in the Large Model column indicates it has a Faster R-CNN model trained using the ResNet backbone. The platform is maintained and a number of additions will be made to the model zoo in coming months.\n",
      "\n",
      "3.1 Layout Detection Models\n",
      "\n",
      "In LayoutParser, a layout model takes a document image as an input and generates a list of rectangular boxes for the target content regions. Different from traditional methods, it relies on deep convolutional neural networks rather than manually curated rules to identify content regions. 
It is formulated as an object detection problem and state-of-the-art models like Faster R-CNN yield prediction results of high accuracy and makes it possible to build a concise, generalized interface for layout detection. LayoutParser, built upon Detectron2, provides a minimal API that can perform layout detection with only four lines of Python:\n", + "\n", + "```python\n", + "import layoutparser as lp\n", + "image = cv2.imread(\"image_file\") # load images\n", + "model = lp.Detectron2LayoutModel(\n", + " \"lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config\"\n", + ")\n", + "layout = model.detect(image)\n", + "```\n", + "\n", + "LayoutParser provides a wealth of pre-trained model weights using various datasets covering different languages, time periods, and document types. Due to domain shift, the prediction performance can notably drop when models are applied to target samples that are significantly different from the training dataset. As document structures and layouts vary greatly in different domains, it is important to select models trained on a dataset similar to the test samples. A semantic syntax is used for initializing the model weights in LayoutParser, using both the dataset name and model name `lp:///`.\n" + ] + } + ], + "source": [ + "loader = ZeroxPDFLoader(\n", + " \"./example_data/layout-parser-paper.pdf\",\n", + " mode=\"page\",\n", + " extract_tables=\"markdown\",\n", + ")\n", + "docs = loader.load()\n", + "print(docs[4].page_content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Working with Files\n", + "\n", + "Many document loaders involve parsing files. The difference between such loaders usually stems from how the file is parsed, rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.\n", + "\n", + "As a result, it can be helpful to decouple the parsing logic from the loading logic, which makes it easier to re-use a given parser regardless of how the data was loaded.\n", + "You can use this strategy to analyze different files, with the same parsing parameters." + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "ExecuteTime": { + "end_time": "2025-02-10T11:38:57.651776Z", + "start_time": "2025-02-10T11:38:08.259781Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n", + "\n", + "Zejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\n", + "\n", + "¹ Allen Institute for AI \n", + "shannon@allenai.org \n", + "² Brown University \n", + "ruochen_zhang@brown.edu \n", + "³ Harvard University \n", + "{melissad1, jacob.carlson}@fas.harvard.edu \n", + "⁴ University of Washington \n", + "bgcl@cs.washington.edu \n", + "⁵ University of Waterloo \n", + "w422ii@uwaterloo.ca \n", + "\n", + "## Abstract\n", + "\n", + "Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. 
Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io.\n", + "\n", + "Keywords: Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\n", + "\n", + "## 1 Introduction\n", + "\n", + "Deep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]\n", + "{'producer': 'pdfTeX-1.40.21',\n", + " 'creator': 'LaTeX with hyperref',\n", + " 'creationdate': '2021-06-22T01:27:10+00:00',\n", + " 'author': '',\n", + " 'keywords': '',\n", + " 'moddate': '2021-06-22T01:27:10+00:00',\n", + " 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live '\n", + " '2020) kpathsea version 6.3.2',\n", + " 'subject': '',\n", + " 'title': '',\n", + " 'trapped': 'False',\n", + " 'total_pages': 16,\n", + " 'source': 'example_data/layout-parser-paper.pdf',\n", + " 'num_pages': 16,\n", + " 'page': 0}\n" + ] + } + ], + "source": [ + "from langchain_community.document_loaders import FileSystemBlobLoader\n", + "from langchain_community.document_loaders.generic import GenericLoader\n", + "from langchain_community.document_loaders.parsers import ZeroxPDFParser\n", + "\n", + "loader = GenericLoader(\n", + " blob_loader=FileSystemBlobLoader(\n", + " path=\"./example_data/\",\n", + " glob=\"*.pdf\",\n", + " ),\n", + " blob_parser=ZeroxPDFParser(),\n", + ")\n", + "docs = loader.load()\n", + "print(docs[0].page_content)\n", + "pprint.pp(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "It is possible to work with files from cloud storage." 
+ }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.document_loaders import CloudBlobLoader\n", + "from langchain_community.document_loaders.generic import GenericLoader\n", + "\n", + "loader = GenericLoader(\n", + " blob_loader=CloudBlobLoader(\n", + " url=\"s3:/mybucket\", # Supports s3://, az://, gs://, file:// schemes.\n", + " glob=\"*.pdf\",\n", + " ),\n", + " blob_parser=ZeroxPDFParser(),\n", + ")\n", + "docs = loader.load()\n", + "print(docs[0].page_content)\n", + "pprint.pp(docs[0].metadata)" ] }, { @@ -207,7 +1025,7 @@ "**Arguments**:\n", "- `file_path` (Union[str, Path]): Path to the PDF file.\n", "- `model` (str): Vision-capable model to use for processing in format `/`.\n", - "Some examples of valid values are: \n", + "Some examples of valid values are:\n", " - `model = \"gpt-4o-mini\" ## openai model`\n", " - `model = \"azure/gpt-4o-mini\"`\n", " - `model = \"gemini/gpt-4o-mini\"`\n", @@ -225,37 +1043,16 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, - "source": [ - "## Notes\n", - "- **Model Compatibility**: Zerox supports a range of vision-capable models. Refer to [Zerox's GitHub documentation](https://github.com/getomni-ai/zerox) for a list of supported models and configuration details.\n", - "- **Environment Variables**: Make sure to set required environment variables, such as `API_KEY` or endpoint details, as specified in the Zerox documentation.\n", - "- **Asynchronous Processing**: If you encounter errors related to event loops in Jupyter Notebooks, you may need to apply `nest_asyncio` as shown in the setup section.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Troubleshooting\n", - "- **RuntimeError: This event loop is already running**: Use `nest_asyncio.apply()` to prevent asynchronous loop conflicts in environments like Jupyter.\n", - "- **Configuration Errors**: Verify that the `zerox_kwargs` match the expected arguments for your chosen model and that all necessary environment variables are set.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Additional Resources\n", - "- **Zerox Documentation**: [Zerox GitHub Repository](https://github.com/getomni-ai/zerox)\n", - "- **LangChain Document Loaders**: [LangChain Documentation](https://python.langchain.com/docs/integrations/document_loaders/)" - ] + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "sharepoint_chatbot", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -269,9 +1066,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.7" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/libs/community/extended_testing_deps.txt b/libs/community/extended_testing_deps.txt index c62e57091b7..ce4570dfdd6 100644 --- a/libs/community/extended_testing_deps.txt +++ b/libs/community/extended_testing_deps.txt @@ -41,7 +41,7 @@ jq>=1.4.1,<2 jsonschema>1 keybert>=0.8.5 langchain_openai>=0.2.1 -litellm>=1.30,<=1.39.5 +litellm>=1.30 lxml>=4.9.3,<6.0 markdownify>=0.11.6,<0.12 motor>=3.3.1,<4 @@ -62,6 +62,7 @@ pandas>=2.0.1,<3 pdfminer-six==20231228 pdfplumber>=0.11 pgvector>=0.1.6,<0.2 +pillow>=10.4 playwright>=1.48.0,<2 praw>=7.7.1,<8 premai>=0.3.25,<0.4,!=0.3.100 diff --git a/libs/community/langchain_community/document_loaders/__init__.py 
b/libs/community/langchain_community/document_loaders/__init__.py index c91345daa6b..5db058b6490 100644 --- a/libs/community/langchain_community/document_loaders/__init__.py +++ b/libs/community/langchain_community/document_loaders/__init__.py @@ -360,6 +360,7 @@ if TYPE_CHECKING: PyPDFium2Loader, PyPDFLoader, UnstructuredPDFLoader, + ZeroxPDFLoader, ) from langchain_community.document_loaders.pebblo import ( PebbloSafeLoader, @@ -732,6 +733,7 @@ _module_lookup = { "YoutubeAudioLoader": "langchain_community.document_loaders.blob_loaders", "YoutubeLoader": "langchain_community.document_loaders.youtube", "YuqueLoader": "langchain_community.document_loaders.yuque", + "ZeroxPDFLoader": "langchain_community.document_loaders.pdf", } @@ -940,4 +942,5 @@ __all__ = [ "YoutubeAudioLoader", "YoutubeLoader", "YuqueLoader", + "ZeroxPDFLoader", ] diff --git a/libs/community/langchain_community/document_loaders/parsers/__init__.py b/libs/community/langchain_community/document_loaders/parsers/__init__.py index 9712718e197..af4f9dff984 100644 --- a/libs/community/langchain_community/document_loaders/parsers/__init__.py +++ b/libs/community/langchain_community/document_loaders/parsers/__init__.py @@ -32,6 +32,7 @@ if TYPE_CHECKING: PyMuPDFParser, PyPDFium2Parser, PyPDFParser, + ZeroxPDFParser, ) from langchain_community.document_loaders.parsers.vsdx import ( VsdxParser, @@ -55,6 +56,7 @@ _module_lookup = { "RapidOCRBlobParser": "langchain_community.document_loaders.parsers.images", "TesseractBlobParser": "langchain_community.document_loaders.parsers.images", "VsdxParser": "langchain_community.document_loaders.parsers.vsdx", + "ZeroxPDFParser": "langchain_community.document_loaders.parsers.pdf", } @@ -82,4 +84,5 @@ __all__ = [ "RapidOCRBlobParser", "TesseractBlobParser", "VsdxParser", + "ZeroxPDFParser", ] diff --git a/libs/community/langchain_community/document_loaders/parsers/pdf.py b/libs/community/langchain_community/document_loaders/parsers/pdf.py index 782edddad44..197e0ebb609 100644 --- a/libs/community/langchain_community/document_loaders/parsers/pdf.py +++ b/libs/community/langchain_community/document_loaders/parsers/pdf.py @@ -2,6 +2,7 @@ from __future__ import annotations +import asyncio import html import io import logging @@ -9,11 +10,12 @@ import threading import warnings from datetime import datetime from pathlib import Path -from tempfile import TemporaryDirectory +from tempfile import NamedTemporaryFile, TemporaryDirectory from typing import ( TYPE_CHECKING, Any, BinaryIO, + Dict, Iterable, Iterator, Literal, @@ -28,9 +30,14 @@ from urllib.parse import urlparse import numpy import numpy as np from langchain_core.documents import Document +from langchain_core.prompts import PromptTemplate from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob +from langchain_community.document_loaders.parsers import ( + LLMImageBlobParser, + TesseractBlobParser, +) from langchain_community.document_loaders.parsers.images import ( BaseImageBlobParser, RapidOCRBlobParser, @@ -1464,6 +1471,326 @@ class PDFPlumberParser(BaseBlobParser): return extract_from_images_with_rapidocr(images) +class ZeroxPDFParser(BaseBlobParser): + """Parse a blob from a PDF using `py-zerox` library. + + This class provides methods to parse a blob from a PDF document, supporting various + configurations such as handling password-protected PDFs, extracting images. 
+    It integrates the 'py-zerox' library for PDF processing and offers synchronous
+    blob parsing.
+
+    Examples:
+        Setup:
+
+        .. code-block:: bash
+
+            pip install -U langchain-community py-zerox
+
+        Load a blob from a PDF file:
+
+        .. code-block:: python
+
+            from langchain_core.documents.base import Blob
+
+            blob = Blob.from_path("./example_data/layout-parser-paper.pdf")
+
+        Instantiate the parser:
+
+        .. code-block:: python
+
+            from langchain_community.document_loaders.parsers import ZeroxPDFParser
+
+            parser = ZeroxPDFParser(
+                mode = "single",
+                pages_delimiter = "\n\f",
+                # images_parser = TesseractBlobParser(),
+                # extract_tables = "markdown",
+            )
+
+        Lazily parse the blob:
+
+        .. code-block:: python
+
+            docs = []
+            docs_lazy = parser.lazy_parse(blob)
+
+            for doc in docs_lazy:
+                docs.append(doc)
+            print(docs[0].page_content[:100])
+            print(docs[0].metadata)
+    """
+
+    warnings.filterwarnings(
+        "ignore",
+        module=r"^pyzerox.models.modellitellm$",
+        message=r"\s*Custom system prompt was provided which.*",
+    )
+    _warn_images_to_text = False
+    _warn_creator = False
+    _map_extract_tables: Dict[Literal["markdown", "html", None], str] = {
+        None: "",
+        "markdown": "",
+        "html": "But, use html syntax for convert all tables. ",
+    }
+    _map_extract_images = {
+        RapidOCRBlobParser: "",
+        TesseractBlobParser: "",
+        LLMImageBlobParser: "If you come across a picture, "
+        "diagram or other illustration, "
+        "describe it. ",
+    }
+    _prompt = (
+        "Convert the following PDF page to markdown. "
+        "{prompt_tables}"
+        "{prompt_images}"
+        "Remove the header, footer and page number. "
+        "Return only the markdown with no explanation text. "
+        "Do not exclude any content from the page. "
+    )
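+
+    # NOTE: `_prompt` is only a default template; the `{prompt_tables}` and
+    # `{prompt_images}` placeholders are filled in `lazy_parse()` from the
+    # configured `extract_tables` value and the class of `images_parser`.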
+
+    def __init__(
+        self,
+        mode: Literal["single", "page"] = "page",
+        pages_delimiter: str = _DEFAULT_PAGES_DELIMITER,
+        images_parser: Optional[BaseImageBlobParser] = None,
+        images_inner_format: Literal["text", "markdown-img", "html-img"] = "text",
+        extract_tables: Union[Literal["markdown", "html"], None] = "markdown",
+        cleanup: bool = True,
+        concurrency: int = 10,
+        maintain_format: bool = False,
+        model: str = "gpt-4o-mini",
+        custom_system_prompt: Optional[str] = None,
+        select_pages: Optional[Union[int, Iterable[int]]] = None,
+        **zerox_kwargs: Any,
+    ):
+        """
+        Initialize the parser with arguments to be passed to the zerox function.
+        Make sure to set the necessary environment variables such as API key,
+        endpoint, etc. Check the zerox documentation for the list of environment
+        variables required by any given model.
+
+        Args:
+            mode: The extraction mode, either "single" for the entire document or
+                "page" for page-wise extraction.
+            pages_delimiter: A string delimiter to separate pages in single-mode
+                extraction.
+            images_parser: Optional image blob parser.
+            images_inner_format: The format for the parsed output.
+                - "text" = return the content as is
+                - "markdown-img" = wrap the content into an image markdown link,
+                  w/ link pointing to (`![body](#)`)
+                - "html-img" = wrap the content as the `alt` text of an `<img>`
+                  tag and link to (`<img alt="{body}" src="#"/>`)
+            extract_tables: The format for extracted tables, either "markdown" or
+                "html"; None disables the table-specific prompt instructions.
+            cleanup:
+                Whether to clean up the temporary files after processing, defaults
+                to True
+            concurrency:
+                The number of concurrent processes to run, defaults to 10
+            maintain_format:
+                Whether to maintain the format from the previous page, defaults to
+                False
+            model:
+                The vision-capable model to use for generating completions,
+                defaults to "gpt-4o-mini". Hosted models are passed in the format
+                "<provider>/<model>", for example "azure/gpt-4o-mini" or
+                "vertex_ai/gemini-1.5-flash-001". Refer to
+                https://docs.litellm.ai/docs/providers for the correct model name,
+                as it may differ from the provider's actual name. See more details
+                in the zerox documentation.
+            output_dir:
+                The directory to save the markdown output, defaults to None
+            temp_dir:
+                The directory to store temporary files, defaults to a named folder
+                in the system's temp directory. If it already exists, its contents
+                will be deleted, because zerox uses it.
+            custom_system_prompt:
+                The system prompt to use for the model; this overrides the default
+                system prompt of zerox. Generally it is not required unless you
+                want some specific behaviour. When set, it will raise a friendly
+                warning, defaults to None
+            select_pages:
+                Pages to process, can be a single page number or an iterable of
+                page numbers, defaults to None
+            **zerox_kwargs:
+                Arguments specific to the zerox function.
+        """
+        if mode not in ["single", "page"]:
+            raise ValueError("mode must be single or page")
+        if extract_tables not in ["markdown", "html", None]:
+            logger.warning("extract_tables must be markdown, html or None")
+            extract_tables = "markdown"
+        if not images_parser:
+            images_parser = RapidOCRBlobParser()
+        self.mode = mode
+        self.pages_delimiter = pages_delimiter
+        self.images_parser = images_parser
+        self.images_inner_format = images_inner_format
+        self.extract_tables = extract_tables
+
+        self.cleanup = cleanup
+        self.concurrency = concurrency
+        self.maintain_format = maintain_format
+        self.model = model
+        # Keep None when the user did not supply a prompt, so that lazy_parse()
+        # can tell a user-provided prompt apart from the default template.
+        self.custom_system_prompt = custom_system_prompt
+        self.select_pages = select_pages
+        self.zerox_kwargs = zerox_kwargs
+
+    @staticmethod
+    def _is_valid_url(url: str) -> bool:
+        """Check if the url is valid."""
+        parsed = urlparse(url)
+        return bool(parsed.netloc) and bool(parsed.scheme)
+
+    def lazy_parse(self, blob: Blob) -> Iterator[Document]:  # type: ignore[valid-type]
+        """Lazily parse the blob.
+
+        Args:
+            blob: The blob to parse.
+
+        Raises:
+            ImportError: If the `py-zerox` package is not installed.
+
+        Yields:
+            An iterator over the parsed documents.
+        """
+        try:
+            from pyzerox import zerox
+        except ImportError:
+            raise ImportError(
+                "Could not import pyzerox python package. "
+                "Please install it with `pip install py-zerox`."
+            )
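+        # Zerox consumes file paths rather than byte streams: if the blob does
+        # not point at a URL, spill its bytes to a temporary file first.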
+ ) + temp_file = None + try: + if not ZeroxPDFParser._is_valid_url(str(blob.path)): + temp_file = NamedTemporaryFile() + with open(temp_file.name, "wb") as f: + f.write(blob.as_bytes()) + file_path = temp_file.name + else: + file_path = str(blob.path) + + with blob.as_bytes_io() as pdf_file_obj: + doc_metadata = _purge_metadata(self._get_metadata(pdf_file_obj)) + + doc_metadata["source"] = blob.source or blob.path + zerox_prompt = self.custom_system_prompt + + if not zerox_prompt and self.images_parser or self.extract_tables: + prompt_tables = ZeroxPDFParser._map_extract_tables[self.extract_tables] + clazz = self.images_parser.__class__ + if clazz in ZeroxPDFParser._map_extract_images: + prompt_images = ZeroxPDFParser._map_extract_images[clazz] + else: + if not ZeroxPDFParser._warn_creator: + ZeroxPDFParser._warn_creator = True + logger.warning("images_parser can not be simulated") + prompt_images = "" + zerox_prompt = PromptTemplate.from_template( + self.custom_system_prompt + ).format(prompt_tables=prompt_tables, prompt_images=prompt_images) + zerox_output = asyncio.run( + zerox( + file_path=str(file_path), + model=self.model, + cleanup=self.cleanup, + concurrency=self.concurrency, + maintain_format=self.maintain_format, + custom_system_prompt=zerox_prompt, + select_pages=self.select_pages, + **self.zerox_kwargs, + ) + ) + + # Convert zerox output to Document instances and yield them + if len(zerox_output.pages) > 0: + doc_metadata = _purge_metadata( + { + "producer": "ZeroxPDF", + "creator": "ZeroxPDF", + "creationdate": "", + } + | doc_metadata + | { + "total_pages": zerox_output.pages[-1].page, + "num_pages": zerox_output.pages[-1].page, # Deprecated + } + ) + single_texts = [] + for page in zerox_output.pages: + text_from_page = page.content + images_from_page = "" # FIXME + all_text = _merge_text_and_extras( + [images_from_page], text_from_page + ) + if self.mode == "page": + yield Document( + page_content=all_text, + metadata=_validate_metadata( + doc_metadata | {"page": page.page - 1} + ), + ) + else: + single_texts.append(all_text) + if self.mode == "single": + yield Document( + page_content=self.pages_delimiter.join(single_texts), + metadata=_validate_metadata(doc_metadata), + ) + finally: + if temp_file: + temp_file.close() + + def _get_metadata( + self, + fp: BinaryIO, + password: str = "", + caching: bool = True, + ) -> dict[str, Any]: + """ + Extract metadata from a PDF file. + + Args: + fp: The file pointer to the PDF file. + password: The password for the PDF file, if encrypted. Defaults to an empty + string. + caching: Whether to cache the PDF structure. Defaults to True. + + Returns: + Metadata of the PDF file. + """ + from pdfminer.pdfpage import PDFDocument, PDFPage, PDFParser + + # Create a PDF parser object associated with the file object. + parser = PDFParser(fp) + # Create a PDF document object that stores the document structure. + doc = PDFDocument(parser, password=password, caching=caching) + metadata = {} + + for info in doc.info: + metadata.update(info) + for k, v in metadata.items(): + try: + metadata[k] = PDFMinerParser.resolve_and_decode(v) + except Exception as e: # pragma: nocover + # This metadata value could not be parsed. Instead of failing the PDF + # read, treat it as a warning only if `strict_metadata=False`. + logger.warning( + '[WARNING] Metadata key "%s" could not be parsed due to ' + "exception: %s", + k, + str(e), + ) + + # Count number of pages. 
+ metadata["total_pages"] = len(list(PDFPage.create_pages(doc))) + + return metadata + + class AmazonTextractPDFParser(BaseBlobParser): """Send `PDF` files to `Amazon Textract` and parse them. diff --git a/libs/community/langchain_community/document_loaders/pdf.py b/libs/community/langchain_community/document_loaders/pdf.py index b9e57b19ff1..2454026effa 100644 --- a/libs/community/langchain_community/document_loaders/pdf.py +++ b/libs/community/langchain_community/document_loaders/pdf.py @@ -11,6 +11,7 @@ from typing import ( TYPE_CHECKING, Any, BinaryIO, + Iterable, Iterator, Literal, Mapping, @@ -38,6 +39,7 @@ from langchain_community.document_loaders.parsers.pdf import ( PyMuPDFParser, PyPDFium2Parser, PyPDFParser, + ZeroxPDFParser, ) from langchain_community.document_loaders.unstructured import UnstructuredFileLoader @@ -1353,72 +1355,177 @@ class DocumentIntelligenceLoader(BasePDFLoader): class ZeroxPDFLoader(BasePDFLoader): - """Document loader utilizing Zerox library: + """Load and parse a PDF file using 'py-zerox' library. https://github.com/getomni-ai/zerox - Zerox converts PDF document to series of images (page-wise) and + This class provides methods to load and parse PDF documents, supporting various + configurations such as handling password-protected files, extracting tables, + extracting images, and defining extraction mode. It integrates the `py-zerox` + library for PDF processing and offers both synchronous and asynchronous document + loading. + + Zerox converts PDF document to serties of images (page-wise) and uses vision-capable LLM model to generate Markdown representation. - Zerox utilizes anyc operations. Therefore when using this loader + Zerox utilizes async operations. Therefore when using this loader inside Jupyter Notebook (or any environment running async) you will need to: ```python import nest_asyncio nest_asyncio.apply() ``` + + Examples: + Setup: + + .. code-block:: bash + + pip install -U langchain-community pymupdf + + Instantiate the loader: + + .. code-block:: python + + from langchain_community.document_loaders import ZeroxPDFLoader + + loader = ZeroxPDFLoader( + file_path = "./example_data/layout-parser-paper.pdf", + # headers = None + # password = None, + mode = "single", + pages_delimiter = "\n\f", + # extract_images = True, + # images_to_text = convert_images_to_text_with_tesseract(), + # extract_tables = "markdown", + # extract_tables_settings = None, + ) + + Lazy load documents: + + .. code-block:: python + + docs = [] + docs_lazy = loader.lazy_load() + + for doc in docs_lazy: + docs.append(doc) + print(docs[0].page_content[:100]) + print(docs[0].metadata) + + Load documents asynchronously: + + .. 
     """

     def __init__(
         self,
-        file_path: Union[str, PurePath],
+        file_path: Union[str, Path],
+        *,
+        headers: Optional[dict] = None,
+        mode: Literal["single", "page"] = "page",
+        pages_delimiter: str = _DEFAULT_PAGES_DELIMITER,
+        images_parser: Optional[BaseImageBlobParser] = None,
+        images_inner_format: Literal["text", "markdown-img", "html-img"] = "text",
+        extract_tables: Union[Literal["markdown", "html"], None] = "markdown",
+        cleanup: bool = True,
+        concurrency: int = 10,
+        maintain_format: bool = False,
         model: str = "gpt-4o-mini",
-        **zerox_kwargs: Any,
+        custom_system_prompt: Optional[str] = None,
+        select_pages: Optional[Union[int, Iterable[int]]] = None,
+        **zerox_kwargs: Any,
     ) -> None:
-        super().__init__(file_path=file_path)
-        """Initialize the parser with arguments to be passed to the zerox function.
+        """
+        Initialize the loader with arguments to be passed to the zerox function.
         Make sure to set necessary environment variables such as API key, endpoint,
         etc. Check zerox documentation for list of necessary environment variables
         for any given model.
 
         Args:
-            file_path:
-                Path or url of the pdf file
+            file_path: The path to the PDF file to be loaded.
+            headers: Optional headers to use for GET request to download a file
+                from a web path.
+            mode: The extraction mode, either "single" for the entire document or
+                "page" for page-wise extraction.
+            pages_delimiter: A string delimiter to separate pages in single-mode
+                extraction.
+            images_parser: Optional image blob parser.
+            images_inner_format: The format for the parsed output.
+                - "text" = return the content as is
+                - "markdown-img" = wrap the content into an image markdown link,
+                  w/ link pointing to (`![body](#)`)
+                - "html-img" = wrap the content as the `alt` text of an `<img>`
+                  tag and link to (`<img alt="{body}" src="#"/>`)
+            extract_tables: The format for extracted tables, either "markdown" or
+                "html"; None disables the table-specific prompt instructions.
+            cleanup:
+                Whether to clean up the temporary files after processing, defaults
+                to True
+            concurrency:
+                The number of concurrent processes to run, defaults to 10
+            maintain_format:
+                Whether to maintain the format from the previous page, defaults to
+                False
             model:
-                Vision capable model to use. Defaults to "gpt-4o-mini".
-                Hosted models are passed in format "/"
-                Examples: "azure/gpt-4o-mini", "vertex_ai/gemini-1.5-flash-001"
-                See more details in zerox documentation.
+                The vision-capable model to use for generating completions,
+                defaults to "gpt-4o-mini". Hosted models are passed in the format
+                "<provider>/<model>", for example "azure/gpt-4o-mini" or
+                "vertex_ai/gemini-1.5-flash-001". Refer to
+                https://docs.litellm.ai/docs/providers for the correct model
+                name, as it may differ from the provider's actual name.
+            output_dir:
+                The directory to save the markdown output, defaults to None
+            temp_dir:
+                The directory to store temporary files, defaults to a named folder
+                in the system's temp directory. If it already exists, its contents
+                will be deleted, because zerox uses it.
+            custom_system_prompt:
+                The system prompt to use for the model; this overrides the default
+                system prompt of zerox. Generally it is not required unless you
+                want some specific behaviour. When set, it will raise a friendly
+                warning, defaults to None
+            select_pages:
+                Pages to process, can be a single page number or an iterable of
+                page numbers, defaults to None
             **zerox_kwargs:
                 Arguments specific to the zerox function.
-                see datailed list of arguments here in zerox repository:
-                https://github.com/getomni-ai/zerox/blob/main/py_zerox/pyzerox/core/zerox.py#L25
-        """  # noqa: E501
-        self.zerox_kwargs = zerox_kwargs
-        self.model = model
-
-    def lazy_load(self) -> Iterator[Document]:
-        """Lazily load pages."""
-        import asyncio
-
-        from pyzerox import zerox
-
-        # Directly call asyncio.run to execute zerox synchronously
-        zerox_output = asyncio.run(
-            zerox(file_path=str(self.file_path), model=self.model, **self.zerox_kwargs)
+        """
+        super().__init__(file_path, headers=headers)
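+        # The loader is a thin wrapper: file and URL handling happen here,
+        # while parsing is delegated to the ZeroxPDFParser configured below.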
+        self.parser = ZeroxPDFParser(
+            mode=mode,
+            pages_delimiter=pages_delimiter,
+            images_parser=images_parser,
+            images_inner_format=images_inner_format,
+            extract_tables=extract_tables,
+            cleanup=cleanup,
+            concurrency=concurrency,
+            maintain_format=maintain_format,
+            model=model,
+            custom_system_prompt=custom_system_prompt,
+            select_pages=select_pages,
+            **zerox_kwargs,
         )
 
-        # Convert zerox output to Document instances and yield them
-        if len(zerox_output.pages) > 0:
-            num_pages = zerox_output.pages[-1].page
-            for page in zerox_output.pages:
-                yield Document(
-                    page_content=page.content,
-                    metadata={
-                        "source": self.source,
-                        "page": page.page,
-                        "num_pages": num_pages,
-                    },
-                )
+    def lazy_load(self) -> Iterator[Document]:
+        """Lazily load the given path as pages, using the zerox library:
+        https://github.com/getomni-ai/zerox
+
+        Returns:
+            Iterator[Document]: An iterator over parsed Document instances.
+        """
+        if self.web_path:
+            with open(self.file_path, "rb") as f:
+                blob = Blob.from_data(  # type: ignore[attr-defined]
+                    f.read(), path=self.web_path
+                )
+        else:
+            blob = Blob.from_path(self.file_path)  # type: ignore[attr-defined]
+        yield from self.parser.lazy_parse(blob)
 
 
 # Legacy: only for backwards compatibility. Use PyPDFLoader instead
diff --git a/libs/community/tests/integration_tests/document_loaders/parsers/test_pdf_parsers.py b/libs/community/tests/integration_tests/document_loaders/parsers/test_pdf_parsers.py
index 1137dd79f2e..c708596b7a3 100644
--- a/libs/community/tests/integration_tests/document_loaders/parsers/test_pdf_parsers.py
+++ b/libs/community/tests/integration_tests/document_loaders/parsers/test_pdf_parsers.py
@@ -5,6 +5,7 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Iterator
 
 import pytest
+from PIL.Image import Image
 
 import langchain_community.document_loaders.parsers as pdf_parsers
 from langchain_community.document_loaders.base import BaseBlobParser
@@ -119,6 +120,7 @@ class EmptyImageBlobParser(BaseImageBlobParser):
         ("PyPDFium2Parser", {}),
         ("PyPDFParser", {"extraction_mode": "plain"}),
         ("PyPDFParser", {"extraction_mode": "layout"}),
+        ("ZeroxPDFParser", {}),
     ],
 )
 @pytest.mark.requires("pillow")
@@ -128,6 +130,11 @@ def test_mode_and_extract_images_variations(
     mode: str,
     image_parser: BaseImageBlobParser,
 ) -> None:
+    if parser_factory == "ZeroxPDFParser":
+        try:
+            import pyzerox  # noqa: F401
+        except ImportError:
+            pytest.skip("pyzerox requires Python 3.11+")
     _test_matrix(
         parser_factory,
         params,
@@ -149,6 +156,7 @@ def test_mode_and_extract_images_variations(
         ("PyPDFium2Parser", {}),
         ("PyPDFParser", {"extraction_mode": "plain"}),
         ("PyPDFParser", {"extraction_mode": "layout"}),
+        ("ZeroxPDFParser", {}),
     ],
 )
 @pytest.mark.requires("pillow")
@@ -157,6 +165,11 @@ def test_mode_and_image_formats_variations(
     params: dict,
     images_inner_format: str,
 ) -> None:
+    if parser_factory == "ZeroxPDFParser":
+        try:
+            import pyzerox  # noqa: F401
+        except ImportError:
+            pytest.skip("pyzerox requires Python 3.11+")
     mode = "single"
     image_parser = EmptyImageBlobParser()
 
@@ -246,6 +259,7 @@ def _test_matrix(
     "parser_factory,params",
     [
         ("PyMuPDFParser", {}),
+        ("ZeroxPDFParser", {}),
     ],
 )
 def test_parser_with_table(
@@ -254,6 +268,12 @@ def test_parser_with_table(
     mode: str,
     extract_tables: str,
 ) -> None:
+    if parser_factory == "ZeroxPDFParser":
+        try:
+            import pyzerox  # noqa: F401
+        except ImportError:
+            pytest.skip("pyzerox requires Python 3.11+")
+
     from PIL.Image import Image
 
     from langchain_community.document_loaders.parsers.images import BaseImageBlobParser
diff --git a/libs/community/tests/integration_tests/document_loaders/test_pdf.py b/libs/community/tests/integration_tests/document_loaders/test_pdf.py
index 891308a0441..f947155071f 100644
--- a/libs/community/tests/integration_tests/document_loaders/test_pdf.py
+++ b/libs/community/tests/integration_tests/document_loaders/test_pdf.py
@@ -170,12 +170,18 @@ def test_amazontextract_loader_failures() -> None:
         ("PyMuPDFLoader", {}),
         ("PyPDFium2Loader", {}),
         ("PyPDFLoader", {}),
+        ("ZeroxPDFLoader", {}),
     ],
 )
 def test_standard_parameters(
     parser_factory: str,
     params: dict,
 ) -> None:
+    if parser_factory == "ZeroxPDFLoader":
+        try:
+            import pyzerox  # noqa: F401
+        except ImportError:
+            pytest.skip("pyzerox requires Python 3.11+")
     loader_class = getattr(pdf_loaders, parser_factory)
     file_path = Path(__file__).parent.parent / "examples/hello.pdf"
diff --git a/libs/community/tests/unit_tests/document_loaders/parsers/test_pdf_parsers.py b/libs/community/tests/unit_tests/document_loaders/parsers/test_pdf_parsers.py
index e89a8d854df..6dfaf9ccf40 100644
--- a/libs/community/tests/unit_tests/document_loaders/parsers/test_pdf_parsers.py
+++ b/libs/community/tests/unit_tests/document_loaders/parsers/test_pdf_parsers.py
@@ -9,7 +9,9 @@ import pytest
 
 import langchain_community.document_loaders.parsers as pdf_parsers
 from langchain_community.document_loaders.base import BaseBlobParser
 from langchain_community.document_loaders.blob_loaders import Blob
-from langchain_community.document_loaders.parsers.pdf import _merge_text_and_extras
+from langchain_community.document_loaders.parsers.pdf import (
+    _merge_text_and_extras,
+)
 
 _THIS_DIR = Path(__file__).parents[3]
@@ -75,6 +77,7 @@ def _assert_with_parser(parser: BaseBlobParser, *, splits_by_page: bool = True)
     "parser_factory,require,params",
     [
         ("PDFMinerParser", "pdfminer", {"splits_by_page": False}),
+        ("PDFPlumberParser", "pdfplumber", {}),
         ("PyMuPDFParser", "pymupdf", {}),
         ("PyPDFParser", "pypdf", {}),
         ("PyPDFium2Parser", "pypdfium2", {}),
diff --git a/libs/community/tests/unit_tests/document_loaders/parsers/test_public_api.py b/libs/community/tests/unit_tests/document_loaders/parsers/test_public_api.py
index edb5d1a35d8..c0540ea6f92 100644
--- a/libs/community/tests/unit_tests/document_loaders/parsers/test_public_api.py
+++ b/libs/community/tests/unit_tests/document_loaders/parsers/test_public_api.py
@@ -20,4 +20,5 @@ def test_parsers_public_api_correct() -> None:
         "RapidOCRBlobParser",
         "TesseractBlobParser",
         "VsdxParser",
+        "ZeroxPDFParser",
     }
diff --git a/libs/community/tests/unit_tests/document_loaders/test_imports.py b/libs/community/tests/unit_tests/document_loaders/test_imports.py
index 33c988d1b92..2b62ff5720b 100644
--- a/libs/community/tests/unit_tests/document_loaders/test_imports.py
+++ b/libs/community/tests/unit_tests/document_loaders/test_imports.py
@@ -200,6 +200,7 @@ EXPECTED_ALL = [
     "YoutubeAudioLoader",
     "YoutubeLoader",
     "YuqueLoader",
+    "ZeroxPDFLoader",
 ]
diff --git a/libs/langchain/langchain/document_loaders/parsers/__init__.py b/libs/langchain/langchain/document_loaders/parsers/__init__.py
index a0c3930e81b..e0165e2be4c 100644
--- a/libs/langchain/langchain/document_loaders/parsers/__init__.py
+++ b/libs/langchain/langchain/document_loaders/parsers/__init__.py
@@ -16,6 +16,7 @@ if TYPE_CHECKING:
         PyMuPDFParser,
         PyPDFium2Parser,
         PyPDFParser,
+        ZeroxPDFParser,
     )
 
 # Create a way to dynamically look up deprecated imports.
@@ -34,6 +35,7 @@ DEPRECATED_LOOKUP = {
     "PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf",
     "PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf",
     "PyPDFParser": "langchain_community.document_loaders.parsers.pdf",
+    "ZeroxPDFParser": "langchain_community.document_loaders.parsers.pdf",
 }
 
 _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
@@ -55,4 +57,5 @@ __all__ = [
     "PyMuPDFParser",
     "PyPDFium2Parser",
     "PyPDFParser",
+    "ZeroxPDFParser",
 ]
diff --git a/libs/langchain/tests/unit_tests/document_loaders/parsers/test_public_api.py b/libs/langchain/tests/unit_tests/document_loaders/parsers/test_public_api.py
index f1037064b08..d44d0bddee9 100644
--- a/libs/langchain/tests/unit_tests/document_loaders/parsers/test_public_api.py
+++ b/libs/langchain/tests/unit_tests/document_loaders/parsers/test_public_api.py
@@ -14,4 +14,5 @@ def test_parsers_public_api_correct() -> None:
         "PyMuPDFParser",
         "PyPDFium2Parser",
         "PDFPlumberParser",
+        "ZeroxPDFParser",
     }
diff --git a/libs/langchain/tests/unit_tests/test_imports.py b/libs/langchain/tests/unit_tests/test_imports.py
index 366a2aa3400..3f1ba5a22e6 100644
--- a/libs/langchain/tests/unit_tests/test_imports.py
+++ b/libs/langchain/tests/unit_tests/test_imports.py
@@ -96,7 +96,7 @@ def test_no_more_changes_to_proxy_community() -> None:
     # most cases.
     hash_ += len(str(sorted(deprecated_lookup.items())))
 
-    evil_magic_number = 38620
+    evil_magic_number = 38692
 
     assert hash_ == evil_magic_number, (
         "If you're triggering this test, you're likely adding a new import "