commit 0d701f00d7 (parent 97996eaff1)
https://github.com/hwchase17/langchain.git

    Fix review
@@ -43,18 +43,19 @@
 },
 {
 "cell_type": "code",
+"execution_count": 1,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:08:44.721334Z",
-"start_time": "2025-03-10T16:08:44.603495Z"
+"end_time": "2025-03-13T08:43:23.098023Z",
+"start_time": "2025-03-13T08:43:23.007462Z"
 }
 },
+"outputs": [],
 "source": [
 "import os\n",
 "from getpass import getpass\n",
 "\n",
 "# use nest_asyncio (only necessary inside of jupyter notebook)\n",
-"import nest_asyncio\n",
 "from dotenv import load_dotenv\n",
 "\n",
 "load_dotenv()\n",
@@ -62,10 +63,10 @@
 "if not os.environ.get(\"OPENAI_API_KEY\"):\n",
 " os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")\n",
 "\n",
+"import nest_asyncio\n",
+"\n",
 "nest_asyncio.apply()"
-],
-"outputs": [],
-"execution_count": 1
+]
 },
 {
 "cell_type": "markdown",
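
For context, the setup these first two hunks converge on looks like this when assembled into one cell; a minimal sketch taken from the diff itself, assuming nest_asyncio is only needed because py-zerox drives its own asyncio event loop inside Jupyter's already-running loop:

```python
import os
from getpass import getpass

import nest_asyncio
from dotenv import load_dotenv  # requires the python-dotenv package

load_dotenv()

if not os.environ.get("OPENAI_API_KEY"):
    os.environ["OPENAI_API_KEY"] = getpass("OpenAI API key =")

# Jupyter already runs an asyncio event loop; nest_asyncio patches it so that
# code calling loop.run_until_complete() (as py-zerox does internally) works
# without "event loop is already running" errors.
nest_asyncio.apply()
```
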
@@ -77,12 +78,14 @@
 },
 {
 "cell_type": "code",
+"execution_count": 2,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:08:46.797620Z",
-"start_time": "2025-03-10T16:08:46.685305Z"
+"end_time": "2025-03-13T08:43:24.826878Z",
+"start_time": "2025-03-13T08:43:24.787405Z"
 }
 },
+"outputs": [],
 "source": [
 "from getpass import getpass\n",
 "\n",
@@ -92,9 +95,7 @@
 " os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")\n",
 "\n",
 "nest_asyncio.apply()"
-],
-"outputs": [],
-"execution_count": 2
+]
 },
 {
 "cell_type": "markdown",
@@ -106,18 +107,18 @@
 },
 {
 "cell_type": "code",
+"execution_count": 3,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:08:48.610751Z",
-"start_time": "2025-03-10T16:08:48.604079Z"
+"end_time": "2025-03-13T08:43:26.330466Z",
+"start_time": "2025-03-13T08:43:26.324045Z"
 }
 },
+"outputs": [],
 "source": [
 "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
 "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
-],
-"outputs": [],
-"execution_count": 3
+]
 },
 {
 "cell_type": "markdown",
@@ -130,15 +131,13 @@
 },
 {
 "cell_type": "code",
+"execution_count": 4,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:30:21.902215Z",
-"start_time": "2025-03-10T16:30:19.755014Z"
+"end_time": "2025-03-13T08:43:30.208499Z",
+"start_time": "2025-03-13T08:43:28.345747Z"
 }
 },
-"source": [
-"%pip install -qU langchain_community py-zerox"
-],
 "outputs": [
 {
 "name": "stdout",
@@ -148,7 +147,9 @@
 ]
 }
 ],
-"execution_count": 4
+"source": [
+"%pip install -qU langchain_community py-zerox"
+]
 },
 {
 "cell_type": "markdown",
@@ -160,109 +161,44 @@
 ]
 },
 {
+"cell_type": "code",
+"execution_count": 5,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:30:23.336449Z",
-"start_time": "2025-03-10T16:30:23.328747Z"
+"end_time": "2025-03-13T08:43:31.748909Z",
+"start_time": "2025-03-13T08:43:31.738214Z"
 }
 },
-"cell_type": "code",
-"source": [
-"import asyncio\n",
-"import nest_asyncio\n",
-"nest_asyncio.apply()"
-],
 "outputs": [],
-"execution_count": 5
-},
-{
-"metadata": {
-"ExecuteTime": {
-"end_time": "2025-03-10T16:30:25.533375Z",
-"start_time": "2025-03-10T16:30:25.095083Z"
-}
-},
-"cell_type": "code",
 "source": [
-"def run_in_thread():\n",
-" loop = asyncio.get_event_loop() # Create a new event loop for this thread\n",
-" result = loop.run_until_complete(toto())\n",
-" return result\n",
+"import warnings\n",
 "\n",
-"from multiprocessing.pool import ThreadPool\n",
-"pool = ThreadPool(processes=1)\n",
-"async_result = pool.apply_async(run_in_thread) # tuple of args for foo\n",
-"result=async_result.get()\n",
-"print(result)\n"
-],
-"outputs": [
-{
-"ename": "NameError",
-"evalue": "name 'toto' is not defined",
-"output_type": "error",
-"traceback": [
-"\u001B[31m---------------------------------------------------------------------------\u001B[39m",
-"\u001B[31mNameError\u001B[39m Traceback (most recent call last)",
-"\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[6]\u001B[39m\u001B[32m, line 9\u001B[39m\n\u001B[32m 7\u001B[39m pool = ThreadPool(processes=\u001B[32m1\u001B[39m)\n\u001B[32m 8\u001B[39m async_result = pool.apply_async(run_in_thread) \u001B[38;5;66;03m# tuple of args for foo\u001B[39;00m\n\u001B[32m----> \u001B[39m\u001B[32m9\u001B[39m result=\u001B[43masync_result\u001B[49m\u001B[43m.\u001B[49m\u001B[43mget\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m 10\u001B[39m \u001B[38;5;28mprint\u001B[39m(result)\n",
-"\u001B[36mFile \u001B[39m\u001B[32m~/miniconda3/lib/python3.12/multiprocessing/pool.py:774\u001B[39m, in \u001B[36mApplyResult.get\u001B[39m\u001B[34m(self, timeout)\u001B[39m\n\u001B[32m 772\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m._value\n\u001B[32m 773\u001B[39m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m774\u001B[39m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;28mself\u001B[39m._value\n",
-"\u001B[36mFile \u001B[39m\u001B[32m~/miniconda3/lib/python3.12/multiprocessing/pool.py:125\u001B[39m, in \u001B[36mworker\u001B[39m\u001B[34m(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)\u001B[39m\n\u001B[32m 123\u001B[39m job, i, func, args, kwds = task\n\u001B[32m 124\u001B[39m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m125\u001B[39m result = (\u001B[38;5;28;01mTrue\u001B[39;00m, \u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[43m*\u001B[49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m*\u001B[49m\u001B[43m*\u001B[49m\u001B[43mkwds\u001B[49m\u001B[43m)\u001B[49m)\n\u001B[32m 126\u001B[39m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[32m 127\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m wrap_exception \u001B[38;5;129;01mand\u001B[39;00m func \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m _helper_reraises_exception:\n",
-"\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[6]\u001B[39m\u001B[32m, line 3\u001B[39m, in \u001B[36mrun_in_thread\u001B[39m\u001B[34m()\u001B[39m\n\u001B[32m 1\u001B[39m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34mrun_in_thread\u001B[39m():\n\u001B[32m 2\u001B[39m loop = asyncio.get_event_loop() \u001B[38;5;66;03m# Create a new event loop for this thread\u001B[39;00m\n\u001B[32m----> \u001B[39m\u001B[32m3\u001B[39m result = loop.run_until_complete(\u001B[43mtoto\u001B[49m())\n\u001B[32m 4\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m result\n",
-"\u001B[31mNameError\u001B[39m: name 'toto' is not defined"
-]
-}
-],
-"execution_count": 6
-},
-{
-"metadata": {
-"ExecuteTime": {
-"end_time": "2025-03-10T16:05:43.143734Z",
-"start_time": "2025-03-10T16:05:43.111814Z"
-}
-},
-"cell_type": "code",
-"source": [
-"loop = asyncio.get_running_loop()\n",
+"import nest_asyncio\n",
 "\n",
-"def _run_in_thread(loop):\n",
-" loop = asyncio.get_event_loop() # Create a new event loop for this thread\n",
-" result = loop.run_until_complete(toto())\n",
-" return result\n",
-"\n",
-"from multiprocessing.pool import ThreadPool\n",
-"pool = ThreadPool(processes=1)\n",
-"async_result = pool.apply_async(_run_in_thread,(loop,)) # tuple of args for foo\n",
-"result = async_result.get()\n",
-"print(result)"
-],
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"toto\n",
-"hello\n"
-]
-}
-],
-"execution_count": 26
+"nest_asyncio.apply()\n",
+"warnings.filterwarnings(\n",
+" \"ignore\",\n",
+" module=r\"^pyzerox.models.modellitellm$\",\n",
+" message=r\"\\s*Custom system prompt was provided which.*\",\n",
+")"
+]
 },
 {
 "cell_type": "code",
+"execution_count": 6,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:30:30.217668Z",
-"start_time": "2025-03-10T16:30:29.274838Z"
+"end_time": "2025-03-13T08:43:34.013207Z",
+"start_time": "2025-03-13T08:43:32.871614Z"
 }
 },
+"outputs": [],
 "source": [
 "from langchain_community.document_loaders.pdf import ZeroxPDFLoader\n",
 "\n",
 "file_path = \"./example_data/layout-parser-paper.pdf\"\n",
 "loader = ZeroxPDFLoader(file_path)"
-],
-"outputs": [],
-"execution_count": 7
+]
 },
 {
 "cell_type": "markdown",
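
The hunk above drops the leftover ThreadPool/event-loop experiments (including the undefined `toto()` coroutine and its NameError traceback) in favor of a targeted warnings filter. As a standalone sketch, with the module path and message regex taken verbatim from the diff (whether they match future pyzerox releases is an assumption):

```python
import warnings

import nest_asyncio

nest_asyncio.apply()
# Silence only pyzerox's "custom system prompt" UserWarning, keyed on both
# the emitting module and the start of the message, so unrelated warnings
# from other libraries still surface.
warnings.filterwarnings(
    "ignore",
    module=r"^pyzerox.models.modellitellm$",
    message=r"\s*Custom system prompt was provided which.*",
)
```

Filtering on module plus message is deliberately narrow: a blanket `warnings.simplefilter("ignore")` would also hide deprecation notices the notebook should keep showing.
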
@@ -273,58 +209,39 @@
 },
 {
 "cell_type": "code",
+"execution_count": 7,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:31:07.654904Z",
-"start_time": "2025-03-10T16:30:33.071884Z"
+"end_time": "2025-03-13T08:43:55.887979Z",
+"start_time": "2025-03-13T08:43:35.469623Z"
 }
 },
-"source": [
-"docs = loader.load()\n",
-"docs[0]"
-],
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/pprados/workspace.bda/langchain/libs/community/.venv/lib/python3.12/site-packages/pyzerox/models/modellitellm.py:52: UserWarning: \n",
-" Custom system prompt was provided which overrides the default system prompt. We assume that you know what you are doing. \n",
-" . Default prompt for zerox is:\n",
-" \n",
-" Convert the following PDF page to markdown.\n",
-" Return only the markdown with no explanation text.\n",
-" Do not exclude any content from the page.\n",
-" \n",
-" warnings.warn(f\"{Messages.CUSTOM_SYSTEM_PROMPT_WARNING}. Default prompt for zerox is:\\n {DEFAULT_SYSTEM_PROMPT}\")\n"
-]
-},
 {
 "data": {
 "text/plain": [
-"Document(metadata={'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': 'False', 'total_pages': 16, 'source': './example_data/layout-parser-paper.pdf', 'num_pages': 16, 'page': 0}, page_content='# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\\n\\n¹ Allen Institute for AI \\nshannons@allenai.org \\n² Brown University \\nruochen_zhang@brown.edu \\n³ Harvard University \\n{melissadell, jacob.carlson}@fas.harvard.edu \\n⁴ University of Washington \\nbgcl@cs.washington.edu \\n⁵ University of Waterloo \\nw4221@uwaterloo.ca \\n\\n**Abstract.** Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io \\n\\n**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit. \\n\\n# 1 Introduction\\n\\nDeep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11].')"
+"Document(metadata={'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': 'False', 'total_pages': 16, 'source': './example_data/layout-parser-paper.pdf', 'num_pages': 16, 'page': 0}, page_content='# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejiong Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\\n\\n¹ Allen Institute for AI \\nshannons@allenai.org \\n² Brown University \\nruochen_zhang@brown.edu \\n³ Harvard University \\n{melissadell,jacob.carlson}@fas.harvard.edu \\n⁴ University of Washington \\nbgc@cs.washington.edu \\n⁵ University of Waterloo \\nw422ii@uwaterloo.ca \\n\\n**Abstract.** Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io\\n\\n**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\\n\\n# 1 Introduction\\n\\nDeep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]')"
 ]
 },
-"execution_count": 8,
+"execution_count": 7,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
-"execution_count": 8
+"source": [
+"docs = loader.load()\n",
+"docs[0]"
+]
 },
 {
 "cell_type": "code",
+"execution_count": 8,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-03-10T16:31:58.192569Z",
-"start_time": "2025-03-10T16:31:58.182464Z"
+"end_time": "2025-03-13T08:43:55.969916Z",
+"start_time": "2025-03-13T08:43:55.960140Z"
 }
 },
-"source": [
-"import pprint\n",
-"\n",
-"pprint.pp(docs[0].metadata)"
-],
 "outputs": [
 {
 "name": "stdout",
@@ -348,7 +265,11 @@
 ]
 }
 ],
-"execution_count": 9
+"source": [
+"import pprint\n",
+"\n",
+"pprint.pp(docs[0].metadata)"
+]
 },
 {
 "cell_type": "markdown",
@@ -359,14 +280,25 @@
 },
 {
 "cell_type": "code",
+"execution_count": 9,
 "metadata": {
-"jupyter": {
-"is_executing": true
-},
 "ExecuteTime": {
-"start_time": "2025-03-10T16:33:11.308283Z"
+"end_time": "2025-03-13T08:44:38.007126Z",
+"start_time": "2025-03-13T08:43:56.049306Z"
 }
 },
+"outputs": [
+{
+"data": {
+"text/plain": [
+"6"
+]
+},
+"execution_count": 9,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
 "source": [
 "pages = []\n",
 "for doc in loader.lazy_load():\n",
@@ -377,33 +309,15 @@
 "\n",
 " pages = []\n",
 "len(pages)"
-],
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/pprados/workspace.bda/langchain/libs/community/.venv/lib/python3.12/site-packages/pyzerox/models/modellitellm.py:52: UserWarning: \n",
-" Custom system prompt was provided which overrides the default system prompt. We assume that you know what you are doing. \n",
-" . Default prompt for zerox is:\n",
-" \n",
-" Convert the following PDF page to markdown.\n",
-" Return only the markdown with no explanation text.\n",
-" Do not exclude any content from the page.\n",
-" \n",
-" warnings.warn(f\"{Messages.CUSTOM_SYSTEM_PROMPT_WARNING}. Default prompt for zerox is:\\n {DEFAULT_SYSTEM_PROMPT}\")\n"
-]
-}
-],
-"execution_count": null
+]
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 10,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:29:13.680164Z",
-"start_time": "2025-02-10T11:29:13.676516Z"
+"end_time": "2025-03-13T08:44:38.061709Z",
+"start_time": "2025-03-13T08:44:38.056310Z"
 }
 },
 "outputs": [
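
The cell being re-ordered in the two hunks above demonstrates batched lazy loading. A sketch of the full pattern, assuming `loader` is the ZeroxPDFLoader built earlier; the hidden middle lines and the batch size of 10 are assumptions based on the visible fragments:

```python
from langchain_community.document_loaders.pdf import ZeroxPDFLoader

loader = ZeroxPDFLoader("./example_data/layout-parser-paper.pdf")

# Stream pages one Document at a time instead of materializing the whole
# list, flushing each batch downstream (indexing, storage, ...).
pages = []
for doc in loader.lazy_load():
    pages.append(doc)
    if len(pages) >= 10:
        # index_documents(pages)  # hypothetical downstream step
        pages = []
len(pages)  # pages left over after the last full batch
```
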
@@ -480,11 +394,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 11,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:29:53.684396Z",
-"start_time": "2025-02-10T11:29:21.807791Z"
+"end_time": "2025-03-13T08:45:08.112805Z",
+"start_time": "2025-03-13T08:44:38.106140Z"
 }
 },
 "outputs": [
@@ -537,11 +451,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 12,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:30:45.095983Z",
-"start_time": "2025-02-10T11:30:15.033169Z"
+"end_time": "2025-03-13T08:45:27.803240Z",
+"start_time": "2025-03-13T08:45:08.157621Z"
 }
 },
 "outputs": [
@@ -587,15 +501,17 @@
 {
 "cell_type": "markdown",
 "metadata": {},
-"source": "### Add a custom *pages_delimiter* to identify where are ends of pages in *single* mode:"
+"source": [
+"### Add a custom *pages_delimiter* to identify where are ends of pages in *single* mode:"
+]
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": 13,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:31:22.824454Z",
-"start_time": "2025-02-10T11:30:51.711460Z"
+"end_time": "2025-03-13T08:47:15.278553Z",
+"start_time": "2025-03-13T08:45:27.916120Z"
 }
 },
 "outputs": [
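
The markdown cell rewrapped above introduces the pages_delimiter option. A hedged usage sketch, assuming ZeroxPDFLoader accepts the same `mode`/`pages_delimiter` keywords as the other langchain_community PDF loaders; the delimiter string is the one visible in the captured output of the next hunk:

```python
from langchain_community.document_loaders.pdf import ZeroxPDFLoader

# In "single" mode the whole PDF becomes one Document; the delimiter marks
# the original page boundaries inside page_content.
loader = ZeroxPDFLoader(
    "./example_data/layout-parser-paper.pdf",
    mode="single",
    pages_delimiter="\n-------THIS IS A CUSTOM END OF PAGE-------\n",
)
docs = loader.load()
print(docs[0].page_content[:500])
```
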
@@ -603,14 +519,14 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n",
+"# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n",
 "\n",
-"Zejian Shen¹ (✉), Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\n",
+"Zejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\n",
 "\n",
-"¹ Allen Institute for AI\n",
-"shannons@allenai.org \n",
+"¹ Allen Institute for AI \n",
+"shannon@allenai.org \n",
 "² Brown University \n",
-"ruochen_zhang@brown.edu \n",
+"ruohen_zhang@brown.edu \n",
 "³ Harvard University \n",
 "{melissadell, jacob.carlson}@fas.harvard.edu \n",
 "⁴ University of Washington \n",
@@ -618,17 +534,17 @@
 "⁵ University of Waterloo \n",
 "w422ii@uwaterloo.ca \n",
 "\n",
-"Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io\n",
+"**Abstract.** Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at [https://layout-parser.github.io](https://layout-parser.github.io).\n",
 "\n",
-"Keywords: Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\n",
+"**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit. \n",
 "\n",
-"1 Introduction\n",
+"## 1 Introduction\n",
 "\n",
 "Deep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]\n",
 "-------THIS IS A CUSTOM END OF PAGE-------\n",
-"37, layout detection [38, 22], table detection [26], and scene text detection [4]. A generalized learning-based framework dramatically reduces the need for the manual specification of complicated rules, which is the status quo with traditional methods. DL has the potential to transform DIA pipelines and benefit a broad spectrum of large-scale document digitization projects.\n",
+"[37], layout detection [38, 22], table detection [26], and scene text detection [4]. A generalized learning-based framework dramatically reduces the need for the manual specification of complicated rules, which is the status quo with traditional methods. DL has the potential to transform DIA pipelines and benefit a broad spectrum of large-scale document digitization projects.\n",
 "\n",
-"However, there are several practical difficulties for taking advantages of recent advances in DL-based methods: 1) DL models are notoriously convoluted for reuse and extension. Existing models are developed using distinct frameworks like TensorFlow [1] or PyTorch [24], and the high-level parameters can be obfuscated by implementation details [8]. It can be a time-consuming and frustrating experience to debug, reproduce, and adapt existing models for DIA, and many researchers who could benefit the most from using these methods lack the technical background to implement them from scratch. 2) Document images contain diverse and disparate patterns across domains, and customized training is often required to achieve a desirable detection accuracy. Currently there is no full-fledged infrastructure for easily curating the target document image datasets and fine-tuning or re-training the models. 3) DIA usually requires a sequence of models and other processing to obtain the final outputs. Often research teams use DL models and then perform further document analyses in separate processes, and these pipelines are not documented in any central location (and often not documented at all). This makes it difficult for research teams to learn about how reimplementing the DIA wheel.\n",
+"However, there are several practical difficulties for taking advantages of recent advances in DL-based methods: 1) DL models are notoriously convoluted for reuse and extension. Existing models are developed using distinct frameworks like TensorFlow [1] or PyTorch [24], and the high-level parameters can be obfuscated by implementation details [8]. It can be a time-consuming and frustrating experience to debug, reproduce, and adapt existing models for DIA, and many researchers who could benefit the most from using these methods lack the technical background to implement them from scratch. 2) Document images contain diverse and disparate patterns across domains, and customized training is often required to achieve a desirable detection accuracy. Currently there is no full-fledged infrastructure for easily curating the target document image datasets and fine-tuning or re-training the models. 3) DIA usually requires a sequence of models and other processing to obtain the final outputs. Often research teams use DL models and then perform further document analyses in separate processes, and these pipelines are not documented in any central location (and often not documented at all). This makes it difficult for research teams to learn about how to reinvent the DIA wheel.\n",
 "\n",
 "LayoutParser provides a unified toolkit to support DL-based document image analysis and processing. To address the aforementioned challenges, LayoutParser is built with the following components:\n",
 "\n",
@@ -642,7 +558,7 @@
 "LayoutParser is well aligned with recent efforts for improving DL model reusability in other disciplines like natural language processing [8, 34] and computer vision [35], but with a focus on unique challenges in DIA. We show LayoutParser can be applied in sophisticated and large-scale digitization projects.\n",
 "-------THIS IS A CUSTOM END OF PAGE-------\n",
 "2 Related Work\n",
-"Recently, various DL models and datasets have been developed for layout analysis tasks. The dhSegment [22] utilizes fully convolutional networks [20] for segmentation tasks on historical documents. Object detection-ba\n"
+"Recently, various DL models and datasets have been developed for layout analysis tasks. The dhSegment utilizes fully convolutional networks for segmentation tasks on histor\n"
 ]
 }
 ],
@@ -688,11 +604,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": 14,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:34:10.816353Z",
-"start_time": "2025-02-10T11:34:08.580109Z"
+"end_time": "2025-03-13T08:47:17.434520Z",
+"start_time": "2025-03-13T08:47:15.617062Z"
 }
 },
 "outputs": [
@@ -710,11 +626,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": 15,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:34:51.430316Z",
-"start_time": "2025-02-10T11:34:14.255873Z"
+"end_time": "2025-03-13T08:47:36.222510Z",
+"start_time": "2025-03-13T08:47:17.450679Z"
 }
 },
 "outputs": [
@@ -724,7 +640,9 @@
 "text": [
 "6 Z. Shen et al.\n",
 "\n",
 "\n",
+"Fig. 2: The relationship between the three types of layout data structures. \n",
+"Coordinate supports three kinds of variation; TextBlock consists of the coordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.\n",
 "\n",
 "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).\n",
 "\n",
@@ -751,7 +669,9 @@
 {
 "cell_type": "markdown",
 "metadata": {},
-"source": "With this simulation, RapidOCR is not invoked."
+"source": [
+"With this simulation, RapidOCR is not invoked."
+]
 },
 {
 "cell_type": "markdown",
@@ -762,11 +682,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": 16,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:35:32.458425Z",
-"start_time": "2025-02-10T11:35:30.029050Z"
+"end_time": "2025-03-13T08:47:37.917861Z",
+"start_time": "2025-03-13T08:47:36.337522Z"
 }
 },
 "outputs": [
@@ -784,11 +704,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
+"execution_count": 17,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:36:02.465629Z",
-"start_time": "2025-02-10T11:35:32.491422Z"
+"end_time": "2025-03-13T08:48:11.447528Z",
+"start_time": "2025-03-13T08:47:37.950471Z"
 }
 },
 "outputs": [
@@ -798,11 +718,13 @@
 "text": [
 "6 Z. Shen et al.\n",
 "\n",
 "\n",
+"\n",
+"Coordinate supports three kinds of variation; TextBlock consists of the coordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.\n",
 "\n",
 "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).\n",
 "\n",
-"## 3.2 Layout Data Structures\n",
+"3.2 Layout Data Structures\n",
 "\n",
 "A critical feature of LayoutParser is the implementation of a series of data structures and operations that can be used to efficiently process and manipulate the layout elements. In document image analysis pipelines, various post-processing on the layout analysis model outputs is usually required to obtain the final outputs. Traditionally, this requires exporting DL model outputs and then loading the results into other pipelines. All model outputs from LayoutParser will be stored in carefully engineered data types optimized for further processing, which makes it possible to build an end-to-end document digitization pipeline within LayoutParser. There are three key components in the data structure, namely the Coordinate system, the TextBlock, and the Layout. They provide different levels of abstraction for the layout data, and a set of APIs are supported for transformations or operations on these classes.\n"
 ]
@@ -824,7 +746,9 @@
 {
 "cell_type": "markdown",
 "metadata": {},
-"source": "With this simulation, Tesseract is not invoked."
+"source": [
+"With this simulation, Tesseract is not invoked."
+]
 },
 {
 "cell_type": "markdown",
@@ -835,11 +759,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": 18,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:36:22.261669Z",
-"start_time": "2025-02-10T11:36:19.762818Z"
+"end_time": "2025-03-13T08:48:13.261108Z",
+"start_time": "2025-03-13T08:48:11.560188Z"
 }
 },
 "outputs": [
@@ -857,11 +781,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 18,
+"execution_count": 19,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:36:24.230265Z",
-"start_time": "2025-02-10T11:36:24.095234Z"
+"end_time": "2025-03-13T08:48:13.358860Z",
+"start_time": "2025-03-13T08:48:13.280561Z"
 }
 },
 "outputs": [
@@ -871,7 +795,7 @@
 "True"
 ]
 },
-"execution_count": 18,
+"execution_count": 19,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -886,11 +810,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 19,
+"execution_count": 20,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:36:26.122823Z",
-"start_time": "2025-02-10T11:36:26.114892Z"
+"end_time": "2025-03-13T08:48:13.385243Z",
+"start_time": "2025-03-13T08:48:13.379476Z"
 }
 },
 "outputs": [],
@@ -903,11 +827,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 20,
+"execution_count": 21,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:37:02.400667Z",
-"start_time": "2025-02-10T11:36:30.752293Z"
+"end_time": "2025-03-13T08:48:39.795277Z",
+"start_time": "2025-03-13T08:48:13.425691Z"
 }
 },
 "outputs": [
@@ -915,9 +839,11 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "\n",
-"\n",
-"Coordinate supports three kinds of variation; TextBlock consists of the coordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.\n",
+"- **Coordinate** supports three kinds of variation;\n",
+"- **TextBlock** consists of the coordinate information and extra features like block text, types, and reading orders;\n",
+"- A **Layout** object is a list of all possible layout elements, including other Layout objects.\n",
+"- They all support the same set of transformation and operation APIs for maximum flexibility.\n",
 "\n",
 "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).\n",
 "\n",
@@ -944,7 +870,9 @@
 {
 "cell_type": "markdown",
 "metadata": {},
-"source": "With this simulation, the format is injected into the prompt."
+"source": [
+"With this simulation, the format is injected into the prompt."
+]
 },
 {
 "cell_type": "markdown",
@@ -962,11 +890,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 21,
+"execution_count": 22,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:37:56.900282Z",
-"start_time": "2025-02-10T11:37:22.285001Z"
+"end_time": "2025-03-13T08:49:03.336306Z",
+"start_time": "2025-03-13T08:48:39.903502Z"
 }
 },
 "outputs": [
@@ -974,19 +902,19 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "| Dataset | Base Model | Large Model | Notes |\n",
-"|--------------|------------|-------------|--------------------------------------------------------|\n",
+"|---------------|------------|-------------|-----------------------------------------------------|\n",
 "| PubLayNet | F / M | M | Layouts of modern scientific documents |\n",
-"| PubLayNet | M | M | Layouts of scanned modern magazines and scientific reports |\n",
+"| PRImA | F / M | - | Layouts of scanned modern magazines and scientific reports |\n",
 "| Newspaper | F | - | Layouts of scanned US newspapers from the 20th century |\n",
-"| TableBank | F | F | Table region on modern scientific and business documents |\n",
-"| HLDataset | F / M | - | Layouts of history Japanese documents |\n",
+"| TableBank | F | F | Table regions on modern scientific and business documents |\n",
+"| HJDataset | F / M | - | Layouts of history Japanese documents |\n",
 "\n",
-"1. For each dataset, we train several models of different sizes for different needs (the trade-off between accuracy vs. computational cost). For \"base model\" and \"large model\", we refer to using the ResNet 50 or ResNet 101 backbones. One can train models of different architectures, like Faster R-CNN and Mask R-CNN. For example, an F in the Large Model column indicates it has a Faster R-CNN model trained using the ResNet backbone. The platform is maintained and a number of additions will be made to the model zoo in coming months.\n",
+"For each dataset, we train several models of different sizes for different needs (the trade-off between accuracy vs. computational cost). For \"base model\" and \"large model\", we refer to using the ResNet 50 or ResNet 101 backbones respectively. One can train models of different architectures, like Faster R-CNN and Mask R-CNN. For example, an F in the Large Model column indicates it has a Faster R-CNN model trained using the ResNet backbone. The platform is maintained and a number of additions will be made to the model zoo in coming months.\n",
 "\n",
-"3.1 Layout Detection Models\n",
+"### 3.1 Layout Detection Models\n",
 "\n",
-"In LayoutParser, a layout model takes a document image as an input and generates a list of rectangular boxes for the target content regions. Different from traditional methods, it relies on deep convolutional neural networks rather than manually curated rules to identify content regions. It is formulated as an object detection problem and state-of-the-art models like Faster R-CNN yield prediction results of high accuracy and makes it possible to build a concise, generalized interface for layout detection. LayoutParser, built upon Detectron2, provides a minimal API that can perform layout detection with only four lines of Python:\n",
+"In LayoutParser, a layout model takes a document image as an input and generates a list of rectangular boxes for the target content regions. Different from traditional methods, it relies on deep convolutional neural networks rather than manually curated rules to identify content regions. It is formulated as an object detection problem and state-of-the-art models like Faster R-CNN yield prediction results of high accuracy and make it possible to build a concise, generalized interface for layout detection. LayoutParser, built upon Detectron2, provides a minimal API that can perform layout detection with only four lines of Python:\n",
 "\n",
 "```python\n",
 "import layoutparser as lp\n",
@@ -997,7 +925,7 @@
 "layout = model.detect(image)\n",
 "```\n",
 "\n",
-"LayoutParser provides a wealth of pre-trained model weights using various datasets covering different languages, time periods, and document types. Due to domain shift, the prediction performance can notably drop when models are applied to target samples that are significantly different from the training dataset. As document structures and layouts vary greatly in different domains, it is important to select models trained on a dataset similar to the test samples. A semantic syntax is used for initializing the model weights in LayoutParser, using both the dataset name and model name `lp://<dataset-name>/<model-architecture-name>`.\n"
+"LayoutParser provides a wealth of pre-trained model weights using various datasets covering different languages, time periods, and document types. Due to domain shift, the prediction performance can notably drop when models are applied to target samples that are significantly different from the training dataset. As document structures and layouts vary greatly in different domains, it is important to select models trained on a dataset similar to the test samples. A semantic syntax is used for initializing the model weights in LayoutParser, using both the dataset name and model name lp://<dataset-name>/<model-architecture-name>.\n"
 ]
 }
 ],
@@ -1025,11 +953,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 22,
+"execution_count": 23,
 "metadata": {
 "ExecuteTime": {
-"end_time": "2025-02-10T11:38:57.651776Z",
-"start_time": "2025-02-10T11:38:08.259781Z"
+"end_time": "2025-03-13T08:49:25.942671Z",
+"start_time": "2025-03-13T08:49:03.450966Z"
 }
 },
 "outputs": [
@@ -1039,28 +967,26 @@
 "text": [
 "# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n",
 "\n",
-"Zejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\n",
+"Zejing Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\n",
 "\n",
 "¹ Allen Institute for AI \n",
-"shannon@allenai.org \n",
+"shannons@allenai.org \n",
 "² Brown University \n",
 "ruochen_zhang@brown.edu \n",
 "³ Harvard University \n",
-"{melissad1, jacob.carlson}@fas.harvard.edu \n",
+"{melissadell,jacob_carlson}@fas.harvard.edu \n",
 "⁴ University of Washington \n",
 "bgcl@cs.washington.edu \n",
 "⁵ University of Waterloo \n",
 "w422ii@uwaterloo.ca \n",
 "\n",
-"## Abstract\n",
+"**Abstract.** Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io\n",
 "\n",
-"Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io.\n",
-"\n",
-"Keywords: Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\n",
+"**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\n",
 "\n",
 "## 1 Introduction\n",
 "\n",
-"Deep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]\n",
+"Deep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11].\n",
 "{'producer': 'pdfTeX-1.40.21',\n",
 " 'creator': 'LaTeX with hyperref',\n",
 " 'creationdate': '2021-06-22T01:27:10+00:00',\n",
@@ -1082,7 +1008,7 @@
    "source": [
     "from langchain_community.document_loaders import FileSystemBlobLoader\n",
     "from langchain_community.document_loaders.generic import GenericLoader\n",
-    "from langchain_community.document_loaders.parsers import ZeroxPDFParser\n",
+    "from langchain_community.document_loaders.parsers.pdf import ZeroxPDFParser\n",
     "\n",
     "loader = GenericLoader(\n",
     "    blob_loader=FileSystemBlobLoader(\n",
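
The corrected import path above matters because `ZeroxPDFParser` lives in the `parsers.pdf` module. A minimal usage sketch, assuming an OpenAI key is set and a local PDF folder exists (the directory path and glob below are illustrative):

```python
from langchain_community.document_loaders import FileSystemBlobLoader
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.pdf import ZeroxPDFParser

# Pair a blob loader (where the bytes come from) with a blob parser
# (how the bytes become Documents).
loader = GenericLoader(
    blob_loader=FileSystemBlobLoader(
        path="./example_data/",  # illustrative directory
        glob="*.pdf",
    ),
    blob_parser=ZeroxPDFParser(model="gpt-4o-mini"),
)
docs = loader.load()
print(docs[0].page_content[:100])
```
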
@@ -1099,7 +1025,9 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": "It is possible to work with files from cloud storage."
+   "source": [
+    "It is possible to work with files from cloud storage."
+   ]
   },
   {
    "cell_type": "code",
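
A hedged sketch of the cloud-storage variant this markdown cell alludes to: swap `FileSystemBlobLoader` for `CloudBlobLoader`, assuming your langchain-community version ships it (it relies on cloudpathlib); the bucket URL and parameter names below are assumptions:

```python
from langchain_community.document_loaders.blob_loaders import CloudBlobLoader
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.pdf import ZeroxPDFParser

# Same GenericLoader pattern as above, but the blobs come from a bucket.
loader = GenericLoader(
    blob_loader=CloudBlobLoader(
        url="s3://my-bucket/pdfs",  # placeholder; az:// and gs:// URLs are similar
        glob="*.pdf",
    ),
    blob_parser=ZeroxPDFParser(),
)
```
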
@@ -1162,9 +1090,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "langchain-community",
    "language": "python",
-   "name": "python3"
+   "name": "langchain-community"
   },
   "language_info": {
    "codemirror_mode": {
@@ -1176,7 +1104,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.7"
+   "version": "3.11.11"
   }
  },
  "nbformat": 4,

@@ -360,7 +360,6 @@ if TYPE_CHECKING:
         PyPDFium2Loader,
         PyPDFLoader,
         UnstructuredPDFLoader,
-        ZeroxPDFLoader,
     )
     from langchain_community.document_loaders.pebblo import (
         PebbloSafeLoader,
@@ -733,7 +732,6 @@ _module_lookup = {
     "YoutubeAudioLoader": "langchain_community.document_loaders.blob_loaders",
     "YoutubeLoader": "langchain_community.document_loaders.youtube",
     "YuqueLoader": "langchain_community.document_loaders.yuque",
-    "ZeroxPDFLoader": "langchain_community.document_loaders.pdf",
 }


@@ -942,5 +940,4 @@ __all__ = [
     "YoutubeAudioLoader",
     "YoutubeLoader",
     "YuqueLoader",
-    "ZeroxPDFLoader",
 ]

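For context on the paired deletions above: in `langchain_community`, the `_module_lookup` table and `__all__` drive a lazy-import mechanism, which is why `ZeroxPDFLoader` has to disappear from the `TYPE_CHECKING` imports, the lookup table, and `__all__` together. A simplified sketch of that pattern (a hypothetical miniature, not the actual module):

```python
import importlib
from typing import Any

# Map exported names to the modules that actually define them.
_module_lookup = {
    "YuqueLoader": "langchain_community.document_loaders.yuque",
}


def __getattr__(name: str) -> Any:
    """Import the defining module only on first attribute access (PEP 562)."""
    if name in _module_lookup:
        module = importlib.import_module(_module_lookup[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


__all__ = list(_module_lookup.keys())
```
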
@@ -8,10 +8,10 @@ import io
 import logging
 import threading
 import warnings
-from asyncio import AbstractEventLoop
 from datetime import datetime
+from multiprocessing.pool import ThreadPool
 from pathlib import Path
-from tempfile import NamedTemporaryFile, TemporaryDirectory
+from tempfile import TemporaryDirectory
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -1471,6 +1471,7 @@ class PDFPlumberParser(BaseBlobParser):

         return extract_from_images_with_rapidocr(images)

+
 _map_extract_tables: Dict[Literal["markdown", "html", None], str] = {
     "markdown": "",
     "html": "But, use html syntax for convert all tables. ",
@@ -1483,6 +1484,7 @@ _map_extract_images = {
     "describe it. ",
 }

+
 class ZeroxPDFParser(BaseBlobParser):
     """Parse a blob from a PDF using `py-zerox` library.

@@ -1532,6 +1534,9 @@ class ZeroxPDFParser(BaseBlobParser):
             print(docs[0].page_content[:100])
             print(docs[0].metadata)
     """
+
+    _pool = ThreadPool()
+
     _warn_images_to_text = False
     _warn_creator = False
     _prompt = (
@@ -1543,12 +1548,6 @@ class ZeroxPDFParser(BaseBlobParser):
         "Do not exclude any content from the page. "
     )

-    @staticmethod
-    def _run_async_from_thread(coro, loop):
-        future = asyncio.run_coroutine_threadsafe(coro,
-                                                  loop)  # Run the coroutine in the existing loop
-        return future.result()  # Block while waiting for the result
-
     def __init__(
         self,
         mode: Literal["single", "page"] = "page",
@@ -1686,11 +1685,7 @@ class ZeroxPDFParser(BaseBlobParser):
             zerox_prompt = PromptTemplate.from_template(
                 self.custom_system_prompt
             ).format(prompt_tables=prompt_tables, prompt_images=prompt_images)
-        # async def toto():
-        #     await asyncio.sleep(0)
-        #     return "hello"
-        # coro=toto()
-        coro=zerox(
+        coro = zerox(
             file_path=str(file_path),
             model=self.model,
             cleanup=self.cleanup,
@@ -1703,10 +1698,9 @@ class ZeroxPDFParser(BaseBlobParser):
         try:
             loop = asyncio.get_running_loop()

-            from multiprocessing.pool import ThreadPool
-            pool = ThreadPool(processes=1)
-            zerox_output = pool.apply_async(
-                lambda: loop.run_until_complete(coro)).get()  # tuple of args for foo
+            zerox_output = ZeroxPDFParser._pool.apply_async(
+                lambda: loop.run_until_complete(coro)
+            ).get()  # block until the coroutine finishes on the worker thread

         except RuntimeError:
             zerox_output = asyncio.run(coro)

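The hunk above replaces a per-call `ThreadPool` with the class-level `_pool` added earlier, so the worker thread is reused across parsed files. The underlying problem: `asyncio.run()` raises `RuntimeError` when a loop is already running (as in Jupyter), so the coroutine is handed off to a worker thread instead. Note that the committed code drives the caller's loop with `run_until_complete` from another thread, which leans on the `nest_asyncio.apply()` call made earlier in the notebook; the sketch below is a self-contained, loop-agnostic variant that runs a fresh loop on the worker thread:

```python
import asyncio
from multiprocessing.pool import ThreadPool

# One shared pool, mirroring ZeroxPDFParser._pool: the worker thread is
# reused instead of being created for every call.
_pool = ThreadPool(processes=1)


async def _parse() -> str:
    await asyncio.sleep(0.1)  # stand-in for the zerox(...) coroutine
    return "parsed"


def run_sync(coro):
    """Run *coro* to completion whether or not a loop is already running."""
    try:
        asyncio.get_running_loop()
        # A loop is running in this thread (e.g. Jupyter): drive the
        # coroutine on the worker thread, which has no loop of its own.
        return _pool.apply_async(lambda: asyncio.run(coro)).get()
    except RuntimeError:
        # No running loop here: plain asyncio.run() is safe.
        return asyncio.run(coro)


print(run_sync(_parse()))  # -> "parsed"
```
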
@@ -2,48 +2,43 @@

 import re
 from pathlib import Path
+from typing import TYPE_CHECKING, Iterator, Type

 import pytest
-from typing import TYPE_CHECKING, Iterator

-from langchain_community.document_loaders import PDFMinerLoader, PDFPlumberLoader, \
-    PyMuPDFLoader, PyPDFium2Loader, PyPDFLoader
 from langchain_community.document_loaders.base import BaseBlobParser
 from langchain_community.document_loaders.blob_loaders import Blob
 from langchain_community.document_loaders.parsers import (
     BaseImageBlobParser,
     PDFPlumberParser,
 )
-from langchain_community.document_loaders.parsers.pdf import ZeroxPDFParser, \
-    PyMuPDFParser, PDFMinerParser, PyPDFium2Parser, PyPDFParser
+from langchain_community.document_loaders.parsers.pdf import (
+    PDFMinerParser,
+    PyMuPDFParser,
+    PyPDFium2Parser,
+    PyPDFParser,
+    ZeroxPDFParser,
+)

 if TYPE_CHECKING:
     from PIL.Image import Image

-_map_parser = {
-    'PDFMinerParser': PDFMinerParser,
-    'PDFPlumberParser': PDFPlumberParser,
-    'PyMuPDFParser': PyMuPDFParser,
-    'PyPDFium2Parser': PyPDFium2Parser,
-    'PyPDFParser': PyPDFParser,
-    'ZeroxPDFParser': ZeroxPDFParser,
-}

 # PDFs to test parsers on.
 HELLO_PDF = Path(__file__).parent.parent.parent / "examples" / "hello.pdf"

 LAYOUT_PARSER_PAPER_PDF = (
     Path(__file__).parent.parent.parent / "examples" / "layout-parser-paper.pdf"
 )

 LAYOUT_PARSER_PAPER_PASSWORD_PDF = (
     Path(__file__).parent.parent.parent
     / "examples"
     / "layout-parser-paper-password.pdf"
 )

 DUPLICATE_CHARS = (
     Path(__file__).parent.parent.parent / "examples" / "duplicate-chars.pdf"
 )


@@ -123,30 +118,30 @@ class EmptyImageBlobParser(BaseImageBlobParser):
     [("single", EmptyImageBlobParser()), ("page", None)],
 )
 @pytest.mark.parametrize(
-    "parser_factory,params",
+    "parser_class,params",
     [
-        ("PDFMinerParser", {}),
-        ("PyMuPDFParser", {}),
-        ("PyPDFium2Parser", {}),
-        ("PyPDFParser", {"extraction_mode": "plain"}),
-        ("PyPDFParser", {"extraction_mode": "layout"}),
-        ("ZeroxPDFParser", {}),
+        (PDFMinerParser, {}),
+        (PyMuPDFParser, {}),
+        (PyPDFium2Parser, {}),
+        (PyPDFParser, {"extraction_mode": "plain"}),
+        (PyPDFParser, {"extraction_mode": "layout"}),
+        (ZeroxPDFParser, {}),
     ],
 )
 @pytest.mark.requires("pillow")
 def test_mode_and_extract_images_variations(
-    parser_factory: str,
+    parser_class: Type,
     params: dict,
     mode: str,
     image_parser: BaseImageBlobParser,
 ) -> None:
-    if parser_factory == "ZeroxPDFParser":
+    if parser_class == ZeroxPDFParser:
         try:
             import pyzerox  # noqa: F401
         except ImportError:
             pytest.skip("py-zerox is valid only with Python 3.11+")
     _test_matrix(
-        parser_factory,
+        parser_class,
         params,
         mode,
         image_parser,
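
The refactor above swaps string keys plus a `_map_parser` lookup for the parser classes themselves. Passing classes directly removes one level of indirection and lets the type checker see what `parser_class(**params)` constructs. A standalone sketch of the pattern, with two illustrative stand-in classes:

```python
from typing import Type

import pytest


class PlainParser:
    def __init__(self, **params: object) -> None:
        self.params = params


class FancyParser:
    def __init__(self, **params: object) -> None:
        self.params = params


@pytest.mark.parametrize(
    "parser_class,params",
    [
        (PlainParser, {}),
        (FancyParser, {"extraction_mode": "layout"}),
    ],
)
def test_parser_instantiation(parser_class: Type, params: dict) -> None:
    # The class object arrives directly; no string-to-class dict is needed,
    # and pytest still renders readable test ids from the class names.
    parser = parser_class(**params)
    assert parser.params == params
```
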
@@ -159,23 +154,23 @@ def test_mode_and_extract_images_variations(
     ["text", "markdown-img", "html-img"],
 )
 @pytest.mark.parametrize(
-    "parser_factory,params",
+    "parser_class,params",
     [
-        ("PDFMinerParser", {}),
-        ("PyMuPDFParser", {}),
-        ("PyPDFium2Parser", {}),
-        ("PyPDFParser", {"extraction_mode": "plain"}),
-        ("PyPDFParser", {"extraction_mode": "layout"}),
-        ("ZeroxPDFParser", {}),
+        (PDFMinerParser, {}),
+        (PyMuPDFParser, {}),
+        (PyPDFium2Parser, {}),
+        (PyPDFParser, {"extraction_mode": "plain"}),
+        (PyPDFParser, {"extraction_mode": "layout"}),
+        (ZeroxPDFParser, {}),
     ],
 )
 @pytest.mark.requires("pillow")
 def test_mode_and_image_formats_variations(
-    parser_factory: str,
+    parser_class: Type,
     params: dict,
     images_inner_format: str,
 ) -> None:
-    if parser_factory == "ZeroxPDFParser":
+    if parser_class == ZeroxPDFParser:
         try:
             import pyzerox  # noqa: F401
         except ImportError:
@@ -184,7 +179,7 @@ def test_mode_and_image_formats_variations(
     image_parser = EmptyImageBlobParser()

     _test_matrix(
-        parser_factory,
+        parser_class,
         params,
         mode,
         image_parser,
@@ -193,11 +188,11 @@


 def _test_matrix(
-    parser_factory: str,
+    parser_class: Type,
     params: dict,
     mode: str,
     image_parser: BaseImageBlobParser,
     images_inner_format: str,
 ) -> None:
     """Apply the same test for all *standard* PDF parsers.

@@ -245,8 +240,6 @@ def _test_matrix(
         assert len(docs)
     parser.password = old_password

-    parser_class = _map_parser[parser_factory]
-
     parser = parser_class(
         mode=mode,
         images_parser=image_parser,
@@ -266,19 +259,19 @@
     ["markdown", "html", "csv", None],
 )
 @pytest.mark.parametrize(
-    "parser_factory,params",
+    "parser_class,params",
     [
-        ("PyMuPDFParser", {}),
-        ("ZeroxPDFParser", {"model": "gpt-4o-mini"}),
+        (PyMuPDFParser, {}),
+        (ZeroxPDFParser, {"model": "gpt-4o-mini"}),
     ],
 )
 def test_parser_with_table(
-    parser_factory: str,
+    parser_class: Type,
     params: dict,
     mode: str,
     extract_tables: str,
 ) -> None:
-    if parser_factory == "ZeroxPDFParser":
+    if parser_class == ZeroxPDFParser:
         try:
             import pyzerox  # noqa: F401
         except ImportError:
@@ -333,8 +326,6 @@ def test_parser_with_table(
     def _analyze_image(self, img: Image) -> str:
         return ""

-    parser_class = _map_parser[parser_factory]
-
     parser = parser_class(
         mode=mode,
         extract_tables=extract_tables,

@@ -1,15 +1,19 @@
 import os
 from pathlib import Path
-from typing import Sequence, Union
+from typing import Sequence, Type, Union

 import pytest

-import langchain_community.document_loaders as pdf_loaders
-from langchain_community.document_loaders import (
+from langchain_community.document_loaders.pdf import (
     AmazonTextractPDFLoader,
     MathpixPDFLoader,
+    PDFMinerLoader,
     PDFMinerPDFasHTMLLoader,
+    PyMuPDFLoader,
+    PyPDFium2Loader,
+    PyPDFLoader,
     UnstructuredPDFLoader,
+    ZeroxPDFLoader,
 )


@@ -164,25 +168,24 @@ def test_amazontextract_loader_failures() -> None:


 @pytest.mark.parametrize(
-    "parser_factory,params",
+    "loader_class,params",
     [
-        ("PDFMinerLoader", {}),
-        ("PyMuPDFLoader", {}),
-        ("PyPDFium2Loader", {}),
-        ("PyPDFLoader", {}),
-        ("ZeroxPDFLoader", {}),
+        (PDFMinerLoader, {}),
+        (PyMuPDFLoader, {}),
+        (PyPDFium2Loader, {}),
+        (PyPDFLoader, {}),
+        (ZeroxPDFLoader, {}),
     ],
 )
 def test_standard_parameters(
-    parser_factory: str,
+    loader_class: Type,
     params: dict,
 ) -> None:
-    if parser_factory == "ZeroxPDFLoader":
+    if loader_class == ZeroxPDFLoader:
         try:
             import pyzerox  # noqa: F401
         except ImportError:
             pytest.skip("pyzerox is valid only with Python 3.11+")
-    loader_class = getattr(pdf_loaders, parser_factory)

     file_path = Path(__file__).parent.parent / "examples/hello.pdf"
     loader = loader_class(file_path)

@@ -2,14 +2,17 @@

 import importlib
 from pathlib import Path
-from typing import Any, Iterator
+from typing import Any, Iterator, Type

 import pytest

-import langchain_community.document_loaders.parsers as pdf_parsers
 from langchain_community.document_loaders.base import BaseBlobParser
 from langchain_community.document_loaders.blob_loaders import Blob
 from langchain_community.document_loaders.parsers.pdf import (
+    PDFMinerParser,
+    PyMuPDFParser,
+    PyPDFium2Parser,
+    PyPDFParser,
     _merge_text_and_extras,
 )

@@ -74,25 +77,23 @@ def _assert_with_parser(parser: BaseBlobParser, *, splits_by_page: bool = True)


 @pytest.mark.parametrize(
-    "parser_factory,require,params",
+    "parser_class,require,params",
     [
-        ("PDFMinerParser", "pdfminer", {"splits_by_page": False}),
-        ("PDFPlumberParser", "pdfplumber", {}),
-        ("PyMuPDFParser", "pymupdf", {}),
-        ("PyPDFParser", "pypdf", {}),
-        ("PyPDFium2Parser", "pypdfium2", {}),
+        (PDFMinerParser, "pdfminer", {"splits_by_page": False}),
+        (PyMuPDFParser, "pymupdf", {}),
+        (PyPDFParser, "pypdf", {}),
+        (PyPDFium2Parser, "pypdfium2", {}),
     ],
 )
 def test_parsers(
-    parser_factory: str,
+    parser_class: Type,
     require: str,
     params: dict[str, Any],
 ) -> None:
     try:
         require = require.replace("-", "")
         importlib.import_module(require, package=None)
-        parser_class = getattr(pdf_parsers, parser_factory)
         parser = parser_class()
         _assert_with_parser(parser, **params)
     except ModuleNotFoundError:
-        pytest.skip(f"{parser_factory} skipped. Requires '{require}'")
+        pytest.skip(f"{parser_class} skipped. Requires '{require}'")

@@ -200,7 +200,6 @@ EXPECTED_ALL = [
     "YoutubeAudioLoader",
     "YoutubeLoader",
     "YuqueLoader",
-    "ZeroxPDFLoader",
 ]

