diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml index 1b6522385de..e67d3afff42 100644 --- a/.github/workflows/scheduled_test.yml +++ b/.github/workflows/scheduled_test.yml @@ -19,11 +19,11 @@ jobs: working-directory: - "libs/partners/openai" - "libs/partners/anthropic" - # - "libs/partners/ai21" # standard-tests broken + - "libs/partners/ai21" - "libs/partners/fireworks" - # - "libs/partners/groq" # rate-limited + - "libs/partners/groq" - "libs/partners/mistralai" - # - "libs/partners/together" # rate-limited + - "libs/partners/together" name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }} steps: - uses: actions/checkout@v4 diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb index 458505c30ec..a368259fb80 100644 --- a/docs/docs/integrations/llms/openai.ipynb +++ b/docs/docs/integrations/llms/openai.ipynb @@ -59,21 +59,20 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "id": "6fb585dd", "metadata": { "tags": [] }, "outputs": [], "source": [ - "from langchain.chains import LLMChain\n", "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "id": "035dea0f", "metadata": { "tags": [] @@ -89,7 +88,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "id": "3f3458d9", "metadata": { "tags": [] @@ -113,19 +112,19 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "id": "a641dbd9", "metadata": { "tags": [] }, "outputs": [], "source": [ - "llm_chain = LLMChain(prompt=prompt, llm=llm)" + "llm_chain = prompt | llm" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 5, "id": "9f844993", "metadata": { "tags": [] @@ -134,10 +133,10 @@ { "data": { "text/plain": [ - "' Justin Bieber was born in 1994, so the NFL team that won the Super Bowl in 1994 was the Dallas Cowboys.'" + "' Justin Bieber was born on March 1, 1994. The Super Bowl is typically played in late January or early February. So, we need to look at the Super Bowl from 1994. In 1994, the Super Bowl was Super Bowl XXVIII, played on January 30, 1994. 
The winning team of that Super Bowl was the Dallas Cowboys.'" ] }, - "execution_count": 7, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -145,7 +144,7 @@ "source": [ "question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n", "\n", - "llm_chain.run(question)" + "llm_chain.invoke(question)" ] }, { @@ -173,7 +172,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.11.1 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -187,7 +186,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.9.6" }, "vscode": { "interpreter": { diff --git a/docs/docs/integrations/llms/predibase.ipynb b/docs/docs/integrations/llms/predibase.ipynb index fabd36d75fb..fc5e43bd463 100644 --- a/docs/docs/integrations/llms/predibase.ipynb +++ b/docs/docs/integrations/llms/predibase.ipynb @@ -63,12 +63,13 @@ "source": [ "from langchain_community.llms import Predibase\n", "\n", - "# With a fine-tuned adapter hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).\n", + "# With a fine-tuned adapter hosted at Predibase (adapter_version must be specified).\n", "model = Predibase(\n", " model=\"mistral-7b\",\n", + " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", + " predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n", " adapter_id=\"e2e_nlg\",\n", " adapter_version=1,\n", - " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", ")" ] }, @@ -83,8 +84,9 @@ "# With a fine-tuned adapter hosted at HuggingFace (adapter_version does not apply and will be ignored).\n", "model = Predibase(\n", " model=\"mistral-7b\",\n", - " adapter_id=\"predibase/e2e_nlg\",\n", " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", + " predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n", + " adapter_id=\"predibase/e2e_nlg\",\n", ")" ] }, @@ -122,7 +124,9 @@ "from langchain_community.llms import Predibase\n", "\n", "model = Predibase(\n", - " model=\"mistral-7b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n", + " model=\"mistral-7b\",\n", + " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", + " predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n", ")" ] }, @@ -136,12 +140,13 @@ }, "outputs": [], "source": [ - "# With a fine-tuned adapter hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).\n", + "# With a fine-tuned adapter hosted at Predibase (adapter_version must be specified).\n", "model = Predibase(\n", " model=\"mistral-7b\",\n", + " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", + " predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n", " adapter_id=\"e2e_nlg\",\n", " adapter_version=1,\n", - " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", ")" ] }, @@ -154,8 +159,9 @@ "# With a fine-tuned adapter hosted at HuggingFace (adapter_version does not apply and will be ignored).\n", "llm = Predibase(\n", " model=\"mistral-7b\",\n", - " adapter_id=\"predibase/e2e_nlg\",\n", " predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n", + " predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if 
omitted)\n", "    adapter_id=\"predibase/e2e_nlg\",\n", ")" ] }, @@ -247,13 +253,14 @@ "\n", "model = Predibase(\n", "    model=\"my-base-LLM\",\n", -    "    adapter_id=\"my-finetuned-adapter-id\",  # Supports both, Predibase-hosted and HuggingFace-hosted model repositories.\n", -    "    # adapter_version=1,  # optional (returns the latest, if omitted)\n", "    predibase_api_key=os.environ.get(\n", "        \"PREDIBASE_API_TOKEN\"\n", "    ),  # Adapter argument is optional.\n", +    "    predibase_sdk_version=None,  # optional parameter (defaults to the latest Predibase SDK version if omitted)\n", +    "    adapter_id=\"my-finetuned-adapter-id\",  # Supports both Predibase-hosted and HuggingFace-hosted adapter repositories.\n", +    "    adapter_version=1,  # required for Predibase-hosted adapters (ignored for HuggingFace-hosted adapters)\n", ")\n", -    "# replace my-finetuned-LLM with the name of your model in Predibase" +    "# replace my-base-LLM with the name of the serverless base model of your choice in Predibase" ] }, { diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index a9e7a29a9bd..623191e7c14 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -2,26 +2,21 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, "source": [ "# Zep\n", + "> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n", "\n", - "## Fast, Scalable Building Blocks for LLM Apps\n", - "Zep is an open source platform for productionizing LLM apps. Go from a prototype\n", - "built in LangChain or LlamaIndex, or a custom app, to production in minutes without\n", - "rewriting code.\n", + ">[Zep](http://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", "\n", - "Key Features:\n", + "> Interested in Zep Cloud? See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and [Zep Cloud Memory Example](https://help.getzep.com/langchain/examples/messagehistory-example)\n", "\n", - "- **Fast!** Zep operates independently of the your chat loop, ensuring a snappy user experience.\n", - "- **Chat History Memory, Archival, and Enrichment**, populate your prompts with relevant chat history, sumamries, named entities, intent data, and more.\n", - "- **Vector Search over Chat History and Documents** Automatic embedding of documents, chat histories, and summaries. Use Zep's similarity or native MMR Re-ranked search to find the most relevant.\n", - "- **Manage Users and their Chat Sessions** Users and their Chat Sessions are first-class citizens in Zep, allowing you to manage user interactions with your bots or agents easily.\n", - "- **Records Retention and Privacy Compliance** Comply with corporate and regulatory mandates for records retention while ensuring compliance with privacy regulations such as CCPA and GDPR. 
Fulfill *Right To Be Forgotten* requests with a single API call\n", - "\n", - "Zep project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n", - "Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n", + "## Open Source Installation and Setup\n", "\n", + "> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n", + ">\n", + "> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n", "\n", "## Example\n", "\n", @@ -34,7 +29,10 @@ "2. Running an agent and having message automatically added to the store.\n", "3. Viewing the enriched messages.\n", "4. Vector search over the conversation history." - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", @@ -259,11 +257,11 @@ "text": [ "\n", "\n", - "\u001b[1m> Entering new chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n", - "AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001b[0m\n", + "\u001B[1m> Entering new chain...\u001B[0m\n", + "\u001B[32;1m\u001B[1;3mThought: Do I need to use a tool? No\n", + "AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001B[0m\n", "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" + "\u001B[1m> Finished chain.\u001B[0m\n" ] }, { @@ -394,10 +392,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [], "source": [] diff --git a/docs/docs/integrations/platforms/index.mdx b/docs/docs/integrations/platforms/index.mdx index ed255195383..5e7040eba49 100644 --- a/docs/docs/integrations/platforms/index.mdx +++ b/docs/docs/integrations/platforms/index.mdx @@ -5,6 +5,13 @@ sidebar_class_name: hidden # Providers +:::info + +If you'd like to write your own integration, see [Extending LangChain](/docs/guides/development/extending_langchain/). +If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing/integrations/). + +::: + LangChain integrates with many providers. 
## Partner Packages diff --git a/docs/docs/integrations/providers/predibase.md b/docs/docs/integrations/providers/predibase.md index 5a88ff117f3..8f27b818f4a 100644 --- a/docs/docs/integrations/providers/predibase.md +++ b/docs/docs/integrations/providers/predibase.md @@ -17,7 +17,11 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}" from langchain_community.llms import Predibase -model = Predibase(model="mistral-7b"", predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN")) +model = Predibase( + model="mistral-7b", + predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"), + predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted) +) response = model("Can you recommend me a nice dry wine?") print(response) @@ -31,8 +35,14 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}" from langchain_community.llms import Predibase -# The fine-tuned adapter is hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version). -model = Predibase(model="mistral-7b"", adapter_id="e2e_nlg", adapter_version=1, predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN")) +# The fine-tuned adapter is hosted at Predibase (adapter_version must be specified). +model = Predibase( + model="mistral-7b", + predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"), + predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted) + adapter_id="e2e_nlg", + adapter_version=1, +) response = model("Can you recommend me a nice dry wine?") print(response) @@ -47,7 +57,12 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}" from langchain_community.llms import Predibase # The fine-tuned adapter is hosted at HuggingFace (adapter_version does not apply and will be ignored). -model = Predibase(model="mistral-7b"", adapter_id="predibase/e2e_nlg", predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN")) +model = Predibase( + model="mistral-7b", + predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"), + predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted) + adapter_id="predibase/e2e_nlg", +) response = model("Can you recommend me a nice dry wine?") print(response) diff --git a/docs/docs/integrations/providers/zep.mdx b/docs/docs/integrations/providers/zep.mdx index 28847fdb6ca..c86787769a6 100644 --- a/docs/docs/integrations/providers/zep.mdx +++ b/docs/docs/integrations/providers/zep.mdx @@ -1,23 +1,40 @@ # Zep +> Recall, understand, and extract data from chat histories. Power personalized AI experiences. ->[Zep](http://www.getzep.com) is an open source platform for productionizing LLM apps. Go from a prototype -built in LangChain or LlamaIndex, or a custom app, to production in minutes without -rewriting code. +>[Zep](http://www.getzep.com) is a long-term memory service for AI Assistant apps. +> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant, +> while also reducing hallucinations, latency, and cost. ->Key Features: +## How Zep works + +Zep persists and recalls chat histories, and automatically generates summaries and other artifacts from these chat histories. +It also embeds messages and summaries, enabling you to search Zep for relevant context from past conversations. +Zep does all of this asynchronously, ensuring these operations don't impact your user's chat experience. +Data is persisted to a database, allowing you to scale out when growth demands. 
+ +Zep also provides a simple, easy-to-use abstraction for document vector search called Document Collections. +This is designed to complement Zep's core memory features, but is not designed to be a general-purpose vector database. + +Zep allows you to be more intentional about constructing your prompt: +- automatically adding a few recent messages, with the number customized for your app; +- a summary of recent conversations prior to the messages above; +- and/or contextually relevant summaries or messages surfaced from the entire chat session; +- and/or relevant business data from Zep Document Collections. + +## What is Zep Cloud? +[Zep Cloud](http://www.getzep.com) is a managed service with Zep Open Source at its core. +In addition to Zep Open Source's memory management features, Zep Cloud offers: +- **Fact Extraction**: Automatically build fact tables from conversations, without having to define a data schema upfront. +- **Dialog Classification**: Instantly and accurately classify chat dialog. Understand user intent and emotion, segment users, and more. Route chains based on semantic context, and trigger events. +- **Structured Data Extraction**: Quickly extract business data from chat conversations using a schema you define. Understand what your Assistant should ask for next in order to complete its task. + +> Interested in Zep Cloud? See [Zep Cloud Installation Guide](https://help.getzep.com/sdks), [Zep Cloud Message History Example](https://help.getzep.com/langchain/examples/messagehistory-example), [Zep Cloud Vector Store Example](https://help.getzep.com/langchain/examples/vectorstore-example) + +## Open Source Installation and Setup + +> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep) > ->- **Fast!** Zep operates independently of the your chat loop, ensuring a snappy user experience. ->- **Chat History Memory, Archival, and Enrichment**, populate your prompts with relevant chat history, sumamries, named entities, intent data, and more. ->- **Vector Search over Chat History and Documents** Automatic embedding of documents, chat histories, and summaries. Use Zep's similarity or native MMR Re-ranked search to find the most relevant. ->- **Manage Users and their Chat Sessions** Users and their Chat Sessions are first-class citizens in Zep, allowing you to manage user interactions with your bots or agents easily. ->- **Records Retention and Privacy Compliance** Comply with corporate and regulatory mandates for records retention while ensuring compliance with privacy regulations such as CCPA and GDPR. Fulfill *Right To Be Forgotten* requests with a single API call - ->Zep project: [https://github.com/getzep/zep](https://github.com/getzep/zep) -> ->Docs: [https://docs.getzep.com/](https://docs.getzep.com/) - - -## Installation and Setup +> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/) 1. Install the Zep service. See the [Zep Quick Start Guide](https://docs.getzep.com/deployment/quickstart/). 
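Before the Zep retriever and vector store notebooks below, a minimal sketch of wiring Zep's chat history into LangChain memory may help orient the reader. This is not part of the diff: it assumes a local Zep Open Source server on its default port, and the session id is a hypothetical placeholder.

```python
from langchain.memory import ZepMemory

# Placeholder connection details: a local Zep Open Source server
# listens on http://localhost:8000 by default.
ZEP_API_URL = "http://localhost:8000"

memory = ZepMemory(
    session_id="user-session-123",  # hypothetical session identifier
    url=ZEP_API_URL,
    api_key=None,  # set only if your Zep server has auth enabled
    memory_key="chat_history",
)
```

The resulting `memory` can be passed to a chain or agent as usual; Zep persists, enriches, and summarizes the session's messages asynchronously, as described above.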
diff --git a/docs/docs/integrations/retrievers/zep_memorystore.ipynb b/docs/docs/integrations/retrievers/zep_memorystore.ipynb index 7d296a7b29e..aaa8a053fd1 100644 --- a/docs/docs/integrations/retrievers/zep_memorystore.ipynb +++ b/docs/docs/integrations/retrievers/zep_memorystore.ipynb @@ -1,29 +1,28 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", - "metadata": {}, "source": [ "# Zep\n", "## Retriever Example for [Zep](https://docs.getzep.com/)\n", "\n", - "### Fast, Scalable Building Blocks for LLM Apps\n", - "Zep is an open source platform for productionizing LLM apps. Go from a prototype\n", - "built in LangChain or LlamaIndex, or a custom app, to production in minutes without\n", - "rewriting code.\n", + "> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n", "\n", - "Key Features:\n", + "> [Zep](http://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", "\n", - "- **Fast!** Zep operates independently of the your chat loop, ensuring a snappy user experience.\n", - "- **Chat History Memory, Archival, and Enrichment**, populate your prompts with relevant chat history, sumamries, named entities, intent data, and more.\n", - "- **Vector Search over Chat History and Documents** Automatic embedding of documents, chat histories, and summaries. Use Zep's similarity or native MMR Re-ranked search to find the most relevant.\n", - "- **Manage Users and their Chat Sessions** Users and their Chat Sessions are first-class citizens in Zep, allowing you to manage user interactions with your bots or agents easily.\n", - "- **Records Retention and Privacy Compliance** Comply with corporate and regulatory mandates for records retention while ensuring compliance with privacy regulations such as CCPA and GDPR. Fulfill *Right To Be Forgotten* requests with a single API call\n", + "> Interested in Zep Cloud? 
See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and [Zep Cloud Retriever Example](https://help.getzep.com/langchain/examples/rag-message-history-example)\n", "\n", - "Zep project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n", - "Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n" - ] + "## Open Source Installation and Setup\n", + "\n", + "> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n", + ">\n", + "> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)" + ], + "metadata": { + "collapsed": false + } }, { "attachments": {}, @@ -54,10 +53,7 @@ "end_time": "2023-08-11T20:31:12.231459Z", "start_time": "2023-08-11T20:31:11.211176Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [], "source": [ @@ -109,10 +105,7 @@ "end_time": "2023-08-11T20:31:12.342790Z", "start_time": "2023-08-11T20:31:12.235291Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [], "source": [ @@ -130,10 +123,7 @@ "end_time": "2023-08-11T20:31:14.455269Z", "start_time": "2023-08-11T20:31:12.345635Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [], "source": [ @@ -283,10 +273,7 @@ "end_time": "2023-08-11T20:31:14.758738Z", "start_time": "2023-08-11T20:31:14.458850Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -333,10 +320,7 @@ "end_time": "2023-08-11T20:31:14.922838Z", "start_time": "2023-08-11T20:31:14.751737Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -376,10 +360,7 @@ "end_time": "2023-08-11T20:31:14.923032Z", "start_time": "2023-08-11T20:31:14.918181Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { diff --git a/docs/docs/integrations/vectorstores/zep.ipynb b/docs/docs/integrations/vectorstores/zep.ipynb index 588bd352995..917fb52a6a7 100644 --- a/docs/docs/integrations/vectorstores/zep.ipynb +++ b/docs/docs/integrations/vectorstores/zep.ipynb @@ -2,37 +2,21 @@ "cells": [ { "cell_type": "markdown", - "id": "9eb8dfa6fdb71ef5", - "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } - }, "source": [ "# Zep\n", + "> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n", "\n", - ">[Zep](https://docs.getzep.com/) is an open-source platform for LLM apps. Go from a prototype\n", - ">built in LangChain or LlamaIndex, or a custom app, to production in minutes without rewriting code.\n", + ">[Zep](http://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", "\n", - "## Key Features:\n", + "> Interested in Zep Cloud? 
See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and [Zep Cloud Vector Store example](https://help.getzep.com/langchain/examples/vectorstore-example)\n", "\n", - "- **Fast!** `Zep` operates independently of your chat loop, ensuring a snappy user experience.\n", - "- **Chat History Memory, Archival, and Enrichment**, populate your prompts with relevant chat history, summaries, named entities, intent data, and more.\n", - "- **Vector Search over Chat History and Documents** Automatic embedding of documents, chat histories, and summaries. Use Zep's similarity or native MMR Re-ranked search to find the most relevant.\n", - "- **Manage Users and their Chat Sessions** Users and their Chat Sessions are first-class citizens in Zep, allowing you to manage user interactions with your bots or agents easily.\n", - "- **Records Retention and Privacy Compliance** Comply with corporate and regulatory mandates for records retention while ensuring compliance with privacy regulations such as CCPA and GDPR. Fulfill *Right To Be Forgotten* requests with a single API call\n", + "## Open Source Installation and Setup\n", "\n", - "**Note:** The `ZepVectorStore` works with `Documents` and is intended to be used as a `Retriever`.\n", - "It offers separate functionality to Zep's `ZepMemory` class, which is designed for persisting, enriching\n", - "and searching your user's chat history.\n", - "\n", - "## Installation\n", - "\n", - "Follow the [Zep Quickstart Guide](https://docs.getzep.com/deployment/quickstart/) to install and get started with Zep.\n", - "\n", - "You'll need your Zep API URL and optionally an API key to use the Zep VectorStore. \n", - "See the [Zep docs](https://docs.getzep.com) for more information.\n", + "> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n", + ">\n", + "> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n", "\n", "## Usage\n", "\n", @@ -44,16 +28,17 @@ "- If you pass in an `Embeddings` instance Zep will use this to embed documents rather than auto-embed them.\n", "You must also set your document collection to `isAutoEmbedded === false`. \n", "- If you set your collection to `isAutoEmbedded === false`, you must pass in an `Embeddings` instance."
- ] + ], + "metadata": { + "collapsed": false + }, + "id": "182f1b4f6cc28385" }, { "cell_type": "markdown", "id": "9a3a11aab1412d98", "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "source": [ "## Load or create a Collection from documents" @@ -68,10 +53,7 @@ "end_time": "2023-08-13T01:07:50.672390Z", "start_time": "2023-08-13T01:07:48.777799Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [], "source": [ @@ -125,10 +107,7 @@ "end_time": "2023-08-13T01:07:53.807663Z", "start_time": "2023-08-13T01:07:50.671241Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -174,10 +153,7 @@ "cell_type": "markdown", "id": "94ca9dfa7d0ecaa5", "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "source": [ "## Simarility Search Query over the Collection" @@ -192,10 +168,7 @@ "end_time": "2023-08-13T01:07:54.195988Z", "start_time": "2023-08-13T01:07:53.808550Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -228,8 +201,7 @@ "mechanical connexion of the solid members of the bodies of men and\n", "animals, but likewise the structure and operation of the softer parts,\n", "including the muscles, integuments, membranes, &c. the nature, motion, -> 0.8899750214770481 \n", - "====\n", - "\n" + "====\n" ] } ], @@ -247,10 +219,7 @@ "cell_type": "markdown", "id": "e02b61a9af0b2c80", "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "source": [ "## Search over Collection Re-ranked by MMR\n", @@ -267,10 +236,7 @@ "end_time": "2023-08-13T01:07:54.394873Z", "start_time": "2023-08-13T01:07:54.180901Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -303,8 +269,7 @@ "expedients adopted in this machinery;--although such is the perfection\n", "of their action, that in any ordinary case they would be regarded as\n", "having attained the ends in view with an almost superfluous degree of \n", - "====\n", - "\n" + "====\n" ] } ], @@ -320,10 +285,7 @@ "cell_type": "markdown", "id": "42455e31d4ab0d68", "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "source": [ "# Filter by Metadata\n", @@ -340,10 +302,7 @@ "end_time": "2023-08-13T01:08:06.323569Z", "start_time": "2023-08-13T01:07:54.381822Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -389,10 +348,7 @@ "cell_type": "markdown", "id": "5b225f3ae1e61de8", "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "source": [ "We see results from both books. 
Note the `source` metadata" @@ -407,10 +363,7 @@ "end_time": "2023-08-13T01:08:06.504769Z", "start_time": "2023-08-13T01:08:06.325435Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -442,8 +395,7 @@ "astronomer is interrupted in his pursuit, and diverted from his task of\n", "observation by the irksome labours of computation, or his diligence in\n", "observing becomes ineffectual for want of yet greater industry of -> {'source': 'https://www.gutenberg.org/cache/epub/71292/pg71292.txt'} \n", - "====\n", - "\n" + "====\n" ] } ], @@ -459,10 +411,7 @@ "cell_type": "markdown", "id": "7b81d7cae351a1ec", "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "source": [ "Now, we set up a filter" @@ -477,10 +426,7 @@ "end_time": "2023-08-13T01:08:06.672836Z", "start_time": "2023-08-13T01:08:06.505944Z" }, - "collapsed": false, - "jupyter": { - "outputs_hidden": false - } + "collapsed": false }, "outputs": [ { @@ -518,8 +464,7 @@ "shutter, heavily barred, was folded across it. It was a wonderfully\n", "silent house. There was an old clock ticking loudly somewhere in the\n", "passage, but otherwise everything was deadly still. A vague feeling of -> {'source': 'https://www.gutenberg.org/files/48320/48320-0.txt'} \n", - "====\n", - "\n" + "====\n" ] } ], diff --git a/docs/docs/modules/model_io/chat/structured_output.ipynb b/docs/docs/modules/model_io/chat/structured_output.ipynb index aa0c956ee10..e76f9926438 100644 --- a/docs/docs/modules/model_io/chat/structured_output.ipynb +++ b/docs/docs/modules/model_io/chat/structured_output.ipynb @@ -15,7 +15,7 @@ "id": "6e3f0f72", "metadata": {}, "source": [ - "# [beta] Structured Output\n", + "# Structured Output\n", "\n", "It is often crucial to have LLMs return structured output. This is because oftentimes the outputs of the LLMs are used in downstream applications, where specific arguments are required. 
Having the LLM return structured output reliably is necessary for that.\n", "\n", @@ -39,21 +39,14 @@ }, { "cell_type": "code", - "execution_count": 2, - "id": "08029f4e", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.pydantic_v1 import BaseModel, Field" - ] - }, - { - "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "id": "070bf702", "metadata": {}, "outputs": [], "source": [ + "from langchain_core.pydantic_v1 import BaseModel, Field\n", + "\n", + "\n", "class Joke(BaseModel):\n", " setup: str = Field(description=\"The setup of the joke\")\n", " punchline: str = Field(description=\"The punchline to the joke\")" @@ -93,7 +86,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 3, "id": "6700994a", "metadata": {}, "outputs": [], @@ -104,17 +97,17 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 4, "id": "c55a61b8", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "Joke(setup='Why was the cat sitting on the computer?', punchline='It wanted to keep an eye on the mouse!')" + "Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!')" ] }, - "execution_count": 10, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -135,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 5, "id": "df0370e3", "metadata": {}, "outputs": [], @@ -145,17 +138,17 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 6, "id": "23844a26", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "Joke(setup=\"Why don't cats play poker in the jungle?\", punchline='Too many cheetahs!')" + "Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!')" ] }, - "execution_count": 14, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -180,7 +173,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 7, "id": "ad45fdd8", "metadata": {}, "outputs": [], @@ -252,7 +245,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "id": "649f9632", "metadata": {}, "outputs": [ @@ -262,7 +255,7 @@ "Joke(setup='Why did the dog sit in the shade?', punchline='To avoid getting burned.')" ] }, - "execution_count": 12, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -287,7 +280,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 12, "id": "bffd3fad", "metadata": {}, "outputs": [], @@ -297,7 +290,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "id": "c8bd7549", "metadata": {}, "outputs": [], @@ -308,10 +301,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "17b15816", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "Joke(setup=\"Why don't cats play poker in the jungle?\", punchline='Too many cheetahs!')" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "structured_llm.invoke(\"Tell me a joke about cats\")" ] @@ -328,7 +332,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 15, "id": "9b9617e3", "metadata": {}, "outputs": [], @@ -340,7 +344,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 16, "id": "90549664", "metadata": {}, "outputs": [], @@ -355,7 +359,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 
17, "id": "01da39be", "metadata": {}, "outputs": [ @@ -365,7 +369,7 @@ "Joke(setup='Why did the cat sit on the computer?', punchline='To keep an eye on the mouse!')" ] }, - "execution_count": 25, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -388,7 +392,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 18, "id": "70511bc3", "metadata": {}, "outputs": [], @@ -408,19 +412,10 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 19, "id": "be9fdf04", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/reag/src/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change.\n", - " warn_beta(\n" - ] - } - ], + "outputs": [], "source": [ "model = ChatGroq()\n", "structured_llm = model.with_structured_output(Joke)" @@ -428,7 +423,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 20, "id": "e13f4676", "metadata": {}, "outputs": [ @@ -438,7 +433,7 @@ "Joke(setup=\"Why don't cats play poker in the jungle?\", punchline='Too many cheetahs!')" ] }, - "execution_count": 7, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -459,7 +454,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 21, "id": "86574fb8", "metadata": {}, "outputs": [], @@ -469,7 +464,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 22, "id": "01dced9c", "metadata": {}, "outputs": [ @@ -479,7 +474,7 @@ "Joke(setup=\"Why don't cats play poker in the jungle?\", punchline='Too many cheetahs!')" ] }, - "execution_count": 9, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -504,7 +499,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 23, "id": "12682237-6689-4408-88b1-3595feac447f", "metadata": {}, "outputs": [ @@ -514,7 +509,7 @@ "Joke(setup='What do you call a cat that loves to bowl?', punchline='An alley cat!')" ] }, - "execution_count": 5, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -541,17 +536,17 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 2, "id": "24421189-02bf-4589-a91a-197584c4a696", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "Joke(setup='A cat-ch', punchline='What do you call a cat that loves to play fetch?')" + "Joke(setup='Why did the scarecrow win an award?', punchline='Why did the scarecrow win an award? 
Because he was outstanding in his field.')" ] }, - "execution_count": 7, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -563,13 +558,21 @@ "structured_llm = llm.with_structured_output(Joke)\n", "structured_llm.invoke(\"Tell me a joke about cats\")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2630a2cb", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "poetry-venv-2", + "display_name": ".venv", "language": "python", - "name": "poetry-venv-2" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -581,7 +584,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/libs/community/langchain_community/chat_message_histories/file.py b/libs/community/langchain_community/chat_message_histories/file.py index 41dbd2afaad..e32f668efb5 100644 --- a/libs/community/langchain_community/chat_message_histories/file.py +++ b/libs/community/langchain_community/chat_message_histories/file.py @@ -1,5 +1,40 @@ -from langchain_core.chat_history import FileChatMessageHistory +import json +from pathlib import Path +from typing import List -__all__ = [ - "FileChatMessageHistory", -] +from langchain_core.chat_history import ( + BaseChatMessageHistory, +) +from langchain_core.messages import BaseMessage, messages_from_dict, messages_to_dict + + +class FileChatMessageHistory(BaseChatMessageHistory): + """Chat message history that stores history in a local file.""" + + def __init__(self, file_path: str) -> None: + """Initialize the file path for the chat history. + + Args: + file_path: The path to the local file to store the chat history. + """ + self.file_path = Path(file_path) + if not self.file_path.exists(): + self.file_path.touch() + self.file_path.write_text(json.dumps([])) + + @property + def messages(self) -> List[BaseMessage]: # type: ignore + """Retrieve the messages from the local file""" + items = json.loads(self.file_path.read_text()) + messages = messages_from_dict(items) + return messages + + def add_message(self, message: BaseMessage) -> None: + """Append the message to the record in the local file""" + messages = messages_to_dict(self.messages) + messages.append(messages_to_dict([message])[0]) + self.file_path.write_text(json.dumps(messages)) + + def clear(self) -> None: + """Clear session memory from the local file""" + self.file_path.write_text(json.dumps([])) diff --git a/libs/community/langchain_community/chat_models/bedrock.py b/libs/community/langchain_community/chat_models/bedrock.py index 74eb1af216e..852eb0091f6 100644 --- a/libs/community/langchain_community/chat_models/bedrock.py +++ b/libs/community/langchain_community/chat_models/bedrock.py @@ -193,7 +193,13 @@ class ChatPromptAdapter: ) -_message_type_lookups = {"human": "user", "ai": "assistant"} +_message_type_lookups = { + "human": "user", + "ai": "assistant", + "AIMessageChunk": "assistant", + "HumanMessageChunk": "user", + "function": "user", +} @deprecated( diff --git a/libs/community/langchain_community/chat_models/perplexity.py b/libs/community/langchain_community/chat_models/perplexity.py index 23d7c9e09a6..110f4518e3e 100644 --- a/libs/community/langchain_community/chat_models/perplexity.py +++ b/libs/community/langchain_community/chat_models/perplexity.py @@ -1,4 +1,5 @@ """Wrapper around Perplexity APIs.""" + from __future__ import annotations import logging @@ -63,10 +64,12 @@ class 
ChatPerplexity(BaseChatModel): """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" - pplx_api_key: Optional[str] = None + pplx_api_key: Optional[str] = Field(None, alias="api_key") """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" - request_timeout: Optional[Union[float, Tuple[float, float]]] = None + request_timeout: Optional[Union[float, Tuple[float, float]]] = Field( + None, alias="timeout" + ) """Timeout for requests to PerplexityChat completion API. Default is 600 seconds.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" diff --git a/libs/community/langchain_community/llms/predibase.py b/libs/community/langchain_community/llms/predibase.py index e3f5da7fd9e..b45ff1d1ea9 100644 --- a/libs/community/langchain_community/llms/predibase.py +++ b/libs/community/langchain_community/llms/predibase.py @@ -1,3 +1,4 @@ +import os from typing import Any, Dict, List, Mapping, Optional, Union from langchain_core.callbacks import CallbackManagerForLLMRun @@ -17,13 +18,15 @@ class Predibase(LLM): An optional `adapter_id` parameter is the Predibase ID or HuggingFace ID of a fine-tuned LLM adapter, whose base model is the `model` parameter; the fine-tuned adapter must be compatible with its base model; - otherwise, an error is raised. If a Predibase ID references the - fine-tuned adapter, then the `adapter_version` in the adapter repository can - be optionally specified; omitting it defaults to the most recent version. + otherwise, an error is raised. If the fine-tuned adapter is hosted at Predibase, + then `adapter_version` in the adapter repository must be specified. + + An optional `predibase_sdk_version` parameter defaults to latest SDK version. """ model: str predibase_api_key: SecretStr + predibase_sdk_version: Optional[str] = None adapter_id: Optional[str] = None adapter_version: Optional[int] = None model_kwargs: Dict[str, Any] = Field(default_factory=dict) @@ -46,65 +49,139 @@ class Predibase(LLM): run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: - try: - from predibase import PredibaseClient - from predibase.pql import get_session - from predibase.pql.api import ( - ServerResponseError, - Session, - ) - from predibase.resource.llm.interface import ( - HuggingFaceLLM, - LLMDeployment, - ) - from predibase.resource.llm.response import GeneratedResponse - from predibase.resource.model import Model - - session: Session = get_session( - token=self.predibase_api_key.get_secret_value(), - gateway="https://api.app.predibase.com/v1", - serving_endpoint="serving.app.predibase.com", - ) - pc: PredibaseClient = PredibaseClient(session=session) - except ImportError as e: - raise ImportError( - "Could not import Predibase Python package. " - "Please install it with `pip install predibase`." - ) from e - except ValueError as e: - raise ValueError("Your API key is not correct. 
Please try again") from e options: Dict[str, Union[str, float]] = ( self.model_kwargs or self.default_options_for_generation ) - base_llm_deployment: LLMDeployment = pc.LLM( - uri=f"pb://deployments/{self.model}" + if self._is_deprecated_sdk_version(): + try: + from predibase import PredibaseClient + from predibase.pql import get_session + from predibase.pql.api import ( + ServerResponseError, + Session, + ) + from predibase.resource.llm.interface import ( + HuggingFaceLLM, + LLMDeployment, + ) + from predibase.resource.llm.response import GeneratedResponse + from predibase.resource.model import Model + + session: Session = get_session( + token=self.predibase_api_key.get_secret_value(), + gateway="https://api.app.predibase.com/v1", + serving_endpoint="serving.app.predibase.com", + ) + pc: PredibaseClient = PredibaseClient(session=session) + except ImportError as e: + raise ImportError( + "Could not import Predibase Python package. " + "Please install it with `pip install predibase`." + ) from e + except ValueError as e: + raise ValueError("Your API key is not correct. Please try again") from e + + base_llm_deployment: LLMDeployment = pc.LLM( + uri=f"pb://deployments/{self.model}" + ) + result: GeneratedResponse + if self.adapter_id: + """ + Attempt to retrieve the fine-tuned adapter from a Predibase + repository. If absent, then load the fine-tuned adapter + from a HuggingFace repository. + """ + adapter_model: Union[Model, HuggingFaceLLM] + try: + adapter_model = pc.get_model( + name=self.adapter_id, + version=self.adapter_version, + model_id=None, + ) + except ServerResponseError: + # Predibase does not recognize the adapter ID (query HuggingFace). + adapter_model = pc.LLM(uri=f"hf://{self.adapter_id}") + result = base_llm_deployment.with_adapter(model=adapter_model).generate( + prompt=prompt, + options=options, + ) + else: + result = base_llm_deployment.generate( + prompt=prompt, + options=options, + ) + return result.response + + from predibase import Predibase + + os.environ["PREDIBASE_GATEWAY"] = "https://api.app.predibase.com" + predibase: Predibase = Predibase( + api_token=self.predibase_api_key.get_secret_value() ) - result: GeneratedResponse + + import requests + from lorax.client import Client as LoraxClient + from lorax.errors import GenerationError + from lorax.types import Response + + lorax_client: LoraxClient = predibase.deployments.client( + deployment_ref=self.model + ) + + response: Response if self.adapter_id: """ Attempt to retrieve the fine-tuned adapter from a Predibase repository. If absent, then load the fine-tuned adapter from a HuggingFace repository. """ - adapter_model: Union[Model, HuggingFaceLLM] - try: - adapter_model = pc.get_model( - name=self.adapter_id, - version=self.adapter_version, - model_id=None, - ) - except ServerResponseError: - # Predibase does not recognize the adapter ID (query HuggingFace). - adapter_model = pc.LLM(uri=f"hf://{self.adapter_id}") - result = base_llm_deployment.with_adapter(model=adapter_model).generate( - prompt=prompt, - options=options, - ) + if self.adapter_version: + # Since the adapter version is provided, query the Predibase repository. 
+ pb_adapter_id: str = f"{self.adapter_id}/{self.adapter_version}" + try: + response = lorax_client.generate( + prompt=prompt, + adapter_id=pb_adapter_id, + **options, + ) + except GenerationError as ge: + raise ValueError( + f"""An adapter with the ID "{pb_adapter_id}" cannot be \ +found in the Predibase repository of fine-tuned adapters.""" + ) from ge + else: + # The adapter version is omitted, + # hence look for the adapter ID in the HuggingFace repository. + try: + response = lorax_client.generate( + prompt=prompt, + adapter_id=self.adapter_id, + adapter_source="hub", + **options, + ) + except GenerationError as ge: + raise ValueError( + f"""Either an adapter with the ID "{self.adapter_id}" \ +cannot be found in a HuggingFace repository, or it is incompatible with the \ +base model (please make sure that the adapter configuration is consistent). +""" + ) from ge else: - result = base_llm_deployment.generate( - prompt=prompt, - options=options, - ) - return result.response + try: + response = lorax_client.generate( + prompt=prompt, + **options, + ) + except requests.JSONDecodeError as jde: + raise ValueError( + f"""An LLM with the deployment ID "{self.model}" cannot be found \ +at Predibase (please refer to \ +"https://docs.predibase.com/user-guide/inference/models" for the list of \ +supported models). +""" + ) from jde + response_text = response.generated_text + + return response_text @property def _identifying_params(self) -> Mapping[str, Any]: @@ -112,3 +189,26 @@ class Predibase(LLM): return { **{"model_kwargs": self.model_kwargs}, } + + def _is_deprecated_sdk_version(self) -> bool: + try: + import semantic_version + from predibase.version import __version__ as current_version + from semantic_version.base import Version + + sdk_semver_deprecated: Version = semantic_version.Version( + version_string="2024.4.8" + ) + actual_current_version: str = self.predibase_sdk_version or current_version + sdk_semver_current: Version = semantic_version.Version( + version_string=actual_current_version + ) + return not ( + (sdk_semver_current > sdk_semver_deprecated) + or ("+dev" in actual_current_version) + ) + except ImportError as e: + raise ImportError( + "Could not import Predibase Python package. " + "Please install it with `pip install semantic_version predibase`." + ) from e diff --git a/libs/community/langchain_community/vectorstores/azuresearch.py b/libs/community/langchain_community/vectorstores/azuresearch.py index b07628d5a7f..906bb9d00d8 100644 --- a/libs/community/langchain_community/vectorstores/azuresearch.py +++ b/libs/community/langchain_community/vectorstores/azuresearch.py @@ -479,7 +479,7 @@ class AzureSearch(VectorStore): def hybrid_search_with_score( self, query: str, k: int = 4, filters: Optional[str] = None ) -> List[Tuple[Document, float]]: - """Return docs most similar to query with an hybrid query. + """Return docs most similar to query with a hybrid query. Args: query: Text to look up documents similar to. @@ -558,7 +558,7 @@ class AzureSearch(VectorStore): def semantic_hybrid_search_with_score_and_rerank( self, query: str, k: int = 4, filters: Optional[str] = None ) -> List[Tuple[Document, float, float]]: - """Return docs most similar to query with an hybrid query. + """Return docs most similar to query with a hybrid query. Args: query: Text to look up documents similar to. 
@@ -702,7 +702,12 @@ class AzureSearchVectorStoreRetriever(BaseRetriever): if "search_type" in values: search_type = values["search_type"] if search_type not in ( - allowed_search_types := ("similarity", "hybrid", "semantic_hybrid") + allowed_search_types := ( + "similarity", + "similarity_score_threshold", + "hybrid", + "semantic_hybrid", + ) ): raise ValueError( f"search_type of {search_type} not allowed. Valid values are: " @@ -718,6 +723,13 @@ class AzureSearchVectorStoreRetriever(BaseRetriever): ) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.vector_search(query, k=self.k, **kwargs) + elif self.search_type == "similarity_score_threshold": + docs = [ + doc + for doc, _ in self.vectorstore.similarity_search_with_relevance_scores( + query, k=self.k, **kwargs + ) + ] elif self.search_type == "hybrid": docs = self.vectorstore.hybrid_search(query, k=self.k, **kwargs) elif self.search_type == "semantic_hybrid": diff --git a/libs/core/tests/unit_tests/chat_history/test_file_chat_message_history.py b/libs/community/tests/unit_tests/chat_message_histories/test_file_chat_message_history.py similarity index 97% rename from libs/core/tests/unit_tests/chat_history/test_file_chat_message_history.py rename to libs/community/tests/unit_tests/chat_message_histories/test_file_chat_message_history.py index 4c292c61e5a..f069ff24935 100644 --- a/libs/core/tests/unit_tests/chat_history/test_file_chat_message_history.py +++ b/libs/community/tests/unit_tests/chat_message_histories/test_file_chat_message_history.py @@ -3,10 +3,10 @@ from pathlib import Path from typing import Generator import pytest - -from langchain_core.chat_history import FileChatMessageHistory from langchain_core.messages import AIMessage, HumanMessage +from langchain_community.chat_message_histories import FileChatMessageHistory + @pytest.fixture def file_chat_message_history() -> Generator[FileChatMessageHistory, None, None]: diff --git a/libs/community/tests/unit_tests/chat_models/test_perplexity.py b/libs/community/tests/unit_tests/chat_models/test_perplexity.py index 071f1340dd5..1ae6cd8b5fd 100644 --- a/libs/community/tests/unit_tests/chat_models/test_perplexity.py +++ b/libs/community/tests/unit_tests/chat_models/test_perplexity.py @@ -1,4 +1,5 @@ """Test Perplexity Chat API wrapper.""" + import os import pytest @@ -25,6 +26,17 @@ def test_perplexity_initialization() -> None: """Test perplexity initialization.""" # Verify that chat perplexity can be initialized using a secret key provided # as a parameter rather than an environment variable. 
- ChatPerplexity( - model="test", perplexity_api_key="test", temperature=0.7, verbose=True - ) + for model in [ + ChatPerplexity( + model="test", timeout=1, api_key="test", temperature=0.7, verbose=True + ), + ChatPerplexity( + model="test", + request_timeout=1, + pplx_api_key="test", + temperature=0.7, + verbose=True, + ), + ]: + assert model.request_timeout == 1 + assert model.pplx_api_key == "test" diff --git a/libs/community/tests/unit_tests/llms/test_predibase.py b/libs/community/tests/unit_tests/llms/test_predibase.py index 9a9fba7f0ef..e2b2dc128d9 100644 --- a/libs/community/tests/unit_tests/llms/test_predibase.py +++ b/libs/community/tests/unit_tests/llms/test_predibase.py @@ -19,6 +19,22 @@ def test_api_key_masked_when_passed_via_constructor( assert captured.out == "**********" +def test_specifying_predibase_sdk_version_argument() -> None: + llm = Predibase( + model="my_llm", + predibase_api_key="secret-api-key", + ) + assert not llm.predibase_sdk_version + + legacy_predibase_sdk_version = "2024.4.8" + llm = Predibase( + model="my_llm", + predibase_api_key="secret-api-key", + predibase_sdk_version=legacy_predibase_sdk_version, + ) + assert llm.predibase_sdk_version == legacy_predibase_sdk_version + + def test_specifying_adapter_id_argument() -> None: llm = Predibase(model="my_llm", predibase_api_key="secret-api-key") assert not llm.adapter_id @@ -33,8 +49,8 @@ def test_specifying_adapter_id_argument() -> None: llm = Predibase( model="my_llm", - adapter_id="my-other-hf-adapter", predibase_api_key="secret-api-key", + adapter_id="my-other-hf-adapter", ) assert llm.adapter_id == "my-other-hf-adapter" assert llm.adapter_version is None @@ -55,9 +71,9 @@ def test_specifying_adapter_id_and_adapter_version_arguments() -> None: llm = Predibase( model="my_llm", + predibase_api_key="secret-api-key", adapter_id="my-other-hf-adapter", adapter_version=3, - predibase_api_key="secret-api-key", ) assert llm.adapter_id == "my-other-hf-adapter" assert llm.adapter_version == 3 diff --git a/libs/core/langchain_core/chat_history.py b/libs/core/langchain_core/chat_history.py index 4388b373305..da14d070657 100644 --- a/libs/core/langchain_core/chat_history.py +++ b/libs/core/langchain_core/chat_history.py @@ -16,9 +16,7 @@ """ # noqa: E501 from __future__ import annotations -import json from abc import ABC, abstractmethod -from pathlib import Path from typing import List, Sequence, Union from langchain_core.messages import ( @@ -26,8 +24,6 @@ from langchain_core.messages import ( BaseMessage, HumanMessage, get_buffer_string, - messages_from_dict, - messages_to_dict, ) from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.runnables import run_in_executor @@ -215,35 +211,3 @@ class InMemoryChatMessageHistory(BaseChatMessageHistory, BaseModel): async def aclear(self) -> None: self.clear() - - -class FileChatMessageHistory(BaseChatMessageHistory): - """Chat message history that stores history in a local file.""" - - def __init__(self, file_path: str) -> None: - """Initialize the file path for the chat history. - - Args: - file_path: The path to the local file to store the chat history. 
- """ - self.file_path = Path(file_path) - if not self.file_path.exists(): - self.file_path.touch() - self.file_path.write_text(json.dumps([])) - - @property - def messages(self) -> List[BaseMessage]: # type: ignore - """Retrieve the messages from the local file""" - items = json.loads(self.file_path.read_text()) - messages = messages_from_dict(items) - return messages - - def add_message(self, message: BaseMessage) -> None: - """Append the message to the record in the local file""" - messages = messages_to_dict(self.messages) - messages.append(messages_to_dict([message])[0]) - self.file_path.write_text(json.dumps(messages)) - - def clear(self) -> None: - """Clear session memory from the local file""" - self.file_path.write_text(json.dumps([])) diff --git a/libs/core/langchain_core/indexing/__init__.py b/libs/core/langchain_core/indexing/__init__.py new file mode 100644 index 00000000000..d67ec5eec0c --- /dev/null +++ b/libs/core/langchain_core/indexing/__init__.py @@ -0,0 +1,15 @@ +"""Code to help indexing data into a vectorstore. + +This package contains helper logic to help deal with indexing data into +a vectorstore while avoiding duplicated content and over-writing content +if it's unchanged. +""" +from langchain_core.indexing.api import IndexingResult, aindex, index +from langchain_core.indexing.base import RecordManager + +__all__ = [ + "aindex", + "index", + "IndexingResult", + "RecordManager", +] diff --git a/libs/core/langchain_core/indexing/api.py b/libs/core/langchain_core/indexing/api.py new file mode 100644 index 00000000000..01bc9bffb2b --- /dev/null +++ b/libs/core/langchain_core/indexing/api.py @@ -0,0 +1,606 @@ +"""Module contains logic for indexing documents into vector stores.""" +from __future__ import annotations + +import hashlib +import json +import uuid +from itertools import islice +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Callable, + Dict, + Iterable, + Iterator, + List, + Literal, + Optional, + Sequence, + Set, + TypedDict, + TypeVar, + Union, + cast, +) + +from langchain_core.document_loaders.base import BaseLoader +from langchain_core.documents import Document +from langchain_core.indexing.base import RecordManager +from langchain_core.pydantic_v1 import root_validator +from langchain_core.vectorstores import VectorStore + +# Magic UUID to use as a namespace for hashing. +# Used to try and generate a unique UUID for each document +# from hashing the document content and metadata. 
+NAMESPACE_UUID = uuid.UUID(int=1984) + + +T = TypeVar("T") + + +def _hash_string_to_uuid(input_string: str) -> uuid.UUID: + """Hashes a string and returns the corresponding UUID.""" + hash_value = hashlib.sha1(input_string.encode("utf-8")).hexdigest() + return uuid.uuid5(NAMESPACE_UUID, hash_value) + + +def _hash_nested_dict_to_uuid(data: dict[Any, Any]) -> uuid.UUID: + """Hashes a nested dictionary and returns the corresponding UUID.""" + serialized_data = json.dumps(data, sort_keys=True) + hash_value = hashlib.sha1(serialized_data.encode("utf-8")).hexdigest() + return uuid.uuid5(NAMESPACE_UUID, hash_value) + + +class _HashedDocument(Document): + """A hashed document with a unique ID.""" + + uid: str + hash_: str + """The hash of the document including content and metadata.""" + content_hash: str + """The hash of the document content.""" + metadata_hash: str + """The hash of the document metadata.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @root_validator(pre=True) + def calculate_hashes(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Root validator to calculate content and metadata hash.""" + content = values.get("page_content", "") + metadata = values.get("metadata", {}) + + forbidden_keys = ("hash_", "content_hash", "metadata_hash") + + for key in forbidden_keys: + if key in metadata: + raise ValueError( + f"Metadata cannot contain key {key} as it " + f"is reserved for internal use." + ) + + content_hash = str(_hash_string_to_uuid(content)) + + try: + metadata_hash = str(_hash_nested_dict_to_uuid(metadata)) + except Exception as e: + raise ValueError( + f"Failed to hash metadata: {e}. " + f"Please use a dict that can be serialized using json." + ) + + values["content_hash"] = content_hash + values["metadata_hash"] = metadata_hash + values["hash_"] = str(_hash_string_to_uuid(content_hash + metadata_hash)) + + _uid = values.get("uid", None) + + if _uid is None: + values["uid"] = values["hash_"] + return values + + def to_document(self) -> Document: + """Return a Document object.""" + return Document( + page_content=self.page_content, + metadata=self.metadata, + ) + + @classmethod + def from_document( + cls, document: Document, *, uid: Optional[str] = None + ) -> _HashedDocument: + """Create a HashedDocument from a Document.""" + return cls( # type: ignore[call-arg] + uid=uid, # type: ignore[arg-type] + page_content=document.page_content, + metadata=document.metadata, + ) + + +def _batch(size: int, iterable: Iterable[T]) -> Iterator[List[T]]: + """Utility batching function.""" + it = iter(iterable) + while True: + chunk = list(islice(it, size)) + if not chunk: + return + yield chunk + + +async def _abatch(size: int, iterable: AsyncIterable[T]) -> AsyncIterator[List[T]]: + """Utility batching function.""" + batch: List[T] = [] + async for element in iterable: + if len(batch) < size: + batch.append(element) + + if len(batch) >= size: + yield batch + batch = [] + + if batch: + yield batch + + +def _get_source_id_assigner( + source_id_key: Union[str, Callable[[Document], str], None], +) -> Callable[[Document], Union[str, None]]: + """Get the source id from the document.""" + if source_id_key is None: + return lambda doc: None + elif isinstance(source_id_key, str): + return lambda doc: doc.metadata[source_id_key] + elif callable(source_id_key): + return source_id_key + else: + raise ValueError( + f"source_id_key should be either None, a string or a callable. " + f"Got {source_id_key} of type {type(source_id_key)}." 
+        )
+
+
+def _deduplicate_in_order(
+    hashed_documents: Iterable[_HashedDocument],
+) -> Iterator[_HashedDocument]:
+    """Deduplicate a list of hashed documents while preserving order."""
+    seen: Set[str] = set()
+
+    for hashed_doc in hashed_documents:
+        if hashed_doc.hash_ not in seen:
+            seen.add(hashed_doc.hash_)
+            yield hashed_doc
+
+
+# PUBLIC API
+
+
+class IndexingResult(TypedDict):
+    """A detailed breakdown of the result of the indexing operation."""
+
+    num_added: int
+    """Number of added documents."""
+    num_updated: int
+    """Number of updated documents because they were not up to date."""
+    num_deleted: int
+    """Number of deleted documents."""
+    num_skipped: int
+    """Number of skipped documents because they were already up to date."""
+
+
+def index(
+    docs_source: Union[BaseLoader, Iterable[Document]],
+    record_manager: RecordManager,
+    vector_store: VectorStore,
+    *,
+    batch_size: int = 100,
+    cleanup: Literal["incremental", "full", None] = None,
+    source_id_key: Union[str, Callable[[Document], str], None] = None,
+    cleanup_batch_size: int = 1_000,
+    force_update: bool = False,
+) -> IndexingResult:
+    """Index data from the loader into the vector store.
+
+    Indexing functionality uses a manager to keep track of which documents
+    are in the vector store.
+
+    This allows us to keep track of which documents were updated, which
+    documents were deleted, and which documents should be skipped.
+
+    For the time being, documents are indexed using their hashes, and users
+    are not able to specify the uid of the document.
+
+    IMPORTANT:
+       If cleanup is set to "incremental" or "full", the loader should return
+       the entire dataset, and not just a subset of the dataset.
+       Otherwise, cleanup will remove documents that it is not
+       supposed to.
+
+    Args:
+        docs_source: Data loader or iterable of documents to index.
+        record_manager: Timestamped set to keep track of which documents were
+            updated.
+        vector_store: Vector store to index the documents into.
+        batch_size: Batch size to use when indexing.
+        cleanup: How to handle clean up of documents.
+            - Incremental: Cleans up all documents that haven't been updated AND
+              that are associated with source ids that were seen
+              during indexing.
+              Clean up is done continuously during indexing, helping
+              to minimize the probability of users seeing duplicated
+              content.
+            - Full: Delete all documents that have not been returned by the loader
+              during this run of indexing.
+              Clean up runs after all documents have been indexed.
+              This means that users may see duplicated content during indexing.
+            - None: Do not delete any documents.
+        source_id_key: Optional key that helps identify the original source
+            of the document.
+        cleanup_batch_size: Batch size to use when cleaning up documents.
+        force_update: Force update documents even if they are present in the
+            record manager. Useful if you are re-indexing with updated embeddings.
+
+    Returns:
+        Indexing result which contains information about how many documents
+        were added, updated, deleted, or skipped.
+    """
+    if cleanup not in {"incremental", "full", None}:
+        raise ValueError(
+            f"cleanup should be one of 'incremental', 'full' or None. "
+            f"Got {cleanup}."
+        )
+
+    if cleanup == "incremental" and source_id_key is None:
+        raise ValueError("Source id key is required when cleanup mode is incremental.")
+
+    # Check that the Vectorstore has required methods implemented
+    methods = ["delete", "add_documents"]
+
+    for method in methods:
+        if not hasattr(vector_store, method):
+            raise ValueError(
+                f"Vectorstore {vector_store} does not have required method {method}"
+            )
+
+    if type(vector_store).delete == VectorStore.delete:
+        # Checking if the vectorstore has overridden the default delete method
+        # implementation which just raises a NotImplementedError
+        raise ValueError("Vectorstore has not implemented the delete method")
+
+    if isinstance(docs_source, BaseLoader):
+        try:
+            doc_iterator = docs_source.lazy_load()
+        except NotImplementedError:
+            doc_iterator = iter(docs_source.load())
+    else:
+        doc_iterator = iter(docs_source)
+
+    source_id_assigner = _get_source_id_assigner(source_id_key)
+
+    # Mark when the update started.
+    index_start_dt = record_manager.get_time()
+    num_added = 0
+    num_skipped = 0
+    num_updated = 0
+    num_deleted = 0
+
+    for doc_batch in _batch(batch_size, doc_iterator):
+        hashed_docs = list(
+            _deduplicate_in_order(
+                [_HashedDocument.from_document(doc) for doc in doc_batch]
+            )
+        )
+
+        source_ids: Sequence[Optional[str]] = [
+            source_id_assigner(doc) for doc in hashed_docs
+        ]
+
+        if cleanup == "incremental":
+            # If the cleanup mode is incremental, source ids are required.
+            for source_id, hashed_doc in zip(source_ids, hashed_docs):
+                if source_id is None:
+                    raise ValueError(
+                        "Source ids are required when cleanup mode is incremental. "
+                        f"Document that starts with "
+                        f"content: {hashed_doc.page_content[:100]} was not assigned "
+                        f"a source id."
+                    )
+            # source ids cannot be None after for loop above.
+            source_ids = cast(Sequence[str], source_ids)  # type: ignore[assignment]
+
+        exists_batch = record_manager.exists([doc.uid for doc in hashed_docs])
+
+        # Filter out documents that already exist in the record store.
+        uids = []
+        docs_to_index = []
+        uids_to_refresh = []
+        seen_docs: Set[str] = set()
+        for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
+            if doc_exists:
+                if force_update:
+                    seen_docs.add(hashed_doc.uid)
+                else:
+                    uids_to_refresh.append(hashed_doc.uid)
+                    continue
+            uids.append(hashed_doc.uid)
+            docs_to_index.append(hashed_doc.to_document())
+
+        # Update refresh timestamp
+        if uids_to_refresh:
+            record_manager.update(uids_to_refresh, time_at_least=index_start_dt)
+            num_skipped += len(uids_to_refresh)
+
+        # Be pessimistic and assume that all vector store writes will fail.
+        # First write to vector store
+        if docs_to_index:
+            vector_store.add_documents(docs_to_index, ids=uids, batch_size=batch_size)
+            num_added += len(docs_to_index) - len(seen_docs)
+            num_updated += len(seen_docs)
+
+        # And only then update the record store.
+        # Update ALL records, even if they already exist since we want to refresh
+        # their timestamp.
+        record_manager.update(
+            [doc.uid for doc in hashed_docs],
+            group_ids=source_ids,
+            time_at_least=index_start_dt,
+        )
+
+        # If source IDs are provided, we can do the deletion incrementally!
+        if cleanup == "incremental":
+            # Get the uids of the documents that were not returned by the loader.
+
+            # mypy isn't good enough to determine that source ids cannot be None
+            # here due to a check that's happening above, so we check again.
+            for source_id in source_ids:
+                if source_id is None:
+                    raise AssertionError("Source ids cannot be None here.")
+
+            _source_ids = cast(Sequence[str], source_ids)
+
+            uids_to_delete = record_manager.list_keys(
+                group_ids=_source_ids, before=index_start_dt
+            )
+            if uids_to_delete:
+                # First delete from vector store.
+                vector_store.delete(uids_to_delete)
+                # Then delete from record store.
+                record_manager.delete_keys(uids_to_delete)
+                num_deleted += len(uids_to_delete)
+
+    if cleanup == "full":
+        while uids_to_delete := record_manager.list_keys(
+            before=index_start_dt, limit=cleanup_batch_size
+        ):
+            # First delete from vector store.
+            vector_store.delete(uids_to_delete)
+            # Then delete from record manager.
+            record_manager.delete_keys(uids_to_delete)
+            num_deleted += len(uids_to_delete)
+
+    return {
+        "num_added": num_added,
+        "num_updated": num_updated,
+        "num_skipped": num_skipped,
+        "num_deleted": num_deleted,
+    }
+
+
+# Define an asynchronous generator function
+async def _to_async_iterator(iterator: Iterable[T]) -> AsyncIterator[T]:
+    """Convert an iterable to an async iterator."""
+    for item in iterator:
+        yield item
+
+
+async def aindex(
+    docs_source: Union[BaseLoader, Iterable[Document], AsyncIterator[Document]],
+    record_manager: RecordManager,
+    vector_store: VectorStore,
+    *,
+    batch_size: int = 100,
+    cleanup: Literal["incremental", "full", None] = None,
+    source_id_key: Union[str, Callable[[Document], str], None] = None,
+    cleanup_batch_size: int = 1_000,
+    force_update: bool = False,
+) -> IndexingResult:
+    """Index data from the loader into the vector store.
+
+    Indexing functionality uses a manager to keep track of which documents
+    are in the vector store.
+
+    This allows us to keep track of which documents were updated, which
+    documents were deleted, and which documents should be skipped.
+
+    For the time being, documents are indexed using their hashes, and users
+    are not able to specify the uid of the document.
+
+    IMPORTANT:
+       If cleanup is set to "incremental" or "full", the loader should return
+       the entire dataset, and not just a subset of the dataset.
+       Otherwise, cleanup will remove documents that it is not
+       supposed to.
+
+    Args:
+        docs_source: Data loader or iterable of documents to index.
+        record_manager: Timestamped set to keep track of which documents were
+            updated.
+        vector_store: Vector store to index the documents into.
+        batch_size: Batch size to use when indexing.
+        cleanup: How to handle clean up of documents.
+            - Incremental: Cleans up all documents that haven't been updated AND
+              that are associated with source ids that were seen
+              during indexing.
+              Clean up is done continuously during indexing, helping
+              to minimize the probability of users seeing duplicated
+              content.
+            - Full: Delete all documents that have not been returned by the loader
+              during this run of indexing.
+              Clean up runs after all documents have been indexed.
+              This means that users may see duplicated content during indexing.
+            - None: Do not delete any documents.
+        source_id_key: Optional key that helps identify the original source
+            of the document.
+        cleanup_batch_size: Batch size to use when cleaning up documents.
+        force_update: Force update documents even if they are present in the
+            record manager. Useful if you are re-indexing with updated embeddings.
+
+    Returns:
+        Indexing result which contains information about how many documents
+        were added, updated, deleted, or skipped.
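+
+    Example:
+        A minimal sketch only; it assumes you already have a concrete
+        ``RecordManager`` implementation and a vector store that implements
+        ``aadd_documents`` / ``adelete`` (``record_manager`` and
+        ``vector_store`` below are placeholders for those objects).
+
+        .. code-block:: python
+
+            from langchain_core.documents import Document
+
+            docs = [Document(page_content="hello", metadata={"source": "1"})]
+            result = await aindex(
+                docs,
+                record_manager,  # your RecordManager implementation
+                vector_store,  # your vector store
+                cleanup="incremental",
+                source_id_key="source",
+            )
+            # On a first run this returns e.g.
+            # {"num_added": 1, "num_updated": 0, "num_skipped": 0, "num_deleted": 0}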
+ """ + + if cleanup not in {"incremental", "full", None}: + raise ValueError( + f"cleanup should be one of 'incremental', 'full' or None. " + f"Got {cleanup}." + ) + + if cleanup == "incremental" and source_id_key is None: + raise ValueError("Source id key is required when cleanup mode is incremental.") + + # Check that the Vectorstore has required methods implemented + methods = ["adelete", "aadd_documents"] + + for method in methods: + if not hasattr(vector_store, method): + raise ValueError( + f"Vectorstore {vector_store} does not have required method {method}" + ) + + if type(vector_store).adelete == VectorStore.adelete: + # Checking if the vectorstore has overridden the default delete method + # implementation which just raises a NotImplementedError + raise ValueError("Vectorstore has not implemented the delete method") + + async_doc_iterator: AsyncIterator[Document] + if isinstance(docs_source, BaseLoader): + try: + async_doc_iterator = docs_source.alazy_load() + except NotImplementedError: + # Exception triggered when neither lazy_load nor alazy_load are implemented. + # * The default implementation of alazy_load uses lazy_load. + # * The default implementation of lazy_load raises NotImplementedError. + # In such a case, we use the load method and convert it to an async + # iterator. + async_doc_iterator = _to_async_iterator(docs_source.load()) + else: + if hasattr(docs_source, "__aiter__"): + async_doc_iterator = docs_source # type: ignore[assignment] + else: + async_doc_iterator = _to_async_iterator(docs_source) + + source_id_assigner = _get_source_id_assigner(source_id_key) + + # Mark when the update started. + index_start_dt = await record_manager.aget_time() + num_added = 0 + num_skipped = 0 + num_updated = 0 + num_deleted = 0 + + async for doc_batch in _abatch(batch_size, async_doc_iterator): + hashed_docs = list( + _deduplicate_in_order( + [_HashedDocument.from_document(doc) for doc in doc_batch] + ) + ) + + source_ids: Sequence[Optional[str]] = [ + source_id_assigner(doc) for doc in hashed_docs + ] + + if cleanup == "incremental": + # If the cleanup mode is incremental, source ids are required. + for source_id, hashed_doc in zip(source_ids, hashed_docs): + if source_id is None: + raise ValueError( + "Source ids are required when cleanup mode is incremental. " + f"Document that starts with " + f"content: {hashed_doc.page_content[:100]} was not assigned " + f"as source id." + ) + # source ids cannot be None after for loop above. + source_ids = cast(Sequence[str], source_ids) + + exists_batch = await record_manager.aexists([doc.uid for doc in hashed_docs]) + + # Filter out documents that already exist in the record store. + uids: list[str] = [] + docs_to_index: list[Document] = [] + uids_to_refresh = [] + seen_docs: Set[str] = set() + for hashed_doc, doc_exists in zip(hashed_docs, exists_batch): + if doc_exists: + if force_update: + seen_docs.add(hashed_doc.uid) + else: + uids_to_refresh.append(hashed_doc.uid) + continue + uids.append(hashed_doc.uid) + docs_to_index.append(hashed_doc.to_document()) + + if uids_to_refresh: + # Must be updated to refresh timestamp. + await record_manager.aupdate(uids_to_refresh, time_at_least=index_start_dt) + num_skipped += len(uids_to_refresh) + + # Be pessimistic and assume that all vector store write will fail. 
+        # First write to vector store
+        if docs_to_index:
+            await vector_store.aadd_documents(
+                docs_to_index, ids=uids, batch_size=batch_size
+            )
+            num_added += len(docs_to_index) - len(seen_docs)
+            num_updated += len(seen_docs)
+
+        # And only then update the record store.
+        # Update ALL records, even if they already exist since we want to refresh
+        # their timestamp.
+        await record_manager.aupdate(
+            [doc.uid for doc in hashed_docs],
+            group_ids=source_ids,
+            time_at_least=index_start_dt,
+        )
+
+        # If source IDs are provided, we can do the deletion incrementally!
+
+        if cleanup == "incremental":
+            # Get the uids of the documents that were not returned by the loader.
+
+            # mypy isn't good enough to determine that source ids cannot be None
+            # here due to a check that's happening above, so we check again.
+            for source_id in source_ids:
+                if source_id is None:
+                    raise AssertionError("Source ids cannot be None here.")
+
+            _source_ids = cast(Sequence[str], source_ids)
+
+            uids_to_delete = await record_manager.alist_keys(
+                group_ids=_source_ids, before=index_start_dt
+            )
+            if uids_to_delete:
+                # First delete from vector store.
+                await vector_store.adelete(uids_to_delete)
+                # Then delete from record store.
+                await record_manager.adelete_keys(uids_to_delete)
+                num_deleted += len(uids_to_delete)
+
+    if cleanup == "full":
+        while uids_to_delete := await record_manager.alist_keys(
+            before=index_start_dt, limit=cleanup_batch_size
+        ):
+            # First delete from vector store.
+            await vector_store.adelete(uids_to_delete)
+            # Then delete from record manager.
+            await record_manager.adelete_keys(uids_to_delete)
+            num_deleted += len(uids_to_delete)
+
+    return {
+        "num_added": num_added,
+        "num_updated": num_updated,
+        "num_skipped": num_skipped,
+        "num_deleted": num_deleted,
+    }
diff --git a/libs/langchain/langchain/indexes/base.py b/libs/core/langchain_core/indexing/base.py
similarity index 82%
rename from libs/langchain/langchain/indexes/base.py
rename to libs/core/langchain_core/indexing/base.py
index 46ef5bf2efa..c91ffa78fd3 100644
--- a/libs/langchain/langchain/indexes/base.py
+++ b/libs/core/langchain_core/indexing/base.py
@@ -1,11 +1,8 @@
 from __future__ import annotations
 
-import uuid
 from abc import ABC, abstractmethod
 from typing import List, Optional, Sequence
 
-NAMESPACE_UUID = uuid.UUID(int=1984)
-
 
 class RecordManager(ABC):
     """An abstract base class representing the interface for a record manager."""
@@ -64,8 +61,16 @@ class RecordManager(ABC):
         Args:
             keys: A list of record keys to upsert.
             group_ids: A list of group IDs corresponding to the keys.
-            time_at_least: if provided, updates should only happen if the
-                updated_at field is at least this time.
+            time_at_least: Optional timestamp. Implementation can use this
+                to optionally verify that the timestamp IS at least this time
+                in the system that stores the data.
+
+                e.g., use to validate that the time in the postgres database
+                is equal to or larger than the given timestamp, if not
+                raise an error.
+
+                This is meant to help prevent time-drift issues since
+                time may not be monotonically increasing!
 
         Raises:
             ValueError: If the length of keys doesn't match the length of group_ids.
@@ -84,8 +89,16 @@ class RecordManager(ABC):
         Args:
            keys: A list of record keys to upsert.
            group_ids: A list of group IDs corresponding to the keys.
-            time_at_least: if provided, updates should only happen if the
-                updated_at field is at least this time.
+            time_at_least: Optional timestamp.
Implementation can use this + to optionally verify that the timestamp IS at least this time + in the system that stores the data. + + e.g., use to validate that the time in the postgres database + is equal to or larger than the given timestamp, if not + raise an error. + + This is meant to help prevent time-drift issues since + time may not be monotonically increasing! Raises: ValueError: If the length of keys doesn't match the length of group_ids. diff --git a/libs/core/langchain_core/language_models/base.py b/libs/core/langchain_core/language_models/base.py index a6addf8f6b0..a62809fbe9c 100644 --- a/libs/core/langchain_core/language_models/base.py +++ b/libs/core/langchain_core/language_models/base.py @@ -19,7 +19,7 @@ from typing import ( from typing_extensions import TypeAlias -from langchain_core._api import beta, deprecated +from langchain_core._api import deprecated from langchain_core.messages import ( AnyMessage, BaseMessage, @@ -201,7 +201,6 @@ class BaseLanguageModel( prompt and additional model provider-specific output. """ - @beta() def with_structured_output( self, schema: Union[Dict, Type[BaseModel]], **kwargs: Any ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index ee48234df3a..3484e8bba3c 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -2409,8 +2409,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): step_graph.trim_first_node() if step is not self.last: step_graph.trim_last_node() - graph.extend(step_graph) - step_first_node = step_graph.first_node() + step_first_node, _ = graph.extend(step_graph) if not step_first_node: raise ValueError(f"Runnable {step} has no first node") if current_last_node: @@ -3082,11 +3081,9 @@ class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]): if not step_graph: graph.add_edge(input_node, output_node) else: - graph.extend(step_graph) - step_first_node = step_graph.first_node() + step_first_node, step_last_node = graph.extend(step_graph) if not step_first_node: raise ValueError(f"Runnable {step} has no first node") - step_last_node = step_graph.last_node() if not step_last_node: raise ValueError(f"Runnable {step} has no last node") graph.add_edge(input_node, step_first_node) @@ -3779,11 +3776,9 @@ class RunnableLambda(Runnable[Input, Output]): if not dep_graph: graph.add_edge(input_node, output_node) else: - graph.extend(dep_graph) - dep_first_node = dep_graph.first_node() + dep_first_node, dep_last_node = graph.extend(dep_graph) if not dep_first_node: raise ValueError(f"Runnable {dep} has no first node") - dep_last_node = dep_graph.last_node() if not dep_last_node: raise ValueError(f"Runnable {dep} has no last node") graph.add_edge(input_node, dep_first_node) diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index f92b4b064a2..4486fca5252 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -11,6 +11,7 @@ from typing import ( List, NamedTuple, Optional, + Tuple, Type, TypedDict, Union, @@ -236,11 +237,39 @@ class Graph: self.edges.append(edge) return edge - def extend(self, graph: Graph) -> None: + def extend( + self, graph: Graph, *, prefix: str = "" + ) -> Tuple[Optional[Node], Optional[Node]]: """Add all nodes and edges from another graph. 
Note this doesn't check for duplicates, nor does it connect the graphs.""" - self.nodes.update(graph.nodes) - self.edges.extend(graph.edges) + if all(is_uuid(node.id) for node in graph.nodes.values()): + prefix = "" + + def prefixed(id: str) -> str: + return f"{prefix}:{id}" if prefix else id + + # prefix each node + self.nodes.update( + {prefixed(k): Node(prefixed(k), v.data) for k, v in graph.nodes.items()} + ) + # prefix each edge's source and target + self.edges.extend( + [ + Edge( + prefixed(edge.source), + prefixed(edge.target), + edge.data, + edge.conditional, + ) + for edge in graph.edges + ] + ) + # return (prefixed) first and last nodes of the subgraph + first, last = graph.first_node(), graph.last_node() + return ( + Node(prefixed(first.id), first.data) if first else None, + Node(prefixed(last.id), last.data) if last else None, + ) def first_node(self) -> Optional[Node]: """Find the single node that is not a target of any edge. diff --git a/libs/core/langchain_core/runnables/graph_mermaid.py b/libs/core/langchain_core/runnables/graph_mermaid.py index 93e052d8919..61b922ae68f 100644 --- a/libs/core/langchain_core/runnables/graph_mermaid.py +++ b/libs/core/langchain_core/runnables/graph_mermaid.py @@ -48,7 +48,7 @@ def draw_mermaid( if with_styles: # Node formatting templates default_class_label = "default" - format_dict = {default_class_label: "{0}([{0}]):::otherclass"} + format_dict = {default_class_label: "{0}([{1}]):::otherclass"} if first_node_label is not None: format_dict[first_node_label] = "{0}[{0}]:::startclass" if last_node_label is not None: @@ -57,17 +57,24 @@ def draw_mermaid( # Add nodes to the graph for node in nodes.values(): node_label = format_dict.get(node, format_dict[default_class_label]).format( - _escape_node_label(node) + _escape_node_label(node), _escape_node_label(node.split(":", 1)[-1]) ) mermaid_graph += f"\t{node_label};\n" + subgraph = "" # Add edges to the graph for edge in edges: + src_prefix = edge.source.split(":")[0] + tgt_prefix = edge.target.split(":")[0] + # exit subgraph if source or target is not in the same subgraph + if subgraph and (subgraph != src_prefix or subgraph != tgt_prefix): + mermaid_graph += "\tend\n" + subgraph = "" + # enter subgraph if source and target are in the same subgraph + if not subgraph and src_prefix and src_prefix == tgt_prefix: + mermaid_graph += f"\tsubgraph {src_prefix}\n" + subgraph = src_prefix adjusted_edge = _adjust_mermaid_edge(edge=edge, nodes=nodes) - if ( - adjusted_edge is None - ): # Ignore if it is connection between source and intermediate node - continue source, target = adjusted_edge @@ -96,6 +103,8 @@ def draw_mermaid( f"\t{_escape_node_label(source)}{edge_label}" f"{_escape_node_label(target)};\n" ) + if subgraph: + mermaid_graph += "end\n" # Add custom styles for nodes if with_styles: @@ -111,7 +120,7 @@ def _escape_node_label(node_label: str) -> str: def _adjust_mermaid_edge( edge: Edge, nodes: Dict[str, str], -) -> Optional[Tuple[str, str]]: +) -> Tuple[str, str]: """Adjusts Mermaid edge to map conditional nodes to pure nodes.""" source_node_label = nodes.get(edge.source, edge.source) target_node_label = nodes.get(edge.target, edge.target) diff --git a/libs/core/tests/unit_tests/indexing/__init__.py b/libs/core/tests/unit_tests/indexing/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/core/tests/unit_tests/indexing/in_memory.py b/libs/core/tests/unit_tests/indexing/in_memory.py new file mode 100644 index 00000000000..c9d55f51a79 --- /dev/null +++ 
b/libs/core/tests/unit_tests/indexing/in_memory.py @@ -0,0 +1,105 @@ +import time +from typing import Dict, List, Optional, Sequence, TypedDict + +from langchain_core.indexing.base import RecordManager + + +class _Record(TypedDict): + group_id: Optional[str] + updated_at: float + + +class InMemoryRecordManager(RecordManager): + """An in-memory record manager for testing purposes.""" + + def __init__(self, namespace: str) -> None: + super().__init__(namespace) + # Each key points to a dictionary + # of {'group_id': group_id, 'updated_at': timestamp} + self.records: Dict[str, _Record] = {} + self.namespace = namespace + + def create_schema(self) -> None: + """In-memory schema creation is simply ensuring the structure is initialized.""" + + async def acreate_schema(self) -> None: + """In-memory schema creation is simply ensuring the structure is initialized.""" + + def get_time(self) -> float: + """Get the current server time as a high resolution timestamp!""" + return time.time() + + async def aget_time(self) -> float: + """Get the current server time as a high resolution timestamp!""" + return self.get_time() + + def update( + self, + keys: Sequence[str], + *, + group_ids: Optional[Sequence[Optional[str]]] = None, + time_at_least: Optional[float] = None, + ) -> None: + if group_ids and len(keys) != len(group_ids): + raise ValueError("Length of keys must match length of group_ids") + for index, key in enumerate(keys): + group_id = group_ids[index] if group_ids else None + if time_at_least and time_at_least > self.get_time(): + raise ValueError("time_at_least must be in the past") + self.records[key] = {"group_id": group_id, "updated_at": self.get_time()} + + async def aupdate( + self, + keys: Sequence[str], + *, + group_ids: Optional[Sequence[Optional[str]]] = None, + time_at_least: Optional[float] = None, + ) -> None: + self.update(keys, group_ids=group_ids, time_at_least=time_at_least) + + def exists(self, keys: Sequence[str]) -> List[bool]: + return [key in self.records for key in keys] + + async def aexists(self, keys: Sequence[str]) -> List[bool]: + return self.exists(keys) + + def list_keys( + self, + *, + before: Optional[float] = None, + after: Optional[float] = None, + group_ids: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + ) -> List[str]: + result = [] + for key, data in self.records.items(): + if before and data["updated_at"] >= before: + continue + if after and data["updated_at"] <= after: + continue + if group_ids and data["group_id"] not in group_ids: + continue + result.append(key) + if limit: + return result[:limit] + return result + + async def alist_keys( + self, + *, + before: Optional[float] = None, + after: Optional[float] = None, + group_ids: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + ) -> List[str]: + return self.list_keys( + before=before, after=after, group_ids=group_ids, limit=limit + ) + + def delete_keys(self, keys: Sequence[str]) -> None: + for key in keys: + if key in self.records: + del self.records[key] + + async def adelete_keys(self, keys: Sequence[str]) -> None: + self.delete_keys(keys) diff --git a/libs/core/tests/unit_tests/indexing/test_hashed_document.py b/libs/core/tests/unit_tests/indexing/test_hashed_document.py new file mode 100644 index 00000000000..34378732852 --- /dev/null +++ b/libs/core/tests/unit_tests/indexing/test_hashed_document.py @@ -0,0 +1,50 @@ +import pytest + +from langchain_core.documents import Document +from langchain_core.indexing.api import _HashedDocument + + +def 
test_hashed_document_hashing() -> None:
+    hashed_document = _HashedDocument(  # type: ignore[call-arg]
+        uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
+    )
+    assert isinstance(hashed_document.hash_, str)
+
+
+def test_hashing_with_missing_content() -> None:
+    """Check that a TypeError is raised if page_content is missing."""
+    with pytest.raises(TypeError):
+        _HashedDocument(
+            metadata={"key": "value"},
+        )  # type: ignore
+
+
+def test_uid_auto_assigned_to_hash() -> None:
+    """Test uid is auto-assigned to the hashed_document hash."""
+    hashed_document = _HashedDocument(  # type: ignore[call-arg]
+        page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
+    )
+    assert hashed_document.uid == hashed_document.hash_
+
+
+def test_to_document() -> None:
+    """Test to_document method."""
+    hashed_document = _HashedDocument(  # type: ignore[call-arg]
+        page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
+    )
+    doc = hashed_document.to_document()
+    assert isinstance(doc, Document)
+    assert doc.page_content == "Lorem ipsum dolor sit amet"
+    assert doc.metadata == {"key": "value"}
+
+
+def test_from_document() -> None:
+    """Test from document class method."""
+    document = Document(
+        page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
+    )
+
+    hashed_document = _HashedDocument.from_document(document)
+    # hash should be deterministic
+    assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
+    assert hashed_document.uid == hashed_document.hash_
diff --git a/libs/core/tests/unit_tests/indexing/test_in_memory_record_manager.py b/libs/core/tests/unit_tests/indexing/test_in_memory_record_manager.py
new file mode 100644
index 00000000000..ea88724513f
--- /dev/null
+++ b/libs/core/tests/unit_tests/indexing/test_in_memory_record_manager.py
@@ -0,0 +1,223 @@
+from datetime import datetime
+from unittest.mock import patch
+
+import pytest
+import pytest_asyncio
+
+from tests.unit_tests.indexing.in_memory import InMemoryRecordManager
+
+
+@pytest.fixture()
+def manager() -> InMemoryRecordManager:
+    """Initialize and return an InMemoryRecordManager instance for testing."""
+    record_manager = InMemoryRecordManager(namespace="kittens")
+    record_manager.create_schema()
+    return record_manager
+
+
+@pytest_asyncio.fixture()
+async def amanager() -> InMemoryRecordManager:
+    """Initialize and return an InMemoryRecordManager instance for testing."""
+    record_manager = InMemoryRecordManager(namespace="kittens")
+    await record_manager.acreate_schema()
+    return record_manager
+
+
+def test_update(manager: InMemoryRecordManager) -> None:
+    """Test updating records in the database."""
+    # no keys should be present in the set
+    read_keys = manager.list_keys()
+    assert read_keys == []
+    # Insert records
+    keys = ["key1", "key2", "key3"]
+    manager.update(keys)
+    # Retrieve the records
+    read_keys = manager.list_keys()
+    assert read_keys == ["key1", "key2", "key3"]
+
+
+async def test_aupdate(amanager: InMemoryRecordManager) -> None:
+    """Test updating records in the database."""
+    # no keys should be present in the set
+    read_keys = await amanager.alist_keys()
+    assert read_keys == []
+    # Insert records
+    keys = ["key1", "key2", "key3"]
+    await amanager.aupdate(keys)
+    # Retrieve the records
+    read_keys = await amanager.alist_keys()
+    assert read_keys == ["key1", "key2", "key3"]
+
+
+def test_update_timestamp(manager: InMemoryRecordManager)
-> None: + """Test updating records in the database.""" + # no keys should be present in the set + with patch.object( + manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + manager.update(["key1"]) + + assert manager.list_keys() == ["key1"] + assert manager.list_keys(before=datetime(2021, 1, 1).timestamp()) == [] + assert manager.list_keys(after=datetime(2021, 1, 1).timestamp()) == ["key1"] + assert manager.list_keys(after=datetime(2021, 1, 3).timestamp()) == [] + + # Update the timestamp + with patch.object( + manager, "get_time", return_value=datetime(2023, 1, 5).timestamp() + ): + manager.update(["key1"]) + + assert manager.list_keys() == ["key1"] + assert manager.list_keys(before=datetime(2023, 1, 1).timestamp()) == [] + assert manager.list_keys(after=datetime(2023, 1, 1).timestamp()) == ["key1"] + assert manager.list_keys(after=datetime(2023, 1, 3).timestamp()) == ["key1"] + + +async def test_aupdate_timestamp(manager: InMemoryRecordManager) -> None: + """Test updating records in the database.""" + # no keys should be present in the set + with patch.object( + manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + await manager.aupdate(["key1"]) + + assert await manager.alist_keys() == ["key1"] + assert await manager.alist_keys(before=datetime(2021, 1, 1).timestamp()) == [] + assert await manager.alist_keys(after=datetime(2021, 1, 1).timestamp()) == ["key1"] + assert await manager.alist_keys(after=datetime(2021, 1, 3).timestamp()) == [] + + # Update the timestamp + with patch.object( + manager, "get_time", return_value=datetime(2023, 1, 5).timestamp() + ): + await manager.aupdate(["key1"]) + + assert await manager.alist_keys() == ["key1"] + assert await manager.alist_keys(before=datetime(2023, 1, 1).timestamp()) == [] + assert await manager.alist_keys(after=datetime(2023, 1, 1).timestamp()) == ["key1"] + assert await manager.alist_keys(after=datetime(2023, 1, 3).timestamp()) == ["key1"] + + +def test_exists(manager: InMemoryRecordManager) -> None: + """Test checking if keys exist in the database.""" + # Insert records + keys = ["key1", "key2", "key3"] + manager.update(keys) + # Check if the keys exist in the database + exists = manager.exists(keys) + assert len(exists) == len(keys) + assert exists == [True, True, True] + + exists = manager.exists(["key1", "key4"]) + assert len(exists) == 2 + assert exists == [True, False] + + +async def test_aexists(amanager: InMemoryRecordManager) -> None: + """Test checking if keys exist in the database.""" + # Insert records + keys = ["key1", "key2", "key3"] + await amanager.aupdate(keys) + # Check if the keys exist in the database + exists = await amanager.aexists(keys) + assert len(exists) == len(keys) + assert exists == [True, True, True] + + exists = await amanager.aexists(["key1", "key4"]) + assert len(exists) == 2 + assert exists == [True, False] + + +async def test_list_keys(manager: InMemoryRecordManager) -> None: + """Test listing keys based on the provided date range.""" + # Insert records + assert manager.list_keys() == [] + assert await manager.alist_keys() == [] + + with patch.object( + manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + manager.update(["key1", "key2"]) + manager.update(["key3"], group_ids=["group1"]) + manager.update(["key4"], group_ids=["group2"]) + + with patch.object( + manager, "get_time", return_value=datetime(2021, 1, 10).timestamp() + ): + manager.update(["key5"]) + + assert sorted(manager.list_keys()) == ["key1", "key2", "key3", "key4", "key5"] + 
assert sorted(await manager.alist_keys()) == [ + "key1", + "key2", + "key3", + "key4", + "key5", + ] + + # By group + assert manager.list_keys(group_ids=["group1"]) == ["key3"] + assert await manager.alist_keys(group_ids=["group1"]) == ["key3"] + + # Before + assert sorted(manager.list_keys(before=datetime(2021, 1, 3).timestamp())) == [ + "key1", + "key2", + "key3", + "key4", + ] + assert sorted( + await manager.alist_keys(before=datetime(2021, 1, 3).timestamp()) + ) == [ + "key1", + "key2", + "key3", + "key4", + ] + + # After + assert sorted(manager.list_keys(after=datetime(2021, 1, 3).timestamp())) == ["key5"] + assert sorted(await manager.alist_keys(after=datetime(2021, 1, 3).timestamp())) == [ + "key5" + ] + + results = manager.list_keys(limit=1) + assert len(results) == 1 + assert results[0] in ["key1", "key2", "key3", "key4", "key5"] + + results = await manager.alist_keys(limit=1) + assert len(results) == 1 + assert results[0] in ["key1", "key2", "key3", "key4", "key5"] + + +def test_delete_keys(manager: InMemoryRecordManager) -> None: + """Test deleting keys from the database.""" + # Insert records + keys = ["key1", "key2", "key3"] + manager.update(keys) + + # Delete some keys + keys_to_delete = ["key1", "key2"] + manager.delete_keys(keys_to_delete) + + # Check if the deleted keys are no longer in the database + remaining_keys = manager.list_keys() + assert remaining_keys == ["key3"] + + +async def test_adelete_keys(amanager: InMemoryRecordManager) -> None: + """Test deleting keys from the database.""" + # Insert records + keys = ["key1", "key2", "key3"] + await amanager.aupdate(keys) + + # Delete some keys + keys_to_delete = ["key1", "key2"] + await amanager.adelete_keys(keys_to_delete) + + # Check if the deleted keys are no longer in the database + remaining_keys = await amanager.alist_keys() + assert remaining_keys == ["key3"] diff --git a/libs/core/tests/unit_tests/indexing/test_indexing.py b/libs/core/tests/unit_tests/indexing/test_indexing.py new file mode 100644 index 00000000000..701204363ab --- /dev/null +++ b/libs/core/tests/unit_tests/indexing/test_indexing.py @@ -0,0 +1,1398 @@ +from datetime import datetime +from typing import ( + Any, + AsyncIterator, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Type, +) +from unittest.mock import patch + +import pytest +import pytest_asyncio + +from langchain_core.document_loaders.base import BaseLoader +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.indexing import aindex, index +from langchain_core.indexing.api import _abatch, _HashedDocument +from langchain_core.vectorstores import VST, VectorStore +from tests.unit_tests.indexing.in_memory import InMemoryRecordManager + + +class ToyLoader(BaseLoader): + """Toy loader that always returns the same documents.""" + + def __init__(self, documents: Sequence[Document]) -> None: + """Initialize with the documents to return.""" + self.documents = documents + + def lazy_load( + self, + ) -> Iterator[Document]: + yield from self.documents + + async def alazy_load( + self, + ) -> AsyncIterator[Document]: + for document in self.documents: + yield document + + +class InMemoryVectorStore(VectorStore): + """In-memory implementation of VectorStore using a dictionary.""" + + def __init__(self, permit_upserts: bool = False) -> None: + """Vector store interface for testing things in memory.""" + self.store: Dict[str, Document] = {} + self.permit_upserts = permit_upserts + + def delete(self, ids: 
Optional[Sequence[str]] = None, **kwargs: Any) -> None: + """Delete the given documents from the store using their IDs.""" + if ids: + for _id in ids: + self.store.pop(_id, None) + + async def adelete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None: + """Delete the given documents from the store using their IDs.""" + if ids: + for _id in ids: + self.store.pop(_id, None) + + def add_documents( # type: ignore + self, + documents: Sequence[Document], + *, + ids: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> List[str]: + """Add the given documents to the store (insert behavior).""" + if ids and len(ids) != len(documents): + raise ValueError( + f"Expected {len(ids)} ids, got {len(documents)} documents." + ) + + if not ids: + raise NotImplementedError("This is not implemented yet.") + + for _id, document in zip(ids, documents): + if _id in self.store and not self.permit_upserts: + raise ValueError( + f"Document with uid {_id} already exists in the store." + ) + self.store[_id] = document + + return list(ids) + + async def aadd_documents( + self, + documents: Sequence[Document], + *, + ids: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> List[str]: + if ids and len(ids) != len(documents): + raise ValueError( + f"Expected {len(ids)} ids, got {len(documents)} documents." + ) + + if not ids: + raise NotImplementedError("This is not implemented yet.") + + for _id, document in zip(ids, documents): + if _id in self.store and not self.permit_upserts: + raise ValueError( + f"Document with uid {_id} already exists in the store." + ) + self.store[_id] = document + return list(ids) + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[Dict[Any, Any]]] = None, + **kwargs: Any, + ) -> List[str]: + """Add the given texts to the store (insert behavior).""" + raise NotImplementedError() + + @classmethod + def from_texts( + cls: Type[VST], + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[Dict[Any, Any]]] = None, + **kwargs: Any, + ) -> VST: + """Create a vector store from a list of texts.""" + raise NotImplementedError() + + def similarity_search( + self, query: str, k: int = 4, **kwargs: Any + ) -> List[Document]: + """Find the most similar documents to the given query.""" + raise NotImplementedError() + + +@pytest.fixture +def record_manager() -> InMemoryRecordManager: + """Timestamped set fixture.""" + record_manager = InMemoryRecordManager(namespace="hello") + record_manager.create_schema() + return record_manager + + +@pytest_asyncio.fixture # type: ignore +async def arecord_manager() -> InMemoryRecordManager: + """Timestamped set fixture.""" + record_manager = InMemoryRecordManager(namespace="hello") + await record_manager.acreate_schema() + return record_manager + + +@pytest.fixture +def vector_store() -> InMemoryVectorStore: + """Vector store fixture.""" + return InMemoryVectorStore() + + +@pytest.fixture +def upserting_vector_store() -> InMemoryVectorStore: + """Vector store fixture.""" + return InMemoryVectorStore(permit_upserts=True) + + +def test_indexing_same_content( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Indexing some content to confirm it gets added only once.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + ), + Document( + page_content="This is another document.", + ), + ] + ) + + assert index(loader, record_manager, vector_store) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } 
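+    # Both documents were new, so the store should now hold exactly two entries.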
+ + assert len(list(vector_store.store)) == 2 + + for _ in range(2): + # Run the indexing again + assert index(loader, record_manager, vector_store) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + +async def test_aindexing_same_content( + arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Indexing some content to confirm it gets added only once.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + ), + Document( + page_content="This is another document.", + ), + ] + ) + + assert await aindex(loader, arecord_manager, vector_store) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + assert len(list(vector_store.store)) == 2 + + for _ in range(2): + # Run the indexing again + assert await aindex(loader, arecord_manager, vector_store) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + +def test_index_simple_delete_full( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Indexing some content to confirm it gets added only once.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + ), + Document( + page_content="This is another document.", + ), + ] + ) + + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp() + ): + assert index(loader, record_manager, vector_store, cleanup="full") == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp() + ): + assert index(loader, record_manager, vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + loader = ToyLoader( + documents=[ + Document( + page_content="mutated document 1", + ), + Document( + page_content="This is another document.", # <-- Same as original + ), + ] + ) + + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + indexing_result = index(loader, record_manager, vector_store, cleanup="full") + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == {"mutated document 1", "This is another document."} + + assert indexing_result == { + "num_added": 1, + "num_deleted": 1, + "num_skipped": 1, + "num_updated": 0, + } + + # Attempt to index again verify that nothing changes + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index(loader, record_manager, vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + +async def test_aindex_simple_delete_full( + arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Indexing some content to confirm it gets added only once.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + ), + Document( + page_content="This is another document.", + ), + ] + ) + + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp() + ): + assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, 
+ } + + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp() + ): + assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + loader = ToyLoader( + documents=[ + Document( + page_content="mutated document 1", + ), + Document( + page_content="This is another document.", # <-- Same as original + ), + ] + ) + + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == { + "num_added": 1, + "num_deleted": 1, + "num_skipped": 1, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == {"mutated document 1", "This is another document."} + + # Attempt to index again verify that nothing changes + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + +def test_incremental_fails_with_bad_source_ids( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with incremental deletion strategy.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + Document( + page_content="This is yet another document.", + metadata={"source": None}, + ), + ] + ) + + with pytest.raises(ValueError): + # Should raise an error because no source id function was specified + index(loader, record_manager, vector_store, cleanup="incremental") + + with pytest.raises(ValueError): + # Should raise an error because no source id function was specified + index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) + + +async def test_aincremental_fails_with_bad_source_ids( + arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with incremental deletion strategy.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + Document( + page_content="This is yet another document.", + metadata={"source": None}, + ), + ] + ) + + with pytest.raises(ValueError): + # Should raise an error because no source id function was specified + await aindex( + loader, + arecord_manager, + vector_store, + cleanup="incremental", + ) + + with pytest.raises(ValueError): + # Should raise an error because no source id function was specified + await aindex( + loader, + arecord_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) + + +def test_no_delete( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing without a deletion strategy.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + ] + ) + + with patch.object( + 
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup=None, + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + # If we add the same content twice it should be skipped + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup=None, + source_id_key="source", + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + loader = ToyLoader( + documents=[ + Document( + page_content="mutated content", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + ] + ) + + # Should result in no updates or deletions! + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup=None, + source_id_key="source", + ) == { + "num_added": 1, + "num_deleted": 0, + "num_skipped": 1, + "num_updated": 0, + } + + +async def test_ano_delete( + arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing without a deletion strategy.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + ] + ) + + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex( + loader, + arecord_manager, + vector_store, + cleanup=None, + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + # If we add the same content twice it should be skipped + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex( + loader, + arecord_manager, + vector_store, + cleanup=None, + source_id_key="source", + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + loader = ToyLoader( + documents=[ + Document( + page_content="mutated content", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + ] + ) + + # Should result in no updates or deletions! 
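+    # (The mutated "source 1" document is added, but with cleanup=None the stale
+    # version is not removed from the vector store, so num_deleted stays 0.)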
+ with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex( + loader, + arecord_manager, + vector_store, + cleanup=None, + source_id_key="source", + ) == { + "num_added": 1, + "num_deleted": 0, + "num_skipped": 1, + "num_updated": 0, + } + + +def test_incremental_delete( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with incremental deletion strategy.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + ] + ) + + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == {"This is another document.", "This is a test document."} + + # Attempt to index again verify that nothing changes + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + # Create 2 documents from the same source all with mutated content + loader = ToyLoader( + documents=[ + Document( + page_content="mutated document 1", + metadata={"source": "1"}, + ), + Document( + page_content="mutated document 2", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", # <-- Same as original + metadata={"source": "2"}, + ), + ] + ) + + # Attempt to index again verify that nothing changes + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 3).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 1, + "num_skipped": 1, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == { + "mutated document 1", + "mutated document 2", + "This is another document.", + } + + +def test_incremental_indexing_with_batch_size( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with incremental indexing""" + loader = ToyLoader( + documents=[ + Document( + page_content="1", + metadata={"source": "1"}, + ), + Document( + page_content="2", + metadata={"source": "1"}, + ), + Document( + page_content="3", + metadata={"source": "1"}, + ), + Document( + page_content="4", + metadata={"source": "1"}, + ), + ] + ) + + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + batch_size=2, + ) == { + "num_added": 4, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + assert index( + loader, + record_manager, + vector_store, + 
cleanup="incremental", + source_id_key="source", + batch_size=2, + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 4, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == {"1", "2", "3", "4"} + + +def test_incremental_delete_with_batch_size( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with incremental deletion strategy and batch size.""" + loader = ToyLoader( + documents=[ + Document( + page_content="1", + metadata={"source": "1"}, + ), + Document( + page_content="2", + metadata={"source": "2"}, + ), + Document( + page_content="3", + metadata={"source": "3"}, + ), + Document( + page_content="4", + metadata={"source": "4"}, + ), + ] + ) + + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + batch_size=3, + ) == { + "num_added": 4, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == {"1", "2", "3", "4"} + + # Attempt to index again verify that nothing changes + with patch.object( + record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert index( + loader, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + batch_size=3, + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 4, + "num_updated": 0, + } + + # Attempt to index again verify that nothing changes + with patch.object( + record_manager, "get_time", return_value=datetime(2022, 1, 3).timestamp() + ): + # Docs with same content + docs = [ + Document( + page_content="1", + metadata={"source": "1"}, + ), + Document( + page_content="2", + metadata={"source": "2"}, + ), + ] + assert index( + docs, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + batch_size=1, + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + # Attempt to index again verify that nothing changes + with patch.object( + record_manager, "get_time", return_value=datetime(2023, 1, 3).timestamp() + ): + # Docs with same content + docs = [ + Document( + page_content="1", + metadata={"source": "1"}, + ), + Document( + page_content="2", + metadata={"source": "2"}, + ), + ] + assert index( + docs, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + batch_size=1, + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + # Try to index with changed docs now + with patch.object( + record_manager, "get_time", return_value=datetime(2024, 1, 3).timestamp() + ): + # Docs with same content + docs = [ + Document( + page_content="changed 1", + metadata={"source": "1"}, + ), + Document( + page_content="changed 2", + metadata={"source": "2"}, + ), + ] + assert index( + docs, + record_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 2, + "num_skipped": 0, + "num_updated": 0, + } + + +async def test_aincremental_delete( + arecord_manager: InMemoryRecordManager, vector_store: 
InMemoryVectorStore +) -> None: + """Test indexing with incremental deletion strategy.""" + loader = ToyLoader( + documents=[ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + ] + ) + + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex( + loader.lazy_load(), + arecord_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == {"This is another document.", "This is a test document."} + + # Attempt to index again verify that nothing changes + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() + ): + assert await aindex( + loader.lazy_load(), + arecord_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + # Create 2 documents from the same source all with mutated content + loader = ToyLoader( + documents=[ + Document( + page_content="mutated document 1", + metadata={"source": "1"}, + ), + Document( + page_content="mutated document 2", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", # <-- Same as original + metadata={"source": "2"}, + ), + ] + ) + + # Attempt to index again verify that nothing changes + with patch.object( + arecord_manager, "get_time", return_value=datetime(2021, 1, 3).timestamp() + ): + assert await aindex( + loader.lazy_load(), + arecord_manager, + vector_store, + cleanup="incremental", + source_id_key="source", + ) == { + "num_added": 2, + "num_deleted": 1, + "num_skipped": 1, + "num_updated": 0, + } + + doc_texts = set( + # Ignoring type since doc should be in the store and not a None + vector_store.store.get(uid).page_content # type: ignore + for uid in vector_store.store + ) + assert doc_texts == { + "mutated document 1", + "mutated document 2", + "This is another document.", + } + + +def test_indexing_with_no_docs( + record_manager: InMemoryRecordManager, vector_store: VectorStore +) -> None: + """Check edge case when loader returns no new docs.""" + loader = ToyLoader(documents=[]) + + assert index(loader, record_manager, vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + +async def test_aindexing_with_no_docs( + arecord_manager: InMemoryRecordManager, vector_store: VectorStore +) -> None: + """Check edge case when loader returns no new docs.""" + loader = ToyLoader(documents=[]) + + assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + +def test_deduplication( + record_manager: InMemoryRecordManager, vector_store: VectorStore +) -> None: + """Check edge case when loader returns no new docs.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + ] + + # Should result in only a single document being added + assert index(docs, record_manager, vector_store, 
cleanup="full") == { + "num_added": 1, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + +async def test_adeduplication( + arecord_manager: InMemoryRecordManager, vector_store: VectorStore +) -> None: + """Check edge case when loader returns no new docs.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + ] + + # Should result in only a single document being added + assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == { + "num_added": 1, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + +def test_cleanup_with_different_batchsize( + record_manager: InMemoryRecordManager, vector_store: VectorStore +) -> None: + """Check that we can clean up with different batch size.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": str(d)}, + ) + for d in range(1000) + ] + + assert index(docs, record_manager, vector_store, cleanup="full") == { + "num_added": 1000, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + docs = [ + Document( + page_content="Different doc", + metadata={"source": str(d)}, + ) + for d in range(1001) + ] + + assert index( + docs, record_manager, vector_store, cleanup="full", cleanup_batch_size=17 + ) == { + "num_added": 1001, + "num_deleted": 1000, + "num_skipped": 0, + "num_updated": 0, + } + + +async def test_async_cleanup_with_different_batchsize( + arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Check that we can clean up with different batch size.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": str(d)}, + ) + for d in range(1000) + ] + + assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == { + "num_added": 1000, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + docs = [ + Document( + page_content="Different doc", + metadata={"source": str(d)}, + ) + for d in range(1001) + ] + + assert await aindex( + docs, arecord_manager, vector_store, cleanup="full", cleanup_batch_size=17 + ) == { + "num_added": 1001, + "num_deleted": 1000, + "num_skipped": 0, + "num_updated": 0, + } + + +def test_deduplication_v2( + record_manager: InMemoryRecordManager, vector_store: VectorStore +) -> None: + """Check edge case when loader returns no new docs.""" + docs = [ + Document( + page_content="1", + metadata={"source": "1"}, + ), + Document( + page_content="1", + metadata={"source": "1"}, + ), + Document( + page_content="2", + metadata={"source": "2"}, + ), + Document( + page_content="3", + metadata={"source": "3"}, + ), + ] + + assert index(docs, record_manager, vector_store, cleanup="full") == { + "num_added": 3, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + # using in memory implementation here + assert isinstance(vector_store, InMemoryVectorStore) + contents = sorted( + [document.page_content for document in vector_store.store.values()] + ) + assert contents == ["1", "2", "3"] + + +async def _to_async_iter(it: Iterable[Any]) -> AsyncIterator[Any]: + """Convert an iterable to an async iterator.""" + for i in it: + yield i + + +async def test_abatch() -> None: + """Test the abatch function.""" + batches = _abatch(5, _to_async_iter(range(12))) + assert isinstance(batches, AsyncIterator) + assert [batch async for batch in batches] == [ + [0, 1, 2, 3, 4], + [5, 6, 7, 8, 9], + [10, 11], + ] + + batches = 
_abatch(1, _to_async_iter(range(3))) + assert isinstance(batches, AsyncIterator) + assert [batch async for batch in batches] == [[0], [1], [2]] + + batches = _abatch(2, _to_async_iter(range(5))) + assert isinstance(batches, AsyncIterator) + assert [batch async for batch in batches] == [[0, 1], [2, 3], [4]] + + +def test_indexing_force_update( + record_manager: InMemoryRecordManager, upserting_vector_store: VectorStore +) -> None: + """Test indexing with force update.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + ] + + assert index(docs, record_manager, upserting_vector_store, cleanup="full") == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + assert index(docs, record_manager, upserting_vector_store, cleanup="full") == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + assert index( + docs, record_manager, upserting_vector_store, cleanup="full", force_update=True + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 2, + } + + +async def test_aindexing_force_update( + arecord_manager: InMemoryRecordManager, upserting_vector_store: VectorStore +) -> None: + """Test indexing with force update.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + Document( + page_content="This is another document.", + metadata={"source": "2"}, + ), + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + ] + + assert await aindex( + docs, arecord_manager, upserting_vector_store, cleanup="full" + ) == { + "num_added": 2, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 0, + } + + assert await aindex( + docs, arecord_manager, upserting_vector_store, cleanup="full" + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 2, + "num_updated": 0, + } + + assert await aindex( + docs, + arecord_manager, + upserting_vector_store, + cleanup="full", + force_update=True, + ) == { + "num_added": 0, + "num_deleted": 0, + "num_skipped": 0, + "num_updated": 2, + } + + +def test_indexing_custom_batch_size( + record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with a custom batch size.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + ] + ids = [_HashedDocument.from_document(doc).uid for doc in docs] + + batch_size = 1 + with patch.object(vector_store, "add_documents") as mock_add_documents: + index(docs, record_manager, vector_store, batch_size=batch_size) + args, kwargs = mock_add_documents.call_args + assert args == (docs,) + assert kwargs == {"ids": ids, "batch_size": batch_size} + + +async def test_aindexing_custom_batch_size( + arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore +) -> None: + """Test indexing with a custom batch size.""" + docs = [ + Document( + page_content="This is a test document.", + metadata={"source": "1"}, + ), + ] + ids = [_HashedDocument.from_document(doc).uid for doc in docs] + + batch_size = 1 + with patch.object(vector_store, "aadd_documents") as mock_add_documents: + await aindex(docs, arecord_manager, vector_store, batch_size=batch_size) + args, kwargs = mock_add_documents.call_args + assert args == (docs,) + assert kwargs == {"ids": 
ids, "batch_size": batch_size} diff --git a/libs/core/tests/unit_tests/indexing/test_public_api.py b/libs/core/tests/unit_tests/indexing/test_public_api.py new file mode 100644 index 00000000000..8c0b367dd85 --- /dev/null +++ b/libs/core/tests/unit_tests/indexing/test_public_api.py @@ -0,0 +1,12 @@ +from langchain_core.indexing import __all__ + + +def test_all() -> None: + """Use to catch obvious breaking changes.""" + assert __all__ == sorted(__all__, key=str.lower) + assert __all__ == [ + "aindex", + "index", + "IndexingResult", + "RecordManager", + ] diff --git a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py index 50b60390f84..4f6004df6d7 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py @@ -8,10 +8,9 @@ from langchain_community.tools.vectorstore.tool import ( ) from langchain_core.language_models import BaseLanguageModel from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.tools import BaseTool from langchain_core.vectorstores import VectorStore -from langchain.tools import BaseTool - class VectorStoreInfo(BaseModel): """Information about a VectorStore.""" diff --git a/libs/langchain/langchain/chains/flare/base.py b/libs/langchain/langchain/chains/flare/base.py index 8beb5b82e2a..8070fc12374 100644 --- a/libs/langchain/langchain/chains/flare/base.py +++ b/libs/langchain/langchain/chains/flare/base.py @@ -5,7 +5,6 @@ from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple import numpy as np -from langchain_community.llms.openai import OpenAI from langchain_core.callbacks import ( CallbackManagerForChainRun, ) @@ -56,11 +55,7 @@ class _ResponseChain(LLMChain): class _OpenAIResponseChain(_ResponseChain): """Chain that generates responses from user input and context.""" - llm: OpenAI = Field( - default_factory=lambda: OpenAI( - max_tokens=32, model_kwargs={"logprobs": 1}, temperature=0 - ) - ) + llm: BaseLanguageModel def _extract_tokens_and_log_probs( self, generations: List[Generation] @@ -118,7 +113,7 @@ class FlareChain(Chain): question_generator_chain: QuestionGeneratorChain """Chain that generates questions from uncertain spans.""" - response_chain: _ResponseChain = Field(default_factory=_OpenAIResponseChain) + response_chain: _ResponseChain """Chain that generates responses from user input and context.""" output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser) """Parser that determines whether the chain is finished.""" @@ -255,6 +250,14 @@ class FlareChain(Chain): Returns: FlareChain class with the given language model. """ + try: + from langchain_openai import OpenAI + except ImportError: + raise ImportError( + "OpenAI is required for FlareChain. " + "Please install langchain-openai." 
+                " Install it with: pip install langchain-openai."
+            )
         question_gen_chain = QuestionGeneratorChain(llm=llm)
         response_llm = OpenAI(
             max_tokens=max_generation_len, model_kwargs={"logprobs": 1}, temperature=0
diff --git a/libs/langchain/langchain/evaluation/comparison/eval_chain.py b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
index b0fd1e27a27..2bcf5632612 100644
--- a/libs/langchain/langchain/evaluation/comparison/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
@@ -5,8 +5,6 @@ import logging
 import re
 from typing import Any, Dict, List, Optional, Union
 
-from langchain_community.chat_models.azure_openai import AzureChatOpenAI
-from langchain_community.chat_models.openai import ChatOpenAI
 from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
@@ -254,10 +252,8 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
             ValueError: If the input variables are not as expected.
 
         """
-        if not (
-            isinstance(llm, (ChatOpenAI, AzureChatOpenAI))
-            and llm.model_name.startswith("gpt-4")
-        ):
+        # Check whether the model is GPT-4; if not, log a warning.
+        if not hasattr(llm, "model_name") or not llm.model_name.startswith("gpt-4"):
             logger.warning(
                 "This chain was only tested with GPT-4. \
                 Performance may be significantly worse with other models."
diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
index 4a18aa081b5..9df38531958 100644
--- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
@@ -193,7 +193,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 
     Examples
     --------
-    >>> from langchain_community.chat_models import ChatAnthropic
+    >>> from langchain_anthropic import ChatAnthropic
     >>> from langchain.evaluation.criteria import CriteriaEvalChain
     >>> llm = ChatAnthropic(temperature=0)
     >>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"}
@@ -205,7 +205,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
         'score': 0,
     }
 
-    >>> from langchain_community.chat_models import ChatOpenAI
+    >>> from langchain_openai import ChatOpenAI
     >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
     >>> llm = ChatOpenAI(model="gpt-4", temperature=0)
     >>> criteria = "correctness"
@@ -344,7 +344,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 
         Examples
         --------
-        >>> from langchain_community.llms import OpenAI
+        >>> from langchain_openai import OpenAI
         >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = {
@@ -432,7 +432,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 
         Examples
         --------
-        >>> from langchain_community.llms import OpenAI
+        >>> from langchain_openai import OpenAI
         >>> from langchain.evaluation.criteria import CriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = "conciseness"
@@ -487,7 +487,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 
         Examples
         --------
-        >>> from langchain_community.llms import OpenAI
+        >>> from langchain_openai import OpenAI
         >>> from langchain.evaluation.criteria import CriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = "conciseness"
@@ -568,7 +568,7 @@ class LabeledCriteriaEvalChain(CriteriaEvalChain):
 
     Examples
     --------
-    >>> from langchain_community.llms import OpenAI
+    >>> from langchain_openai import OpenAI
    >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
    >>> llm = OpenAI()
    >>> criteria = {
diff --git a/libs/langchain/langchain/evaluation/loading.py b/libs/langchain/langchain/evaluation/loading.py
index fbb17325854..27a8f348e48 100644
--- a/libs/langchain/langchain/evaluation/loading.py
+++ b/libs/langchain/langchain/evaluation/loading.py
@@ -1,7 +1,6 @@
 """Loading datasets and evaluators."""
 from typing import Any, Dict, List, Optional, Sequence, Type, Union
 
-from langchain_community.chat_models.openai import ChatOpenAI
 from langchain_core.language_models import BaseLanguageModel
 
 from langchain.chains.base import Chain
@@ -131,6 +130,20 @@ def load_evaluator(
     evaluator_cls = _EVALUATOR_MAP[evaluator]
     if issubclass(evaluator_cls, LLMEvalChain):
         try:
+            try:
+                from langchain_openai import ChatOpenAI
+            except ImportError:
+                try:
+                    from langchain_community.chat_models.openai import ChatOpenAI
+                except ImportError:
+                    raise ImportError(
+                        "Could not import langchain_openai, and falling back "
+                        "to langchain_community failed as well. Please install "
+                        "langchain_openai, or specify a language model "
+                        "explicitly (installing langchain_openai AND passing "
+                        "a language model explicitly is recommended)."
+                    )
+
             llm = llm or ChatOpenAI(  # type: ignore[call-arg]
                 model="gpt-4", model_kwargs={"seed": 42}, temperature=0
             )
diff --git a/libs/langchain/langchain/indexes/__init__.py b/libs/langchain/langchain/indexes/__init__.py
index 02e766b02b3..f6c9c5d535a 100644
--- a/libs/langchain/langchain/indexes/__init__.py
+++ b/libs/langchain/langchain/indexes/__init__.py
@@ -11,7 +11,8 @@ Importantly, Index keeps on working even if the content being written is
 derived via a set of transformations from some source content (e.g., indexing
 children documents that were derived from parent documents by chunking.)
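With the fallback import above in place, load_evaluator can still construct its default gpt-4 judge under either package layout. A usage sketch (assumes langchain-openai is installed and OPENAI_API_KEY is set; passing llm=... explicitly sidesteps the fallback entirely):

from langchain.evaluation import load_evaluator

evaluator = load_evaluator("criteria", criteria="conciseness")
print(evaluator.evaluate_strings(prediction="Yes.", input="Is water wet?"))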
""" -from langchain.indexes._api import IndexingResult, aindex, index +from langchain_core.indexing.api import IndexingResult, aindex, index + from langchain.indexes._sql_record_manager import SQLRecordManager from langchain.indexes.graph import GraphIndexCreator from langchain.indexes.vectorstore import VectorstoreIndexCreator diff --git a/libs/langchain/langchain/indexes/_api.py b/libs/langchain/langchain/indexes/_api.py index f41a221795a..d5919af972b 100644 --- a/libs/langchain/langchain/indexes/_api.py +++ b/libs/langchain/langchain/indexes/_api.py @@ -1,600 +1,5 @@ -"""Module contains logic for indexing documents into vector stores.""" -from __future__ import annotations +from langchain_core.indexing.api import _abatch, _batch, _HashedDocument -import hashlib -import json -import uuid -from itertools import islice -from typing import ( - Any, - AsyncIterable, - AsyncIterator, - Callable, - Dict, - Iterable, - Iterator, - List, - Literal, - Optional, - Sequence, - Set, - TypedDict, - TypeVar, - Union, - cast, -) - -from langchain_community.document_loaders.base import BaseLoader -from langchain_core.documents import Document -from langchain_core.pydantic_v1 import root_validator -from langchain_core.vectorstores import VectorStore - -from langchain.indexes.base import NAMESPACE_UUID, RecordManager - -T = TypeVar("T") - - -def _hash_string_to_uuid(input_string: str) -> uuid.UUID: - """Hashes a string and returns the corresponding UUID.""" - hash_value = hashlib.sha1(input_string.encode("utf-8")).hexdigest() - return uuid.uuid5(NAMESPACE_UUID, hash_value) - - -def _hash_nested_dict_to_uuid(data: dict[Any, Any]) -> uuid.UUID: - """Hashes a nested dictionary and returns the corresponding UUID.""" - serialized_data = json.dumps(data, sort_keys=True) - hash_value = hashlib.sha1(serialized_data.encode("utf-8")).hexdigest() - return uuid.uuid5(NAMESPACE_UUID, hash_value) - - -class _HashedDocument(Document): - """A hashed document with a unique ID.""" - - uid: str - hash_: str - """The hash of the document including content and metadata.""" - content_hash: str - """The hash of the document content.""" - metadata_hash: str - """The hash of the document metadata.""" - - @classmethod - def is_lc_serializable(cls) -> bool: - return False - - @root_validator(pre=True) - def calculate_hashes(cls, values: Dict[str, Any]) -> Dict[str, Any]: - """Root validator to calculate content and metadata hash.""" - content = values.get("page_content", "") - metadata = values.get("metadata", {}) - - forbidden_keys = ("hash_", "content_hash", "metadata_hash") - - for key in forbidden_keys: - if key in metadata: - raise ValueError( - f"Metadata cannot contain key {key} as it " - f"is reserved for internal use." - ) - - content_hash = str(_hash_string_to_uuid(content)) - - try: - metadata_hash = str(_hash_nested_dict_to_uuid(metadata)) - except Exception as e: - raise ValueError( - f"Failed to hash metadata: {e}. " - f"Please use a dict that can be serialized using json." 
- ) - - values["content_hash"] = content_hash - values["metadata_hash"] = metadata_hash - values["hash_"] = str(_hash_string_to_uuid(content_hash + metadata_hash)) - - _uid = values.get("uid", None) - - if _uid is None: - values["uid"] = values["hash_"] - return values - - def to_document(self) -> Document: - """Return a Document object.""" - return Document( - page_content=self.page_content, - metadata=self.metadata, - ) - - @classmethod - def from_document( - cls, document: Document, *, uid: Optional[str] = None - ) -> _HashedDocument: - """Create a HashedDocument from a Document.""" - return cls( # type: ignore[call-arg] - uid=uid, # type: ignore[arg-type] - page_content=document.page_content, - metadata=document.metadata, - ) - - -def _batch(size: int, iterable: Iterable[T]) -> Iterator[List[T]]: - """Utility batching function.""" - it = iter(iterable) - while True: - chunk = list(islice(it, size)) - if not chunk: - return - yield chunk - - -async def _abatch(size: int, iterable: AsyncIterable[T]) -> AsyncIterator[List[T]]: - """Utility batching function.""" - batch: List[T] = [] - async for element in iterable: - if len(batch) < size: - batch.append(element) - - if len(batch) >= size: - yield batch - batch = [] - - if batch: - yield batch - - -def _get_source_id_assigner( - source_id_key: Union[str, Callable[[Document], str], None], -) -> Callable[[Document], Union[str, None]]: - """Get the source id from the document.""" - if source_id_key is None: - return lambda doc: None - elif isinstance(source_id_key, str): - return lambda doc: doc.metadata[source_id_key] - elif callable(source_id_key): - return source_id_key - else: - raise ValueError( - f"source_id_key should be either None, a string or a callable. " - f"Got {source_id_key} of type {type(source_id_key)}." - ) - - -def _deduplicate_in_order( - hashed_documents: Iterable[_HashedDocument], -) -> Iterator[_HashedDocument]: - """Deduplicate a list of hashed documents while preserving order.""" - seen: Set[str] = set() - - for hashed_doc in hashed_documents: - if hashed_doc.hash_ not in seen: - seen.add(hashed_doc.hash_) - yield hashed_doc - - -# PUBLIC API - - -class IndexingResult(TypedDict): - """Return a detailed a breakdown of the result of the indexing operation.""" - - num_added: int - """Number of added documents.""" - num_updated: int - """Number of updated documents because they were not up to date.""" - num_deleted: int - """Number of deleted documents.""" - num_skipped: int - """Number of skipped documents because they were already up to date.""" - - -def index( - docs_source: Union[BaseLoader, Iterable[Document]], - record_manager: RecordManager, - vector_store: VectorStore, - *, - batch_size: int = 100, - cleanup: Literal["incremental", "full", None] = None, - source_id_key: Union[str, Callable[[Document], str], None] = None, - cleanup_batch_size: int = 1_000, - force_update: bool = False, -) -> IndexingResult: - """Index data from the loader into the vector store. - - Indexing functionality uses a manager to keep track of which documents - are in the vector store. - - This allows us to keep track of which documents were updated, and which - documents were deleted, which documents should be skipped. - - For the time being, documents are indexed using their hashes, and users - are not able to specify the uid of the document. - - IMPORTANT: - if auto_cleanup is set to True, the loader should be returning - the entire dataset, and not just a subset of the dataset. 
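_batch above is nothing more than an islice-based chunker (with _abatch as its async twin); an equivalent standalone sketch of the sync half:

from itertools import islice
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar("T")

def batch(size: int, iterable: Iterable[T]) -> Iterator[List[T]]:
    """Yield successive lists of at most `size` items."""
    it = iter(iterable)
    while chunk := list(islice(it, size)):
        yield chunk

assert list(batch(2, range(5))) == [[0, 1], [2, 3], [4]]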
- Otherwise, the auto_cleanup will remove documents that it is not - supposed to. - - Args: - docs_source: Data loader or iterable of documents to index. - record_manager: Timestamped set to keep track of which documents were - updated. - vector_store: Vector store to index the documents into. - batch_size: Batch size to use when indexing. - cleanup: How to handle clean up of documents. - - Incremental: Cleans up all documents that haven't been updated AND - that are associated with source ids that were seen - during indexing. - Clean up is done continuously during indexing helping - to minimize the probability of users seeing duplicated - content. - - Full: Delete all documents that haven to been returned by the loader. - Clean up runs after all documents have been indexed. - This means that users may see duplicated content during indexing. - - None: Do not delete any documents. - source_id_key: Optional key that helps identify the original source - of the document. - cleanup_batch_size: Batch size to use when cleaning up documents. - force_update: Force update documents even if they are present in the - record manager. Useful if you are re-indexing with updated embeddings. - - Returns: - Indexing result which contains information about how many documents - were added, updated, deleted, or skipped. - """ - if cleanup not in {"incremental", "full", None}: - raise ValueError( - f"cleanup should be one of 'incremental', 'full' or None. " - f"Got {cleanup}." - ) - - if cleanup == "incremental" and source_id_key is None: - raise ValueError("Source id key is required when cleanup mode is incremental.") - - # Check that the Vectorstore has required methods implemented - methods = ["delete", "add_documents"] - - for method in methods: - if not hasattr(vector_store, method): - raise ValueError( - f"Vectorstore {vector_store} does not have required method {method}" - ) - - if type(vector_store).delete == VectorStore.delete: - # Checking if the vectorstore has overridden the default delete method - # implementation which just raises a NotImplementedError - raise ValueError("Vectorstore has not implemented the delete method") - - if isinstance(docs_source, BaseLoader): - try: - doc_iterator = docs_source.lazy_load() - except NotImplementedError: - doc_iterator = iter(docs_source.load()) - else: - doc_iterator = iter(docs_source) - - source_id_assigner = _get_source_id_assigner(source_id_key) - - # Mark when the update started. - index_start_dt = record_manager.get_time() - num_added = 0 - num_skipped = 0 - num_updated = 0 - num_deleted = 0 - - for doc_batch in _batch(batch_size, doc_iterator): - hashed_docs = list( - _deduplicate_in_order( - [_HashedDocument.from_document(doc) for doc in doc_batch] - ) - ) - - source_ids: Sequence[Optional[str]] = [ - source_id_assigner(doc) for doc in hashed_docs - ] - - if cleanup == "incremental": - # If the cleanup mode is incremental, source ids are required. - for source_id, hashed_doc in zip(source_ids, hashed_docs): - if source_id is None: - raise ValueError( - "Source ids are required when cleanup mode is incremental. " - f"Document that starts with " - f"content: {hashed_doc.page_content[:100]} was not assigned " - f"as source id." - ) - # source ids cannot be None after for loop above. - source_ids = cast(Sequence[str], source_ids) # type: ignore[assignment] - - exists_batch = record_manager.exists([doc.uid for doc in hashed_docs]) - - # Filter out documents that already exist in the record store. 
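The vector-store validation above boils down to one question: did the store actually override the base delete, which only raises NotImplementedError? hasattr is always true for methods inherited from VectorStore, so the meaningful check is identity against the base implementation. The same check in isolation (a sketch):

from langchain_core.vectorstores import VectorStore

def supports_cleanup(store: VectorStore) -> bool:
    # True only when the subclass ships its own delete() override.
    return type(store).delete is not VectorStore.delete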
- uids = [] - docs_to_index = [] - uids_to_refresh = [] - seen_docs: Set[str] = set() - for hashed_doc, doc_exists in zip(hashed_docs, exists_batch): - if doc_exists: - if force_update: - seen_docs.add(hashed_doc.uid) - else: - uids_to_refresh.append(hashed_doc.uid) - continue - uids.append(hashed_doc.uid) - docs_to_index.append(hashed_doc.to_document()) - - # Update refresh timestamp - if uids_to_refresh: - record_manager.update(uids_to_refresh, time_at_least=index_start_dt) - num_skipped += len(uids_to_refresh) - - # Be pessimistic and assume that all vector store write will fail. - # First write to vector store - if docs_to_index: - vector_store.add_documents(docs_to_index, ids=uids, batch_size=batch_size) - num_added += len(docs_to_index) - len(seen_docs) - num_updated += len(seen_docs) - - # And only then update the record store. - # Update ALL records, even if they already exist since we want to refresh - # their timestamp. - record_manager.update( - [doc.uid for doc in hashed_docs], - group_ids=source_ids, - time_at_least=index_start_dt, - ) - - # If source IDs are provided, we can do the deletion incrementally! - if cleanup == "incremental": - # Get the uids of the documents that were not returned by the loader. - - # mypy isn't good enough to determine that source ids cannot be None - # here due to a check that's happening above, so we check again. - for source_id in source_ids: - if source_id is None: - raise AssertionError("Source ids cannot be None here.") - - _source_ids = cast(Sequence[str], source_ids) - - uids_to_delete = record_manager.list_keys( - group_ids=_source_ids, before=index_start_dt - ) - if uids_to_delete: - # Then delete from vector store. - vector_store.delete(uids_to_delete) - # First delete from record store. - record_manager.delete_keys(uids_to_delete) - num_deleted += len(uids_to_delete) - - if cleanup == "full": - while uids_to_delete := record_manager.list_keys( - before=index_start_dt, limit=cleanup_batch_size - ): - # First delete from record store. - vector_store.delete(uids_to_delete) - # Then delete from record manager. - record_manager.delete_keys(uids_to_delete) - num_deleted += len(uids_to_delete) - - return { - "num_added": num_added, - "num_updated": num_updated, - "num_skipped": num_skipped, - "num_deleted": num_deleted, - } - - -# Define an asynchronous generator function -async def _to_async_iterator(iterator: Iterable[T]) -> AsyncIterator[T]: - """Convert an iterable to an async iterator.""" - for item in iterator: - yield item - - -async def aindex( - docs_source: Union[BaseLoader, Iterable[Document], AsyncIterator[Document]], - record_manager: RecordManager, - vector_store: VectorStore, - *, - batch_size: int = 100, - cleanup: Literal["incremental", "full", None] = None, - source_id_key: Union[str, Callable[[Document], str], None] = None, - cleanup_batch_size: int = 1_000, - force_update: bool = False, -) -> IndexingResult: - """Index data from the loader into the vector store. - - Indexing functionality uses a manager to keep track of which documents - are in the vector store. - - This allows us to keep track of which documents were updated, and which - documents were deleted, which documents should be skipped. - - For the time being, documents are indexed using their hashes, and users - are not able to specify the uid of the document. - - IMPORTANT: - if auto_cleanup is set to True, the loader should be returning - the entire dataset, and not just a subset of the dataset. 
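The cleanup == "full" branch above drains stale records in cleanup_batch_size chunks, removing vectors before dropping the bookkeeping rows. The same loop in isolation, using the RecordManager methods named in the deleted code (a sketch, not the canonical implementation):

from langchain_core.indexing import RecordManager
from langchain_core.vectorstores import VectorStore

def full_cleanup(
    record_manager: RecordManager,
    vector_store: VectorStore,
    before: float,
    batch_size: int = 1_000,
) -> int:
    """Delete every record stamped earlier than `before`, batch_size keys at a time."""
    num_deleted = 0
    while uids_to_delete := record_manager.list_keys(before=before, limit=batch_size):
        vector_store.delete(uids_to_delete)  # drop the vectors first
        record_manager.delete_keys(uids_to_delete)  # then forget the records
        num_deleted += len(uids_to_delete)
    return num_deleted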
- Otherwise, the auto_cleanup will remove documents that it is not - supposed to. - - Args: - docs_source: Data loader or iterable of documents to index. - record_manager: Timestamped set to keep track of which documents were - updated. - vector_store: Vector store to index the documents into. - batch_size: Batch size to use when indexing. - cleanup: How to handle clean up of documents. - - Incremental: Cleans up all documents that haven't been updated AND - that are associated with source ids that were seen - during indexing. - Clean up is done continuously during indexing helping - to minimize the probability of users seeing duplicated - content. - - Full: Delete all documents that haven to been returned by the loader. - Clean up runs after all documents have been indexed. - This means that users may see duplicated content during indexing. - - None: Do not delete any documents. - source_id_key: Optional key that helps identify the original source - of the document. - cleanup_batch_size: Batch size to use when cleaning up documents. - force_update: Force update documents even if they are present in the - record manager. Useful if you are re-indexing with updated embeddings. - - Returns: - Indexing result which contains information about how many documents - were added, updated, deleted, or skipped. - """ - - if cleanup not in {"incremental", "full", None}: - raise ValueError( - f"cleanup should be one of 'incremental', 'full' or None. " - f"Got {cleanup}." - ) - - if cleanup == "incremental" and source_id_key is None: - raise ValueError("Source id key is required when cleanup mode is incremental.") - - # Check that the Vectorstore has required methods implemented - methods = ["adelete", "aadd_documents"] - - for method in methods: - if not hasattr(vector_store, method): - raise ValueError( - f"Vectorstore {vector_store} does not have required method {method}" - ) - - if type(vector_store).adelete == VectorStore.adelete: - # Checking if the vectorstore has overridden the default delete method - # implementation which just raises a NotImplementedError - raise ValueError("Vectorstore has not implemented the delete method") - - async_doc_iterator: AsyncIterator[Document] - if isinstance(docs_source, BaseLoader): - try: - async_doc_iterator = docs_source.alazy_load() - except NotImplementedError: - # Exception triggered when neither lazy_load nor alazy_load are implemented. - # * The default implementation of alazy_load uses lazy_load. - # * The default implementation of lazy_load raises NotImplementedError. - # In such a case, we use the load method and convert it to an async - # iterator. - async_doc_iterator = _to_async_iterator(docs_source.load()) - else: - if hasattr(docs_source, "__aiter__"): - async_doc_iterator = docs_source # type: ignore[assignment] - else: - async_doc_iterator = _to_async_iterator(docs_source) - - source_id_assigner = _get_source_id_assigner(source_id_key) - - # Mark when the update started. - index_start_dt = await record_manager.aget_time() - num_added = 0 - num_skipped = 0 - num_updated = 0 - num_deleted = 0 - - async for doc_batch in _abatch(batch_size, async_doc_iterator): - hashed_docs = list( - _deduplicate_in_order( - [_HashedDocument.from_document(doc) for doc in doc_batch] - ) - ) - - source_ids: Sequence[Optional[str]] = [ - source_id_assigner(doc) for doc in hashed_docs - ] - - if cleanup == "incremental": - # If the cleanup mode is incremental, source ids are required. 
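When a loader implements neither alazy_load nor lazy_load, aindex falls back to wrapping the sync load() in a small adapter like _to_async_iterator above. The adapter on its own, runnable (a sketch):

import asyncio
from typing import AsyncIterator, Iterable, TypeVar

T = TypeVar("T")

async def to_async_iterator(iterator: Iterable[T]) -> AsyncIterator[T]:
    """Wrap a sync iterable so it can be consumed with `async for`."""
    for item in iterator:
        yield item

async def main() -> None:
    assert [x async for x in to_async_iterator(range(3))] == [0, 1, 2]

asyncio.run(main())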
- for source_id, hashed_doc in zip(source_ids, hashed_docs): - if source_id is None: - raise ValueError( - "Source ids are required when cleanup mode is incremental. " - f"Document that starts with " - f"content: {hashed_doc.page_content[:100]} was not assigned " - f"as source id." - ) - # source ids cannot be None after for loop above. - source_ids = cast(Sequence[str], source_ids) - - exists_batch = await record_manager.aexists([doc.uid for doc in hashed_docs]) - - # Filter out documents that already exist in the record store. - uids: list[str] = [] - docs_to_index: list[Document] = [] - uids_to_refresh = [] - seen_docs: Set[str] = set() - for hashed_doc, doc_exists in zip(hashed_docs, exists_batch): - if doc_exists: - if force_update: - seen_docs.add(hashed_doc.uid) - else: - uids_to_refresh.append(hashed_doc.uid) - continue - uids.append(hashed_doc.uid) - docs_to_index.append(hashed_doc.to_document()) - - if uids_to_refresh: - # Must be updated to refresh timestamp. - await record_manager.aupdate(uids_to_refresh, time_at_least=index_start_dt) - num_skipped += len(uids_to_refresh) - - # Be pessimistic and assume that all vector store write will fail. - # First write to vector store - if docs_to_index: - await vector_store.aadd_documents( - docs_to_index, ids=uids, batch_size=batch_size - ) - num_added += len(docs_to_index) - len(seen_docs) - num_updated += len(seen_docs) - - # And only then update the record store. - # Update ALL records, even if they already exist since we want to refresh - # their timestamp. - await record_manager.aupdate( - [doc.uid for doc in hashed_docs], - group_ids=source_ids, - time_at_least=index_start_dt, - ) - - # If source IDs are provided, we can do the deletion incrementally! - - if cleanup == "incremental": - # Get the uids of the documents that were not returned by the loader. - - # mypy isn't good enough to determine that source ids cannot be None - # here due to a check that's happening above, so we check again. - for source_id in source_ids: - if source_id is None: - raise AssertionError("Source ids cannot be None here.") - - _source_ids = cast(Sequence[str], source_ids) - - uids_to_delete = await record_manager.alist_keys( - group_ids=_source_ids, before=index_start_dt - ) - if uids_to_delete: - # Then delete from vector store. - await vector_store.adelete(uids_to_delete) - # First delete from record store. - await record_manager.adelete_keys(uids_to_delete) - num_deleted += len(uids_to_delete) - - if cleanup == "full": - while uids_to_delete := await record_manager.alist_keys( - before=index_start_dt, limit=cleanup_batch_size - ): - # First delete from record store. - await vector_store.adelete(uids_to_delete) - # Then delete from record manager. - await record_manager.adelete_keys(uids_to_delete) - num_deleted += len(uids_to_delete) - - return { - "num_added": num_added, - "num_updated": num_updated, - "num_skipped": num_skipped, - "num_deleted": num_deleted, - } +# Please do not use these in your application. These are private APIs. +# Here to avoid changing unit tests during a migration. 
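Of the three private helpers this migration shim keeps importable, _abatch is the genuinely async one; a standalone sketch reproducing the grouping behaviour that test_abatch earlier in this diff pins down:

import asyncio
from typing import AsyncIterable, AsyncIterator, List, TypeVar

T = TypeVar("T")

async def abatch(size: int, iterable: AsyncIterable[T]) -> AsyncIterator[List[T]]:
    """Yield lists of at most `size` items from an async stream."""
    batch: List[T] = []
    async for element in iterable:
        batch.append(element)
        if len(batch) >= size:
            yield batch
            batch = []
    if batch:
        yield batch

async def arange(n: int) -> AsyncIterator[int]:
    for i in range(n):
        yield i

async def main() -> None:
    assert [b async for b in abatch(5, arange(12))] == [
        [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11]
    ]

asyncio.run(main())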
+__all__ = ["_HashedDocument", "_abatch", "_batch"] diff --git a/libs/langchain/langchain/indexes/_sql_record_manager.py b/libs/langchain/langchain/indexes/_sql_record_manager.py index 435f34e11cc..a61be7776c9 100644 --- a/libs/langchain/langchain/indexes/_sql_record_manager.py +++ b/libs/langchain/langchain/indexes/_sql_record_manager.py @@ -18,6 +18,7 @@ import decimal import uuid from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence, Union +from langchain_core.indexing import RecordManager from sqlalchemy import ( URL, Column, @@ -41,8 +42,6 @@ from sqlalchemy.ext.asyncio import ( from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Query, Session, sessionmaker -from langchain.indexes.base import RecordManager - Base = declarative_base() diff --git a/libs/partners/ai21/tests/integration_tests/test_standard.py b/libs/partners/ai21/tests/integration_tests/test_standard.py index 5c62b025999..2070ba3a701 100644 --- a/libs/partners/ai21/tests/integration_tests/test_standard.py +++ b/libs/partners/ai21/tests/integration_tests/test_standard.py @@ -19,3 +19,25 @@ class TestAI21Standard(ChatModelIntegrationTests): return { "model": "j2-ultra", } + + @pytest.mark.xfail(reason="Emits AIMessage instead of AIMessageChunk.") + def test_stream( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + super().test_stream( + chat_model_class, + chat_model_params, + ) + + @pytest.mark.xfail(reason="Emits AIMessage instead of AIMessageChunk.") + async def test_astream( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + await super().test_astream( + chat_model_class, + chat_model_params, + ) diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index 5b2c715029a..29a75ba9bdd 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -580,7 +580,6 @@ class ChatAnthropic(BaseChatModel): formatted_tools = [convert_to_anthropic_tool(tool) for tool in tools] return self.bind(tools=formatted_tools, **kwargs) - @beta() def with_structured_output( self, schema: Union[Dict, Type[BaseModel]], diff --git a/libs/partners/fireworks/langchain_fireworks/chat_models.py b/libs/partners/fireworks/langchain_fireworks/chat_models.py index fc5960eea98..52dea080894 100644 --- a/libs/partners/fireworks/langchain_fireworks/chat_models.py +++ b/libs/partners/fireworks/langchain_fireworks/chat_models.py @@ -24,7 +24,6 @@ from typing import ( ) from fireworks.client import AsyncFireworks, Fireworks # type: ignore -from langchain_core._api import beta from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -671,7 +670,6 @@ class ChatFireworks(BaseChatModel): kwargs["tool_choice"] = tool_choice return super().bind(tools=formatted_tools, **kwargs) - @beta() def with_structured_output( self, schema: Optional[Union[Dict, Type[BaseModel]]] = None, diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py index 61fecd7b461..86db80e8e22 100644 --- a/libs/partners/groq/langchain_groq/chat_models.py +++ b/libs/partners/groq/langchain_groq/chat_models.py @@ -23,7 +23,6 @@ from typing import ( cast, ) -from langchain_core._api import beta from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -469,7 +468,7 @@ class 
ChatGroq(BaseChatModel): token_usage = output["token_usage"] if token_usage is not None: for k, v in token_usage.items(): - if k in overall_token_usage: + if k in overall_token_usage and v is not None: overall_token_usage[k] += v else: overall_token_usage[k] = v @@ -595,7 +594,6 @@ class ChatGroq(BaseChatModel): kwargs["tool_choice"] = tool_choice return super().bind(tools=formatted_tools, **kwargs) - @beta() def with_structured_output( self, schema: Optional[Union[Dict, Type[BaseModel]]] = None, diff --git a/libs/partners/groq/poetry.lock b/libs/partners/groq/poetry.lock index 6fb18686953..8082af3019f 100644 --- a/libs/partners/groq/poetry.lock +++ b/libs/partners/groq/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.2.0" +version = "4.3.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"}, - {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"}, + {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, + {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, ] [package.dependencies] @@ -38,13 +38,13 @@ trio = ["trio (>=0.23)"] [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -176,24 +176,24 @@ files = [ [[package]] name = "distro" -version = "1.8.0" +version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" files = [ - {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, - {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -201,13 +201,13 @@ test = ["pytest (>=6)"] [[package]] name = "groq" -version = "0.4.1" +version = "0.5.0" description = "The official Python library for the groq API" optional = false python-versions = ">=3.7" files = [ - {file = "groq-0.4.1-py3-none-any.whl", hash = "sha256:2939ff96e3fc633416e5d9ab26bbfd63c70b2226338a507f8c2b0b3aa27c9dec"}, - {file = "groq-0.4.1.tar.gz", hash = "sha256:f2285c0a7d64abefcdec3d61e8bc1a61ff04d887ed30b991ac7fe53ae1e10251"}, + {file = "groq-0.5.0-py3-none-any.whl", hash = "sha256:a7e6be1118bcdfea3ed071ec00f505a34d4e6ec28c435adb5a5afd33545683a1"}, + {file = "groq-0.5.0.tar.gz", hash = "sha256:d476cdc3383b45d2a4dc1876142a9542e663ea1029f9e07a05de24f895cae48c"}, ] [package.dependencies] @@ -231,13 +231,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.2" +version = "1.0.5" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, - {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [package.dependencies] @@ -248,17 +248,17 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.23.0)"] +trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.25.2" +version = "0.27.0" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, - {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, ] [package.dependencies] @@ -276,13 +276,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -323,7 +323,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.1.42" +version = "0.1.45" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -364,16 +364,17 @@ url = "../../standard-tests" [[package]] name = "langsmith" -version = "0.1.4" +version = "0.1.50" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.4-py3-none-any.whl", hash = "sha256:13ea90c030a3ef472e00f4dd31b9c89f165f98f0c870309eca3366c93fcaa29f"}, - {file = "langsmith-0.1.4.tar.gz", hash = "sha256:b45ea1001f67c4c233b3521eb578326863e32e0eb738e52900c035261deec368"}, + {file = "langsmith-0.1.50-py3-none-any.whl", hash = "sha256:a81e9809fcaa277bfb314d729e58116554f186d1478fcfdf553b1c2ccce54b85"}, + {file = "langsmith-0.1.50.tar.gz", hash = "sha256:9fd22df8c689c044058536ea5af66f5302067e7551b60d7a335fede8d479572b"}, ] [package.dependencies] +orjson = ">=3.9.14,<4.0.0" pydantic = ">=1,<3" requests = ">=2,<3" @@ -438,6 +439,66 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "orjson" +version = "3.10.1" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8ec2fc456d53ea4a47768f622bb709be68acd455b0c6be57e91462259741c4f3"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e900863691d327758be14e2a491931605bd0aded3a21beb6ce133889830b659"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab6ecbd6fe57785ebc86ee49e183f37d45f91b46fc601380c67c5c5e9c0014a2"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af7c68b01b876335cccfb4eee0beef2b5b6eae1945d46a09a7c24c9faac7a77"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:915abfb2e528677b488a06eba173e9d7706a20fdfe9cdb15890b74ef9791b85e"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3fd4a36eff9c63d25503b439531d21828da9def0059c4f472e3845a081aa0b"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d229564e72cfc062e6481a91977a5165c5a0fdce11ddc19ced8471847a67c517"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9e00495b18304173ac843b5c5fbea7b6f7968564d0d49bef06bfaeca4b656f4e"}, + {file = "orjson-3.10.1-cp310-none-win32.whl", hash = "sha256:fd78ec55179545c108174ba19c1795ced548d6cac4d80d014163033c047ca4ea"}, + {file = "orjson-3.10.1-cp310-none-win_amd64.whl", hash = "sha256:50ca42b40d5a442a9e22eece8cf42ba3d7cd4cd0f2f20184b4d7682894f05eec"}, + {file = "orjson-3.10.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b345a3d6953628df2f42502297f6c1e1b475cfbf6268013c94c5ac80e8abc04c"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caa7395ef51af4190d2c70a364e2f42138e0e5fcb4bc08bc9b76997659b27dab"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b01d701decd75ae092e5f36f7b88a1e7a1d3bb7c9b9d7694de850fb155578d5a"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5028981ba393f443d8fed9049211b979cadc9d0afecf162832f5a5b152c6297"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31ff6a222ea362b87bf21ff619598a4dc1106aaafaea32b1c4876d692891ec27"}, + {file = 
"orjson-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e852a83d7803d3406135fb7a57cf0c1e4a3e73bac80ec621bd32f01c653849c5"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2567bc928ed3c3fcd90998009e8835de7c7dc59aabcf764b8374d36044864f3b"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4ce98cac60b7bb56457bdd2ed7f0d5d7f242d291fdc0ca566c83fa721b52e92d"}, + {file = "orjson-3.10.1-cp311-none-win32.whl", hash = "sha256:813905e111318acb356bb8029014c77b4c647f8b03f314e7b475bd9ce6d1a8ce"}, + {file = "orjson-3.10.1-cp311-none-win_amd64.whl", hash = "sha256:03a3ca0b3ed52bed1a869163a4284e8a7b0be6a0359d521e467cdef7e8e8a3ee"}, + {file = "orjson-3.10.1-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f02c06cee680b1b3a8727ec26c36f4b3c0c9e2b26339d64471034d16f74f4ef5"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1aa2f127ac546e123283e437cc90b5ecce754a22306c7700b11035dad4ccf85"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2cf29b4b74f585225196944dffdebd549ad2af6da9e80db7115984103fb18a96"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1b130c20b116f413caf6059c651ad32215c28500dce9cd029a334a2d84aa66f"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d31f9a709e6114492136e87c7c6da5e21dfedebefa03af85f3ad72656c493ae9"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1d169461726f271ab31633cf0e7e7353417e16fb69256a4f8ecb3246a78d6e"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57c294d73825c6b7f30d11c9e5900cfec9a814893af7f14efbe06b8d0f25fba9"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d7f11dbacfa9265ec76b4019efffabaabba7a7ebf14078f6b4df9b51c3c9a8ea"}, + {file = "orjson-3.10.1-cp312-none-win32.whl", hash = "sha256:d89e5ed68593226c31c76ab4de3e0d35c760bfd3fbf0a74c4b2be1383a1bf123"}, + {file = "orjson-3.10.1-cp312-none-win_amd64.whl", hash = "sha256:aa76c4fe147fd162107ce1692c39f7189180cfd3a27cfbc2ab5643422812da8e"}, + {file = "orjson-3.10.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a2c6a85c92d0e494c1ae117befc93cf8e7bca2075f7fe52e32698da650b2c6d1"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9813f43da955197d36a7365eb99bed42b83680801729ab2487fef305b9ced866"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec917b768e2b34b7084cb6c68941f6de5812cc26c6f1a9fecb728e36a3deb9e8"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5252146b3172d75c8a6d27ebca59c9ee066ffc5a277050ccec24821e68742fdf"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:536429bb02791a199d976118b95014ad66f74c58b7644d21061c54ad284e00f4"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dfed3c3e9b9199fb9c3355b9c7e4649b65f639e50ddf50efdf86b45c6de04b5"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2b230ec35f188f003f5b543644ae486b2998f6afa74ee3a98fc8ed2e45960afc"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:01234249ba19c6ab1eb0b8be89f13ea21218b2d72d496ef085cfd37e1bae9dd8"}, + {file = "orjson-3.10.1-cp38-none-win32.whl", hash = "sha256:8a884fbf81a3cc22d264ba780920d4885442144e6acaa1411921260416ac9a54"}, + {file = "orjson-3.10.1-cp38-none-win_amd64.whl", hash = "sha256:dab5f802d52b182163f307d2b1f727d30b1762e1923c64c9c56dd853f9671a49"}, + {file = "orjson-3.10.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a51fd55d4486bc5293b7a400f9acd55a2dc3b5fc8420d5ffe9b1d6bb1a056a5e"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53521542a6db1411b3bfa1b24ddce18605a3abdc95a28a67b33f9145f26aa8f2"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27d610df96ac18ace4931411d489637d20ab3b8f63562b0531bba16011998db0"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79244b1456e5846d44e9846534bd9e3206712936d026ea8e6a55a7374d2c0694"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d751efaa8a49ae15cbebdda747a62a9ae521126e396fda8143858419f3b03610"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27ff69c620a4fff33267df70cfd21e0097c2a14216e72943bd5414943e376d77"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ebc58693464146506fde0c4eb1216ff6d4e40213e61f7d40e2f0dde9b2f21650"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5be608c3972ed902e0143a5b8776d81ac1059436915d42defe5c6ae97b3137a4"}, + {file = "orjson-3.10.1-cp39-none-win32.whl", hash = "sha256:4ae10753e7511d359405aadcbf96556c86e9dbf3a948d26c2c9f9a150c52b091"}, + {file = "orjson-3.10.1-cp39-none-win_amd64.whl", hash = "sha256:fb5bc4caa2c192077fdb02dce4e5ef8639e7f20bec4e3a834346693907362932"}, + {file = "orjson-3.10.1.tar.gz", hash = "sha256:a883b28d73370df23ed995c466b4f6c708c1f7a9bdc400fe89165c96c7603204"}, +] + [[package]] name = "packaging" version = "23.2" @@ -451,13 +512,13 @@ files = [ [[package]] name = "pluggy" -version = "1.3.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -466,18 +527,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.5.2" +version = "2.7.1" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-2.5.2-py3-none-any.whl", hash = "sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0"}, - {file = "pydantic-2.5.2.tar.gz", hash = "sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd"}, + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = 
"sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.14.5" +pydantic-core = "2.18.2" typing-extensions = ">=4.6.1" [package.extras] @@ -485,116 +546,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.14.5" -description = "" +version = "2.18.2" +description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.14.5-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd"}, - {file = "pydantic_core-2.14.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66"}, - {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997"}, - {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093"}, - {file = "pydantic_core-2.14.5-cp310-none-win32.whl", hash = "sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720"}, - {file = "pydantic_core-2.14.5-cp310-none-win_amd64.whl", hash = "sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b"}, - {file = "pydantic_core-2.14.5-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459"}, - {file = "pydantic_core-2.14.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26"}, - {file = 
"pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4"}, - {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada"}, - {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda"}, - {file = "pydantic_core-2.14.5-cp311-none-win32.whl", hash = "sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651"}, - {file = "pydantic_core-2.14.5-cp311-none-win_amd64.whl", hash = "sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077"}, - {file = "pydantic_core-2.14.5-cp311-none-win_arm64.whl", hash = "sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf"}, - {file = "pydantic_core-2.14.5-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093"}, - {file = "pydantic_core-2.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e"}, - {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69"}, - {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d"}, - {file = "pydantic_core-2.14.5-cp312-none-win32.whl", hash = "sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260"}, - {file = "pydantic_core-2.14.5-cp312-none-win_amd64.whl", hash = "sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36"}, - {file = "pydantic_core-2.14.5-cp312-none-win_arm64.whl", hash = "sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325"}, - {file = "pydantic_core-2.14.5-cp37-none-win32.whl", hash = "sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405"}, - {file = "pydantic_core-2.14.5-cp37-none-win_amd64.whl", hash = "sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588"}, - {file = "pydantic_core-2.14.5-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf"}, - {file = "pydantic_core-2.14.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b"}, - {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec"}, - {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124"}, - {file = "pydantic_core-2.14.5-cp38-none-win32.whl", hash = "sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867"}, - {file = "pydantic_core-2.14.5-cp38-none-win_amd64.whl", hash = "sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d"}, - {file = "pydantic_core-2.14.5-cp39-cp39-macosx_10_7_x86_64.whl", hash = 
"sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7"}, - {file = "pydantic_core-2.14.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955"}, - {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5"}, - {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209"}, - {file = "pydantic_core-2.14.5-cp39-none-win32.whl", hash = "sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6"}, - {file = "pydantic_core-2.14.5-cp39-none-win_amd64.whl", hash = "sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7"}, - {file = 
"pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3"}, - {file = "pydantic_core-2.14.5.tar.gz", hash = "sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = 
"pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = 
"pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, ] [package.dependencies] @@ -602,13 +637,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pytest" -version = "7.4.3" +version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, - {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, ] [package.dependencies] @@ -642,30 +677,30 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy [[package]] name = "pytest-mock" -version = "3.12.0" +version = "3.14.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, - {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file 
= "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, ] [package.dependencies] -pytest = ">=5.0" +pytest = ">=6.2.5" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] [[package]] name = "pytest-watcher" -version = "0.3.4" +version = "0.3.5" description = "Automatically rerun your tests on file modifications" optional = false python-versions = ">=3.7.0,<4.0.0" files = [ - {file = "pytest_watcher-0.3.4-py3-none-any.whl", hash = "sha256:edd2bd9c8a1fb14d48c9f4947234065eb9b4c1acedc0bf213b1f12501dfcffd3"}, - {file = "pytest_watcher-0.3.4.tar.gz", hash = "sha256:d39491ba15b589221bb9a78ef4bed3d5d1503aed08209b1a138aeb95b9117a18"}, + {file = "pytest_watcher-0.3.5-py3-none-any.whl", hash = "sha256:af00ca52c7be22dc34c0fd3d7ffef99057207a73b05dc5161fe3b2fe91f58130"}, + {file = "pytest_watcher-0.3.5.tar.gz", hash = "sha256:8896152460ba2b1a8200c12117c6611008ec96c8b2d811f0a05ab8a82b043ff8"}, ] [package.dependencies] @@ -755,39 +790,39 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.1.8" +version = "0.1.15" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.8-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7de792582f6e490ae6aef36a58d85df9f7a0cfd1b0d4fe6b4fb51803a3ac96fa"}, - {file = "ruff-0.1.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8e3255afd186c142eef4ec400d7826134f028a85da2146102a1172ecc7c3696"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff78a7583020da124dd0deb835ece1d87bb91762d40c514ee9b67a087940528b"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd8ee69b02e7bdefe1e5da2d5b6eaaddcf4f90859f00281b2333c0e3a0cc9cd6"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a05b0ddd7ea25495e4115a43125e8a7ebed0aa043c3d432de7e7d6e8e8cd6448"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e6f08ca730f4dc1b76b473bdf30b1b37d42da379202a059eae54ec7fc1fbcfed"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f35960b02df6b827c1b903091bb14f4b003f6cf102705efc4ce78132a0aa5af3"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d076717c67b34c162da7c1a5bda16ffc205e0e0072c03745275e7eab888719f"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6a21ab023124eafb7cef6d038f835cb1155cd5ea798edd8d9eb2f8b84be07d9"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ce697c463458555027dfb194cb96d26608abab920fa85213deb5edf26e026664"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:db6cedd9ffed55548ab313ad718bc34582d394e27a7875b4b952c2d29c001b26"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:05ffe9dbd278965271252704eddb97b4384bf58b971054d517decfbf8c523f05"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5daaeaf00ae3c1efec9742ff294b06c3a2a9db8d3db51ee4851c12ad385cda30"}, - {file = "ruff-0.1.8-py3-none-win32.whl", hash = "sha256:e49fbdfe257fa41e5c9e13c79b9e79a23a79bd0e40b9314bc53840f520c2c0b3"}, - {file = "ruff-0.1.8-py3-none-win_amd64.whl", hash = "sha256:f41f692f1691ad87f51708b823af4bb2c5c87c9248ddd3191c8f088e66ce590a"}, - {file = 
"ruff-0.1.8-py3-none-win_arm64.whl", hash = "sha256:aa8ee4f8440023b0a6c3707f76cadce8657553655dcbb5fc9b2f9bb9bee389f6"}, - {file = "ruff-0.1.8.tar.gz", hash = "sha256:f7ee467677467526cfe135eab86a40a0e8db43117936ac4f9b469ce9cdb3fb62"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, + {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, + {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, + {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, + {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, ] [[package]] name = "sniffio" -version = "1.3.0" +version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" files = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] [[package]] @@ -817,65 +852,68 @@ files = [ [[package]] name = "typing-extensions" -version = 
"4.9.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, - {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" -version = "3.0.0" +version = "4.0.0" description = "Filesystem events monitoring" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, - {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, - {file = 
"watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, - {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, - {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, - {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, - {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, - {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, - {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, - {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, ] [package.extras] @@ -884,4 +922,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "1692a375c2817216876453275294e5aa2500364b7e36ae2b4b0ec1fe1837402e" +content-hash = "f7c1b8895e465d5faf7988bf946b0fa95509835b95629f442ad57adb116022df" diff --git a/libs/partners/groq/pyproject.toml b/libs/partners/groq/pyproject.toml index 16bfa90a1ff..eb9b632a659 100644 --- a/libs/partners/groq/pyproject.toml +++ b/libs/partners/groq/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-groq" -version = "0.1.2" +version = "0.1.3" description = "An integration package connecting Groq and LangChain" authors = [] readme = "README.md" @@ -12,7 +12,7 @@ license = "MIT" [tool.poetry.dependencies] python = 
">=3.8.1,<4.0" -langchain-core = "^0.1.42" +langchain-core = "^0.1.45" groq = ">=0.4.1,<1" [tool.poetry.group.test] @@ -24,7 +24,7 @@ pytest-mock = "^3.10.0" pytest-watcher = "^0.3.4" pytest-asyncio = "^0.21.1" langchain-core = { path = "../../core", develop = true } -langchain-standard-tests = {path = "../../standard-tests", develop = true} +langchain-standard-tests = { path = "../../standard-tests", develop = true } [tool.poetry.group.codespell] optional = true @@ -89,7 +89,7 @@ markers = [ filterwarnings = [ "error", 'ignore::ResourceWarning:', - 'ignore:The function `with_structured_output` is in beta', + 'ignore:The method `ChatGroq.with_structured_output` is in beta', # Maintain support for pydantic 1.X 'default:The `dict` method is deprecated; use `model_dump` instead:DeprecationWarning', ] diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py index 83ca841caa2..4048f7e8f6a 100644 --- a/libs/partners/groq/tests/integration_tests/test_standard.py +++ b/libs/partners/groq/tests/integration_tests/test_standard.py @@ -13,3 +13,14 @@ class TestMistralStandard(ChatModelIntegrationTests): @pytest.fixture def chat_model_class(self) -> Type[BaseChatModel]: return ChatGroq + + @pytest.mark.xfail(reason="Not yet implemented.") + def test_tool_message_histories_list_content( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + chat_model_has_tool_calling: bool, + ) -> None: + super().test_tool_message_histories_list_content( + chat_model_class, chat_model_params, chat_model_has_tool_calling + ) diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py index ab3027c94d9..a00943d2c00 100644 --- a/libs/partners/mistralai/langchain_mistralai/chat_models.py +++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py @@ -22,7 +22,6 @@ from typing import ( import httpx from httpx_sse import EventSource, aconnect_sse, connect_sse -from langchain_core._api import beta from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -588,7 +587,6 @@ class ChatMistralAI(BaseChatModel): formatted_tools = [convert_to_openai_tool(tool) for tool in tools] return super().bind(tools=formatted_tools, **kwargs) - @beta() def with_structured_output( self, schema: Union[Dict, Type[BaseModel]], diff --git a/libs/partners/mistralai/langchain_mistralai/embeddings.py b/libs/partners/mistralai/langchain_mistralai/embeddings.py index 4a6c28b753d..2003cd7e43c 100644 --- a/libs/partners/mistralai/langchain_mistralai/embeddings.py +++ b/libs/partners/mistralai/langchain_mistralai/embeddings.py @@ -1,5 +1,6 @@ import asyncio import logging +import warnings from typing import Dict, Iterable, List, Optional import httpx @@ -19,6 +20,13 @@ logger = logging.getLogger(__name__) MAX_TOKENS = 16_000 +class DummyTokenizer: + """Dummy tokenizer for when tokenizer cannot be accessed (e.g., via Huggingface)""" + + def encode_batch(self, texts: List[str]) -> List[List[str]]: + return [list(text) for text in texts] + + class MistralAIEmbeddings(BaseModel, Embeddings): """MistralAI embedding models. 
@@ -83,9 +91,18 @@ class MistralAIEmbeddings(BaseModel, Embeddings): timeout=values["timeout"], ) if values["tokenizer"] is None: - values["tokenizer"] = Tokenizer.from_pretrained( - "mistralai/Mixtral-8x7B-v0.1" - ) + try: + values["tokenizer"] = Tokenizer.from_pretrained( + "mistralai/Mixtral-8x7B-v0.1" + ) + except IOError: # huggingface_hub GatedRepoError + warnings.warn( + "Could not download mistral tokenizer from Huggingface for " + "calculating batch sizes. Set a Huggingface token via the " + "HF_TOKEN environment variable to download the real tokenizer. " + "Falling back to a dummy tokenizer that uses `len()`." + ) + values["tokenizer"] = DummyTokenizer() return values def _get_batches(self, texts: List[str]) -> Iterable[List[str]]: @@ -100,7 +117,10 @@ class MistralAIEmbeddings(BaseModel, Embeddings): for text, text_tokens in zip(texts, text_token_lengths): if batch_tokens + text_tokens > MAX_TOKENS: - yield batch + if len(batch) > 0: + # edge case where first batch exceeds max tokens + # should not yield an empty batch. + yield batch batch = [text] batch_tokens = text_tokens else: diff --git a/libs/partners/mistralai/poetry.lock b/libs/partners/mistralai/poetry.lock index a381bec597e..0d2b2118573 100644 --- a/libs/partners/mistralai/poetry.lock +++ b/libs/partners/mistralai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -176,13 +176,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -342,13 +342,13 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -389,7 +389,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.1.42" +version = "0.1.45" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -430,13 +430,13 @@ url = "../../standard-tests" [[package]] name = "langsmith" -version = "0.1.42" +version = "0.1.50" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.42-py3-none-any.whl", hash = "sha256:1101c3b5cbd9e8d65471f32fbb99736403f1bc30954fdd233b2991a40c65aa03"}, - {file = "langsmith-0.1.42.tar.gz", hash = "sha256:e41236fd043c83a39329913ec607ae31cd46dad78a09c4924eab4a29e954da17"}, + {file = "langsmith-0.1.50-py3-none-any.whl", hash = "sha256:a81e9809fcaa277bfb314d729e58116554f186d1478fcfdf553b1c2ccce54b85"}, + {file = "langsmith-0.1.50.tar.gz", hash = "sha256:9fd22df8c689c044058536ea5af66f5302067e7551b60d7a335fede8d479572b"}, ] [package.dependencies] @@ -507,62 +507,62 @@ files = [ [[package]] name = "orjson" -version = "3.10.0" +version = "3.10.1" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.0-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:47af5d4b850a2d1328660661f0881b67fdbe712aea905dadd413bdea6f792c33"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c90681333619d78360d13840c7235fdaf01b2b129cb3a4f1647783b1971542b6"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:400c5b7c4222cb27b5059adf1fb12302eebcabf1978f33d0824aa5277ca899bd"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5dcb32e949eae80fb335e63b90e5808b4b0f64e31476b3777707416b41682db5"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7d507c7493252c0a0264b5cc7e20fa2f8622b8a83b04d819b5ce32c97cf57b"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e286a51def6626f1e0cc134ba2067dcf14f7f4b9550f6dd4535fd9d79000040b"}, - {file = "orjson-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8acd4b82a5f3a3ec8b1dc83452941d22b4711964c34727eb1e65449eead353ca"}, - {file = "orjson-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:30707e646080dd3c791f22ce7e4a2fc2438765408547c10510f1f690bd336217"}, - {file = "orjson-3.10.0-cp310-none-win32.whl", hash = "sha256:115498c4ad34188dcb73464e8dc80e490a3e5e88a925907b6fedcf20e545001a"}, - {file = "orjson-3.10.0-cp310-none-win_amd64.whl", hash = "sha256:6735dd4a5a7b6df00a87d1d7a02b84b54d215fb7adac50dd24da5997ffb4798d"}, - {file = "orjson-3.10.0-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9587053e0cefc284e4d1cd113c34468b7d3f17666d22b185ea654f0775316a26"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bef1050b1bdc9ea6c0d08468e3e61c9386723633b397e50b82fda37b3563d72"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d16c6963ddf3b28c0d461641517cd312ad6b3cf303d8b87d5ef3fa59d6844337"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4251964db47ef090c462a2d909f16c7c7d5fe68e341dabce6702879ec26d1134"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73bbbdc43d520204d9ef0817ac03fa49c103c7f9ea94f410d2950755be2c349c"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:414e5293b82373606acf0d66313aecb52d9c8c2404b1900683eb32c3d042dbd7"}, - {file = "orjson-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:feaed5bb09877dc27ed0d37f037ddef6cb76d19aa34b108db270d27d3d2ef747"}, - {file = "orjson-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5127478260db640323cea131ee88541cb1a9fbce051f0b22fa2f0892f44da302"}, - {file = "orjson-3.10.0-cp311-none-win32.whl", hash = "sha256:b98345529bafe3c06c09996b303fc0a21961820d634409b8639bc16bd4f21b63"}, - {file = "orjson-3.10.0-cp311-none-win_amd64.whl", hash = "sha256:658ca5cee3379dd3d37dbacd43d42c1b4feee99a29d847ef27a1cb18abdfb23f"}, - {file = "orjson-3.10.0-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4329c1d24fd130ee377e32a72dc54a3c251e6706fccd9a2ecb91b3606fddd998"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef0f19fdfb6553342b1882f438afd53c7cb7aea57894c4490c43e4431739c700"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4f60db24161534764277f798ef53b9d3063092f6d23f8f962b4a97edfa997a0"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1de3fd5c7b208d836f8ecb4526995f0d5877153a4f6f12f3e9bf11e49357de98"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f93e33f67729d460a177ba285002035d3f11425ed3cebac5f6ded4ef36b28344"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:237ba922aef472761acd697eef77fef4831ab769a42e83c04ac91e9f9e08fa0e"}, - {file = "orjson-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98c1bfc6a9bec52bc8f0ab9b86cc0874b0299fccef3562b793c1576cf3abb570"}, - {file = "orjson-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:30d795a24be16c03dca0c35ca8f9c8eaaa51e3342f2c162d327bd0225118794a"}, - {file = "orjson-3.10.0-cp312-none-win32.whl", hash = "sha256:6a3f53dc650bc860eb26ec293dfb489b2f6ae1cbfc409a127b01229980e372f7"}, - {file = "orjson-3.10.0-cp312-none-win_amd64.whl", hash = "sha256:983db1f87c371dc6ffc52931eb75f9fe17dc621273e43ce67bee407d3e5476e9"}, - {file = "orjson-3.10.0-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9a667769a96a72ca67237224a36faf57db0c82ab07d09c3aafc6f956196cfa1b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade1e21dfde1d37feee8cf6464c20a2f41fa46c8bcd5251e761903e46102dc6b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23c12bb4ced1c3308eff7ba5c63ef8f0edb3e4c43c026440247dd6c1c61cea4b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2d014cf8d4dc9f03fc9f870de191a49a03b1bcda51f2a957943fb9fafe55aac"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eadecaa16d9783affca33597781328e4981b048615c2ddc31c47a51b833d6319"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd583341218826f48bd7c6ebf3310b4126216920853cbc471e8dbeaf07b0b80e"}, - {file = "orjson-3.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:90bfc137c75c31d32308fd61951d424424426ddc39a40e367704661a9ee97095"}, - {file = "orjson-3.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13b5d3c795b09a466ec9fcf0bd3ad7b85467d91a60113885df7b8d639a9d374b"}, - {file = "orjson-3.10.0-cp38-none-win32.whl", hash = "sha256:5d42768db6f2ce0162544845facb7c081e9364a5eb6d2ef06cd17f6050b048d8"}, - {file = 
"orjson-3.10.0-cp38-none-win_amd64.whl", hash = "sha256:33e6655a2542195d6fd9f850b428926559dee382f7a862dae92ca97fea03a5ad"}, - {file = "orjson-3.10.0-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4050920e831a49d8782a1720d3ca2f1c49b150953667eed6e5d63a62e80f46a2"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1897aa25a944cec774ce4a0e1c8e98fb50523e97366c637b7d0cddabc42e6643"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9bf565a69e0082ea348c5657401acec3cbbb31564d89afebaee884614fba36b4"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b6ebc17cfbbf741f5c1a888d1854354536f63d84bee537c9a7c0335791bb9009"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2817877d0b69f78f146ab305c5975d0618df41acf8811249ee64231f5953fee"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57d017863ec8aa4589be30a328dacd13c2dc49de1c170bc8d8c8a98ece0f2925"}, - {file = "orjson-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:22c2f7e377ac757bd3476ecb7480c8ed79d98ef89648f0176deb1da5cd014eb7"}, - {file = "orjson-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e62ba42bfe64c60c1bc84799944f80704e996592c6b9e14789c8e2a303279912"}, - {file = "orjson-3.10.0-cp39-none-win32.whl", hash = "sha256:60c0b1bdbccd959ebd1575bd0147bd5e10fc76f26216188be4a36b691c937077"}, - {file = "orjson-3.10.0-cp39-none-win_amd64.whl", hash = "sha256:175a41500ebb2fdf320bf78e8b9a75a1279525b62ba400b2b2444e274c2c8bee"}, - {file = "orjson-3.10.0.tar.gz", hash = "sha256:ba4d8cac5f2e2cff36bea6b6481cdb92b38c202bcec603d6f5ff91960595a1ed"}, + {file = "orjson-3.10.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8ec2fc456d53ea4a47768f622bb709be68acd455b0c6be57e91462259741c4f3"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e900863691d327758be14e2a491931605bd0aded3a21beb6ce133889830b659"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab6ecbd6fe57785ebc86ee49e183f37d45f91b46fc601380c67c5c5e9c0014a2"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af7c68b01b876335cccfb4eee0beef2b5b6eae1945d46a09a7c24c9faac7a77"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:915abfb2e528677b488a06eba173e9d7706a20fdfe9cdb15890b74ef9791b85e"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3fd4a36eff9c63d25503b439531d21828da9def0059c4f472e3845a081aa0b"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d229564e72cfc062e6481a91977a5165c5a0fdce11ddc19ced8471847a67c517"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9e00495b18304173ac843b5c5fbea7b6f7968564d0d49bef06bfaeca4b656f4e"}, + {file = "orjson-3.10.1-cp310-none-win32.whl", hash = "sha256:fd78ec55179545c108174ba19c1795ced548d6cac4d80d014163033c047ca4ea"}, + {file = "orjson-3.10.1-cp310-none-win_amd64.whl", hash = "sha256:50ca42b40d5a442a9e22eece8cf42ba3d7cd4cd0f2f20184b4d7682894f05eec"}, + {file = "orjson-3.10.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:b345a3d6953628df2f42502297f6c1e1b475cfbf6268013c94c5ac80e8abc04c"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caa7395ef51af4190d2c70a364e2f42138e0e5fcb4bc08bc9b76997659b27dab"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b01d701decd75ae092e5f36f7b88a1e7a1d3bb7c9b9d7694de850fb155578d5a"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5028981ba393f443d8fed9049211b979cadc9d0afecf162832f5a5b152c6297"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31ff6a222ea362b87bf21ff619598a4dc1106aaafaea32b1c4876d692891ec27"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e852a83d7803d3406135fb7a57cf0c1e4a3e73bac80ec621bd32f01c653849c5"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2567bc928ed3c3fcd90998009e8835de7c7dc59aabcf764b8374d36044864f3b"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4ce98cac60b7bb56457bdd2ed7f0d5d7f242d291fdc0ca566c83fa721b52e92d"}, + {file = "orjson-3.10.1-cp311-none-win32.whl", hash = "sha256:813905e111318acb356bb8029014c77b4c647f8b03f314e7b475bd9ce6d1a8ce"}, + {file = "orjson-3.10.1-cp311-none-win_amd64.whl", hash = "sha256:03a3ca0b3ed52bed1a869163a4284e8a7b0be6a0359d521e467cdef7e8e8a3ee"}, + {file = "orjson-3.10.1-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f02c06cee680b1b3a8727ec26c36f4b3c0c9e2b26339d64471034d16f74f4ef5"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1aa2f127ac546e123283e437cc90b5ecce754a22306c7700b11035dad4ccf85"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2cf29b4b74f585225196944dffdebd549ad2af6da9e80db7115984103fb18a96"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1b130c20b116f413caf6059c651ad32215c28500dce9cd029a334a2d84aa66f"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d31f9a709e6114492136e87c7c6da5e21dfedebefa03af85f3ad72656c493ae9"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1d169461726f271ab31633cf0e7e7353417e16fb69256a4f8ecb3246a78d6e"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57c294d73825c6b7f30d11c9e5900cfec9a814893af7f14efbe06b8d0f25fba9"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d7f11dbacfa9265ec76b4019efffabaabba7a7ebf14078f6b4df9b51c3c9a8ea"}, + {file = "orjson-3.10.1-cp312-none-win32.whl", hash = "sha256:d89e5ed68593226c31c76ab4de3e0d35c760bfd3fbf0a74c4b2be1383a1bf123"}, + {file = "orjson-3.10.1-cp312-none-win_amd64.whl", hash = "sha256:aa76c4fe147fd162107ce1692c39f7189180cfd3a27cfbc2ab5643422812da8e"}, + {file = "orjson-3.10.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a2c6a85c92d0e494c1ae117befc93cf8e7bca2075f7fe52e32698da650b2c6d1"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9813f43da955197d36a7365eb99bed42b83680801729ab2487fef305b9ced866"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:ec917b768e2b34b7084cb6c68941f6de5812cc26c6f1a9fecb728e36a3deb9e8"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5252146b3172d75c8a6d27ebca59c9ee066ffc5a277050ccec24821e68742fdf"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:536429bb02791a199d976118b95014ad66f74c58b7644d21061c54ad284e00f4"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dfed3c3e9b9199fb9c3355b9c7e4649b65f639e50ddf50efdf86b45c6de04b5"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2b230ec35f188f003f5b543644ae486b2998f6afa74ee3a98fc8ed2e45960afc"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:01234249ba19c6ab1eb0b8be89f13ea21218b2d72d496ef085cfd37e1bae9dd8"}, + {file = "orjson-3.10.1-cp38-none-win32.whl", hash = "sha256:8a884fbf81a3cc22d264ba780920d4885442144e6acaa1411921260416ac9a54"}, + {file = "orjson-3.10.1-cp38-none-win_amd64.whl", hash = "sha256:dab5f802d52b182163f307d2b1f727d30b1762e1923c64c9c56dd853f9671a49"}, + {file = "orjson-3.10.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a51fd55d4486bc5293b7a400f9acd55a2dc3b5fc8420d5ffe9b1d6bb1a056a5e"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53521542a6db1411b3bfa1b24ddce18605a3abdc95a28a67b33f9145f26aa8f2"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27d610df96ac18ace4931411d489637d20ab3b8f63562b0531bba16011998db0"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79244b1456e5846d44e9846534bd9e3206712936d026ea8e6a55a7374d2c0694"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d751efaa8a49ae15cbebdda747a62a9ae521126e396fda8143858419f3b03610"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27ff69c620a4fff33267df70cfd21e0097c2a14216e72943bd5414943e376d77"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ebc58693464146506fde0c4eb1216ff6d4e40213e61f7d40e2f0dde9b2f21650"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5be608c3972ed902e0143a5b8776d81ac1059436915d42defe5c6ae97b3137a4"}, + {file = "orjson-3.10.1-cp39-none-win32.whl", hash = "sha256:4ae10753e7511d359405aadcbf96556c86e9dbf3a948d26c2c9f9a150c52b091"}, + {file = "orjson-3.10.1-cp39-none-win_amd64.whl", hash = "sha256:fb5bc4caa2c192077fdb02dce4e5ef8639e7f20bec4e3a834346693907362932"}, + {file = "orjson-3.10.1.tar.gz", hash = "sha256:a883b28d73370df23ed995c466b4f6c708c1f7a9bdc400fe89165c96c7603204"}, ] [[package]] @@ -578,13 +578,13 @@ files = [ [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] 
[package.extras] @@ -593,18 +593,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.6.4" +version = "2.7.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, - {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.16.3" +pydantic-core = "2.18.2" typing-extensions = ">=4.6.1" [package.extras] @@ -612,90 +612,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.16.3" -description = "" +version = "2.18.2" +description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = 
"sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = 
"pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = 
"pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, ] [package.dependencies] diff --git a/libs/partners/mistralai/pyproject.toml b/libs/partners/mistralai/pyproject.toml index 07e0c5e0de8..af506e75cc0 100644 --- a/libs/partners/mistralai/pyproject.toml +++ b/libs/partners/mistralai/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-mistralai" -version = "0.1.2" +version = "0.1.3" description = "An integration package connecting Mistral and LangChain" authors = [] readme = "README.md" diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 915557baa56..e68377c2a85 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -29,7 +29,6 @@ from typing import ( import openai import tiktoken -from langchain_core._api import beta from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -885,7 +884,6 @@ class ChatOpenAI(BaseChatModel): ) -> Runnable[LanguageModelInput, _DictOrPydantic]: ... 
- @beta() def with_structured_output( self, schema: Optional[_DictOrPydanticClass] = None, diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py index 734283f7298..15a92d7133f 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py @@ -117,13 +117,16 @@ class ChatModelIntegrationTests(ABC): assert isinstance(result.content, str) assert len(result.content) > 0 - def test_tool_message_histories( + def test_tool_message_histories_string_content( self, chat_model_class: Type[BaseChatModel], chat_model_params: dict, chat_model_has_tool_calling: bool, ) -> None: - """Test that message histories are compatible across providers.""" + """ + Test that message histories are compatible with string tool contents + (e.g. OpenAI). + """ if not chat_model_has_tool_calling: pytest.skip("Test requires tool calling.") model = chat_model_class(**chat_model_params) @@ -131,55 +134,71 @@ class ChatModelIntegrationTests(ABC): function_name = "my_adder_tool" function_args = {"a": "1", "b": "2"} - human_message = HumanMessage(content="What is 1 + 2") - tool_message = ToolMessage( - name=function_name, - content=json.dumps({"result": 3}), - tool_call_id="abc123", - ) - - # String content (e.g., OpenAI) - string_content_msg = AIMessage( - content="", - tool_calls=[ - { - "name": function_name, - "args": function_args, - "id": "abc123", - }, - ], - ) - messages = [ - human_message, - string_content_msg, - tool_message, + messages_string_content = [ + HumanMessage(content="What is 1 + 2"), + # string content (e.g. OpenAI) + AIMessage( + content="", + tool_calls=[ + { + "name": function_name, + "args": function_args, + "id": "abc123", + }, + ], + ), + ToolMessage( + name=function_name, + content=json.dumps({"result": 3}), + tool_call_id="abc123", + ), ] - result = model_with_tools.invoke(messages) - assert isinstance(result, AIMessage) + result_string_content = model_with_tools.invoke(messages_string_content) + assert isinstance(result_string_content, AIMessage) - # List content (e.g., Anthropic) - list_content_msg = AIMessage( - content=[ - {"type": "text", "text": "some text"}, - { - "type": "tool_use", - "id": "abc123", - "name": function_name, - "input": function_args, - }, - ], - tool_calls=[ - { - "name": function_name, - "args": function_args, - "id": "abc123", - }, - ], - ) - messages = [ - human_message, - list_content_msg, - tool_message, + def test_tool_message_histories_list_content( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + chat_model_has_tool_calling: bool, + ) -> None: + """ + Test that message histories are compatible with list tool contents + (e.g. Anthropic). 
+ """ + if not chat_model_has_tool_calling: + pytest.skip("Test requires tool calling.") + model = chat_model_class(**chat_model_params) + model_with_tools = model.bind_tools([my_adder_tool]) + function_name = "my_adder_tool" + function_args = {"a": 1, "b": 2} + + messages_list_content = [ + HumanMessage(content="What is 1 + 2"), + # List content (e.g., Anthropic) + AIMessage( + content=[ + {"type": "text", "text": "some text"}, + { + "type": "tool_use", + "id": "abc123", + "name": function_name, + "input": function_args, + }, + ], + tool_calls=[ + { + "name": function_name, + "args": function_args, + "id": "abc123", + }, + ], + ), + ToolMessage( + name=function_name, + content=json.dumps({"result": 3}), + tool_call_id="abc123", + ), ] - result = model_with_tools.invoke(messages) - assert isinstance(result, AIMessage) + result_list_content = model_with_tools.invoke(messages_list_content) + assert isinstance(result_list_content, AIMessage)