diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml
index 92dd272b769..d02248d8006 100644
--- a/.github/workflows/_release.yml
+++ b/.github/workflows/_release.yml
@@ -100,15 +100,32 @@ jobs:
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
VERSION: ${{ needs.build.outputs.version }}
run: |
- PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
+ # Handle regular versions and pre-release versions differently
+ if [[ "$VERSION" == *"-"* ]]; then
+ # This is a pre-release version (contains a hyphen)
+ # Extract the base version without the pre-release suffix
+ BASE_VERSION=${VERSION%%-*}
+ # Look for the latest release of the same base version
+ REGEX="^$PKG_NAME==$BASE_VERSION\$"
+ PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
+
+ # If no exact base version match, look for the latest release of any kind
+ if [ -z "$PREV_TAG" ]; then
+ REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
+ PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
+ fi
+ else
+ # Regular version handling
+ PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
- # backup case if releasing e.g. 0.3.0, looks up last release
- # note if last release (chronologically) was e.g. 0.1.47 it will get
- # that instead of the last 0.2 release
- if [ -z "$PREV_TAG" ]; then
- REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
- echo $REGEX
- PREV_TAG=$(git tag --sort=-creatordate | (grep -P $REGEX || true) | head -1)
+ # backup case if releasing e.g. 0.3.0, looks up last release
+ # note if last release (chronologically) was e.g. 0.1.47 it will get
+ # that instead of the last 0.2 release
+ if [ -z "$PREV_TAG" ]; then
+ REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
+            echo "$REGEX"
+            PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
+ fi
fi
# if PREV_TAG is empty, let it be empty
@@ -312,12 +329,87 @@ jobs:
run: make integration_tests
working-directory: ${{ inputs.working-directory }}
+ # Test select published packages against new core
+ test-prior-published-packages-against-new-core:
+ needs:
+ - build
+ - release-notes
+ - test-pypi-publish
+ - pre-release-checks
+ if: ${{ startsWith(inputs.working-directory, 'libs/core') }}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ partner: [openai, anthropic]
+ fail-fast: false # Continue testing other partners if one fails
+ env:
+ ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
+ AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
+ AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
+ AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
+ AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME }}
+ AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
+ AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python + uv
+ uses: "./.github/actions/uv_setup"
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+
+ - uses: actions/download-artifact@v4
+ with:
+ name: dist
+ path: ${{ inputs.working-directory }}/dist/
+
+ - name: Test against ${{ matrix.partner }}
+ run: |
+ # Identify latest tag
+ LATEST_PACKAGE_TAG="$(
+ git ls-remote --tags origin "langchain-${{ matrix.partner }}*" \
+ | awk '{print $2}' \
+ | sed 's|refs/tags/||' \
+ | sort -Vr \
+ | head -n 1
+ )"
+ echo "Latest package tag: $LATEST_PACKAGE_TAG"
+
+ # Shallow-fetch just that single tag
+ git fetch --depth=1 origin tag "$LATEST_PACKAGE_TAG"
+
+ # Checkout the latest package files
+ rm -rf $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}/*
+ cd $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}
+ git checkout "$LATEST_PACKAGE_TAG" -- .
+
+ # Print as a sanity check
+ echo "Version number from pyproject.toml: "
+          grep "version = " pyproject.toml
+
+ # Run tests
+ uv sync --group test --group test_integration
+ uv pip install ../../core/dist/*.whl
+ make integration_tests
+
publish:
needs:
- build
- release-notes
- test-pypi-publish
- pre-release-checks
+ - test-prior-published-packages-against-new-core
+ if: >
+ always() &&
+ needs.build.result == 'success' &&
+ needs.release-notes.result == 'success' &&
+ needs.test-pypi-publish.result == 'success' &&
+ needs.pre-release-checks.result == 'success' && (
+ (startsWith(inputs.working-directory, 'libs/core') && needs.test-prior-published-packages-against-new-core.result == 'success')
+ || (!startsWith(inputs.working-directory, 'libs/core'))
+ )
runs-on: ubuntu-latest
permissions:
# This permission is used for trusted publishing:
diff --git a/Makefile b/Makefile
index 58084a735c9..2ae8a1f3db9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,6 @@
.EXPORT_ALL_VARIABLES:
UV_FROZEN = true
-UV_NO_SYNC = true
## help: Show this help info.
help: Makefile
diff --git a/README.md b/README.md
index 26c58cae21f..801cd6532c2 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,12 @@
-# 🦜️🔗 LangChain
+
+
+
+
+
-⚡ Build context-aware reasoning applications ⚡
+
+
+
[](https://github.com/langchain-ai/langchain/releases)
[](https://github.com/langchain-ai/langchain/actions/workflows/check_diffs.yml)
@@ -12,131 +18,65 @@
[](https://codespaces.new/langchain-ai/langchain)
[](https://twitter.com/langchainai)
-Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+> [!NOTE]
+> Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
-To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
-[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
-Fill out [this form](https://www.langchain.com/contact-sales) to speak with our sales team.
-
-## Quick Install
-
-With pip:
+LangChain is a framework for building LLM-powered applications. It helps you chain
+together interoperable components and third-party integrations to simplify AI
+application development — all while future-proofing decisions as the underlying
+technology evolves.
```bash
-pip install langchain
+pip install -U langchain
```
-With conda:
+To learn more about LangChain, check out
+[the docs](https://python.langchain.com/docs/introduction/). If you’re looking for more
+advanced customization or agent orchestration, check out
+[LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building
+controllable agent workflows.
-```bash
-conda install langchain -c conda-forge
-```
+## Why use LangChain?
-## 🤔 What is LangChain?
+LangChain helps developers build applications powered by LLMs through a standard
+interface for models, embeddings, vector stores, and more.
-**LangChain** is a framework for developing applications powered by large language models (LLMs).
+Use LangChain for:
+- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and
+external / internal systems, drawing from LangChain’s vast library of integrations with
+model providers, tools, vector stores, retrievers, and more.
+- **Model interoperability**. Swap models in and out as your engineering team
+experiments to find the best choice for your application’s needs. As the industry
+frontier evolves, adapt quickly — LangChain’s abstractions keep you moving without
+losing momentum.
-For these applications, LangChain simplifies the entire application lifecycle:
+## LangChain’s ecosystem
+While the LangChain framework can be used standalone, it also integrates seamlessly
+with any LangChain product, giving developers a full suite of tools when building LLM
+applications.
+To improve your LLM application development, pair LangChain with:
-- **Open-source libraries**: Build your applications using LangChain's open-source
-[components](https://python.langchain.com/docs/concepts/) and
-[third-party integrations](https://python.langchain.com/docs/integrations/providers/).
- Use [LangGraph](https://langchain-ai.github.io/langgraph/) to build stateful agents with first-class streaming and human-in-the-loop support.
-- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
-- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Platform](https://langchain-ai.github.io/langgraph/cloud/).
+- [LangSmith](http://www.langchain.com/langsmith) - Helpful for agent evals and
+observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain
+visibility in production, and improve performance over time.
+- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can
+reliably handle complex tasks with LangGraph, our low-level agent orchestration
+framework. LangGraph offers customizable architecture, long-term memory, and
+human-in-the-loop workflows — and is trusted in production by companies like LinkedIn,
+Uber, Klarna, and GitLab.
+- [LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform) - Deploy
+and scale agents effortlessly with a purpose-built deployment platform for long
+running, stateful workflows. Discover, reuse, configure, and share agents across
+teams — and iterate quickly with visual prototyping in
+[LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
-### Open-source libraries
-
-- **`langchain-core`**: Base abstractions.
-- **Integration packages** (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Important integrations have been split into lightweight packages that are co-maintained by the LangChain team and the integration developers.
-- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
-- **`langchain-community`**: Third-party integrations that are community maintained.
-- **[LangGraph](https://langchain-ai.github.io/langgraph)**: LangGraph powers production-grade agents, trusted by Linkedin, Uber, Klarna, GitLab, and many more. Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it. To learn more about LangGraph, check out our first LangChain Academy course, *Introduction to LangGraph*, available [here](https://academy.langchain.com/courses/intro-to-langgraph).
-
-### Productionization:
-
-- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
-
-### Deployment:
-
-- **[LangGraph Platform](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.
-
-
-
-
-## 🧱 What can you build with LangChain?
-
-**❓ Question answering with RAG**
-
-- [Documentation](https://python.langchain.com/docs/tutorials/rag/)
-- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)
-
-**🧱 Extracting structured output**
-
-- [Documentation](https://python.langchain.com/docs/tutorials/extraction/)
-- End-to-end Example: [LangChain Extract](https://github.com/langchain-ai/langchain-extract/)
-
-**🤖 Chatbots**
-
-- [Documentation](https://python.langchain.com/docs/tutorials/chatbot/)
-- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)
-
-And much more! Head to the [Tutorials](https://python.langchain.com/docs/tutorials/) section of the docs for more.
-
-## 🚀 How does LangChain help?
-
-The main value props of the LangChain libraries are:
-
-1. **Components**: composable building blocks, tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not.
-2. **Easy orchestration with LangGraph**: [LangGraph](https://langchain-ai.github.io/langgraph/),
-built on top of `langchain-core`, has built-in support for [messages](https://python.langchain.com/docs/concepts/messages/), [tools](https://python.langchain.com/docs/concepts/tools/),
-and other LangChain abstractions. This makes it easy to combine components into
-production-ready applications with persistence, streaming, and other key features.
-Check out the LangChain [tutorials page](https://python.langchain.com/docs/tutorials/#orchestration) for examples.
-
-## Components
-
-Components fall into the following **modules**:
-
-**📃 Model I/O**
-
-This includes [prompt management](https://python.langchain.com/docs/concepts/prompt_templates/)
-and a generic interface for [chat models](https://python.langchain.com/docs/concepts/chat_models/), including a consistent interface for [tool-calling](https://python.langchain.com/docs/concepts/tool_calling/) and [structured output](https://python.langchain.com/docs/concepts/structured_outputs/) across model providers.
-
-**📚 Retrieval**
-
-Retrieval Augmented Generation involves [loading data](https://python.langchain.com/docs/concepts/document_loaders/) from a variety of sources, [preparing it](https://python.langchain.com/docs/concepts/text_splitters/), then [searching over (a.k.a. retrieving from)](https://python.langchain.com/docs/concepts/retrievers/) it for use in the generation step.
-
-**🤖 Agents**
-
-Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. [LangGraph](https://langchain-ai.github.io/langgraph/) makes it easy to use
-LangChain components to build both [custom](https://langchain-ai.github.io/langgraph/tutorials/)
-and [built-in](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/)
-LLM agents.
-
-## 📖 Documentation
-
-Please see [here](https://python.langchain.com) for full documentation, which includes:
-
-- [Introduction](https://python.langchain.com/docs/introduction/): Overview of the framework and the structure of the docs.
-- [Tutorials](https://python.langchain.com/docs/tutorials/): If you're looking to build something specific or are more of a hands-on learner, check out our tutorials. This is the best place to get started.
-- [How-to guides](https://python.langchain.com/docs/how_to/): Answers to “How do I….?” type questions. These guides are goal-oriented and concrete; they're meant to help you complete a specific task.
-- [Conceptual guide](https://python.langchain.com/docs/concepts/): Conceptual explanations of the key parts of the framework.
-- [API Reference](https://python.langchain.com/api_reference/): Thorough documentation of every class and method.
-
-## 🌐 Ecosystem
-
-- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
-- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
-- [🦜🕸️ LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform): Deploy LLM applications built with LangGraph into production.
-
-## 💁 Contributing
-
-As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
-
-For detailed information on how to contribute, see [here](https://python.langchain.com/docs/contributing/).
-
-## 🌟 Contributors
-
-[](https://github.com/langchain-ai/langchain/graphs/contributors)
+## Additional resources
+- [Tutorials](https://python.langchain.com/docs/tutorials/): Simple walkthroughs with
+guided examples on getting started with LangChain.
+- [How-to Guides](https://python.langchain.com/docs/how_to/): Quick, actionable code
+snippets for topics such as tool calling, RAG use cases, and more.
+- [Conceptual Guides](https://python.langchain.com/docs/concepts/): Explanations of key
+concepts behind the LangChain framework.
+- [API Reference](https://python.langchain.com/api_reference/): Detailed reference on
+navigating base packages and integrations for LangChain.
diff --git a/docs/cassettes/configure_093d1c7d-1a64-4e6a-849f-075526b9b3ca.msgpack.zlib b/docs/cassettes/configure_093d1c7d-1a64-4e6a-849f-075526b9b3ca.msgpack.zlib
new file mode 100644
index 00000000000..88cb5927c17
--- /dev/null
+++ b/docs/cassettes/configure_093d1c7d-1a64-4e6a-849f-075526b9b3ca.msgpack.zlib
@@ -0,0 +1 @@
+eNrtVnl0E3Ueb4GtgAesT1dQjpgVdbWTziSTs+RBD5IW0jNJL4p1MvNLM81cnaNtWrpWLD4flC0p4AG7rpTSYFsK2AJySlFXFFRA0aUooOyKBy5KQURQ9pc0peWh7/n28d66+8wfyWR+3+PzPX+feaEKIEo0z8V20JwMRIKU4R+paV5IBOUKkOT6VhbIPp5qyc5yulYpIn34AZ8sC5IlIYEQaA0vAI6gNSTPJlRgCaSPkBPgs8CAiJkWD08FemNTatQskCSiFEhqy+waNclDV5ystqjzocJ9kkr2AVUlIOCPqKI5VTIvyTw3TR2vFnkGQDFFAqK6dk68muUpwMAXpYKM4DzC0hwNpSRZBASrtngJRgLxahmwAoxEVkSoi2pQ+IbnmX7XckAIG/QqXCRQqHzl0VKj5gg2fFoK5JIoHChAAYkUaaFfRm0H8lC4GiggECLUg8mTwjYEEeZElGkQ+cfwJDFgPeoboqW5UnVtLQwP5pgWAQWhDUrCMKOSvKcMkDKUrJ1TG/IBgoIuGlt8MDvBzquTv44gSQBzAjiSp6D14NrSalqIV1HAyxAyaIMZ50AkzGCbHwABIRi6ArT2awXXE4LA0P3uE8oknuuIVggJA7n2uC1cDwSWk5OD3VkQRFJ6QnYAdgmnwjQGTIOtr0IkmaA5BlYdYQiIp1WInG8beiAQpB8aQaIdGGztV+4cKsNLwdUZBJnlvMokIZK+4GpCZA1419D3osLJNAuCoZTsa91FDwfd6TSYVmPacJVhKcCRwdWRRtp8lTKQxQBC8tBGcCXaOZAfBnClsi+4SqfVrRGBJMCeB4+1QjVZkea1wFqAfXtC0d5vzpo1UMSjMXe0pMK6BHe4fEq8CjWoMghRpUW1ehVmsOh0FhxX2TNcHSlRN64fLcMGl0hwkheWYsZA2UOkT+H8gGpL+dGC7wgXHEYThg9HCwFVAi8BJIoq2FGA5PZPPZKe2tXfXQgvlhIcXR1xG9wRqXxldVUlRSoU5auoZFFzNa6jPUAhvd1RFTgCYTcQEMJKwVW4Tt8ZPRnIfRuMFUUwFEGxrVUInFXA0CwN8xn5jq4eKdiiR1H0xWsFZN4POCkYwtHIZ+dQCRGwsGhh34NmcLPZvP3HhQZM6aCI2ajferWUBIaiwbSs9OK1AlETzajUUTUgjdBU8PA98E8JoaXMHr0eJylCZzR4dVrMRGBmwmPEUEyvA8SW8D4goZVwMQVelBEJkHDPyoHg4XiWqArPmVUHRQ0w0kS4HklGoYBT8aTy4RikRJUgAoYnqHWkFyEJ0geQ/v4LhlILM5My0lPanBBkCs/7adDUGzuupIT0lnhYK5UXKChnUhiHO60cNYvJQoEsukRjEup345WEwNJYSpqU5EzPsfMIZsQxrdFk0uIIpkE1cEoRh8dv0uWSdjTfp+NLAnaz35lRaMygdTPNmWhBmZMRGCXTpfHmutOqzclV2pmObHNWkjbXVpXkypNmZmp0eHlmJenT0kwuhuL+VNLuNThmCTnp2Q5TmQE3G4n8HFdWieyHUQtw2VoTElWwYeG+lKzRsUHg2CDhoTFa0IGhSVRRkcRYNVevyERVGry3sjgmkKhyhjMM4C/c205aBtZMngOHl8LEKBU0Zc0kst0FAb3bnWVMwryluRTvtpFKURrhChQVlZXn8YYkD12ur/R4MoZkxmjEETSaHAOKmyKtOQj9P0S1qQAZugWQrMhFBIvL8RJHe72tTiDCqQq2kQyvUHDbi6A1xYbkJhUGu80YqcMxwmMwGSicBFokGe7RAWtXdkZL+KoIEQxsvAoy2OXTWdUwlTp1ooolrCYDnLHINf5oa//F9Wrs95MXjoyJfIY3OHP4FejYP57MLzg/4d7krulVs7ue722zyssMa/fWF0t1iWTNpvTZMy6cXnJ3bBM4WDX9vdrztZXnD2++cWTsQ9NtdYfe3WBb+JUl4zS//Ejo3N6LzT98c+GdD/s+fODW+FtnT3kP7bvp3Lf1+yY
W8k+XdU9/bdkpwrvYcratj8Ial+CL7pi4f82w+SF37/wbbzcUn0H/9PGMcVM+mmpd7391fNGePY11Y5MrPjx2/g83zn20IbWLbEny0/MX35xeX5ez215XtHftqoM9zIhnmm5Zzc/ZlOyICS3dM/52T/wHj2x7PYAvHB1cN21u+iPH+iqPbT7w/pEzJ777W+ekT1NWOD4/d3DdVv8aZspdjuXHU6YmPmgbmdYrfzB6zg8N7x7yvfVpfYzefeQ5eafyd3b6/bYdx+NeuLn4wS8ulY+5r7tW+OyS9PETPS9sdE3osLxy/rHzye7eNfYa75SlxeLsk3HPf2lbbrlrQruiPGfdMnW/Td/7z8zvkz8Dd6QnjD5x8c6eC0eGf7p0lach4cTh32zcPt0eqNz8suRZeqigYd/XhQc2HS8bNmLJ65e5T5oqplmfOnR5T2MMuWbi77YYCoQKy1dPe/fzx146GNe47fGe5sPi41tO+C6xtxUczDubGPvNIQtzpmnv6EWG7bNvul+ZhCxpr+/sbJ4y4uLxMbDOly8Pj1m499ZX7h0RE3M9meGwnOvDDOOHaHIKw1w5JuAtBLchfN9PAktIgvlJJkhDHqYOC5SkzMg1lTvppMIKNG+WNz3FLpmLiCLF+HPoIiGWKixEAr2oa4qv0LpitUVVrO7HX6yuVYc53VDY6vRwuJLCcQHNYHhhzEOhl/wMjL/y5F958i+VJ+vN15kn6/+feDJu/l/hyej158l6j8ELtJQR1eEoYaSMmM6DkqhJqzNjqAkjTP81nqxPqp4VcNjQnGxthZ9zC7NYxlxt53J9AYdU5GYdgUwnmYWasllHziAb1F/hyRk+Zx5fVe0nBV7Qp4KkDGehKblcq/X4gSYnA6W5yjRCyVNm+sopv4Mql/SFvNGuoXL4alsZW00bZPsMtsgrAQOa7MqbZeRmujWKN4DZZ2T7s3MdnLmoxGPSZqQmp5vSfy5P1l8nnmxOE205zvI02sPYyqkiW7U5tyDgd3kU3FjIYeXmUszmrKCNKD4THZIZHDX+MnmyVw8oyuQ1UNeJJ68Z5MkRDtWQ28OtQm/b3nf7k2+vHPZGXBO52p1ndvdavyybX3wxllv0V/+0TYvaLl18Y/HjLnb1Q6Z928d7nz02a/e49xfw45/aoTKblhx999zbk2uwf710+vTkWxKPLdv8xZucvfHcae3J9jW7ttUsPtu2tclkGt5y4K3us6qRzf6HlgVyDilfs+o418PNHdLSJ5++pffz7j0LtG+24euyPp+0sX7X5lNZu8eRCe8YP+ke/eyFJ0F7PlU3NvWGuTWTPA01+G8/Uo+YmFbwcfv8pb5Rp277Pn/MqVEjzh1/ZtOpuOErgWU9WlTz4lZXwPeayiMm3intbjw5Li7x5NeaP3+rO1rWnnKsamwsewmo5LmexlHkI6GtJ1bEfRCYMEFfP+bhVYR7X8XcwjPJXOaEu0d17VJ2frerI2d68gphA6KZfyoJeQNs9LjuTHjiH81nLsZPDry9ceOXOz0vjc2d9/vsRX3Pdc89Ujm+b2zCV10b+haEdk2tuxyagrf0vFaavX/BgX2VPS/fPfamjHb2hqMB62T7F8uZOXPyPdJf1gZExw/D+yntPW9mrnwAPv8bzps/8g==
\ No newline at end of file
diff --git a/docs/cassettes/configure_277e3232-9b77-4828-8082-b62f4d97127f.msgpack.zlib b/docs/cassettes/configure_277e3232-9b77-4828-8082-b62f4d97127f.msgpack.zlib
new file mode 100644
index 00000000000..bc85c69f183
--- /dev/null
+++ b/docs/cassettes/configure_277e3232-9b77-4828-8082-b62f4d97127f.msgpack.zlib
@@ -0,0 +1 @@
+eNqdVXtwVNUZ3220OjKDBRGwHe12BxBr7u597TOzbZNNWJIQNskueVSZ9Oy5Z3dvcu89l/vY7C4CFTs6KOJcH6NC21ESdp1tComkKAgqdup0QFQmOhpbqrYVaXV0aJyxiiU9u9mUZOCv3j9277nf6/d9v+/7zvZiBmm6iBX7iKgYSAPQIAfd2l7U0CYT6cYvCjIy0lgYbo/G4kOmJk7+MG0Yqh50u4EqurCKFCC6IJbdGcYN08Bwk3dVQhU3wwks5CZTm50y0nWQQrozeMdmJ8QkkmI4g861SJKws9apYQmRo6kjzbllY61TxgKSyIeUalA8pmRREYmWbmgIyM5gEkg6qnUaSFYJYMPUiC3torcU0wgIJJuHhtNYN6z98/EdABAi4g8pEAuikrJ+m8qLaq1DQEkJGKhEUCmokr1VGkBIpYAkZlBhxsoaBaoqiRCU5e5+HSsj1SwoI6eiy8Wlci4USVkxrPEoAVHf7G7PkUIqDsblZVzMaJbSDSAqEqkMJQGCp6BW5C/MFagADhAnVJUkqzBjvH+uDtatfW0ARmPzXAINpq19QJO9/MG53zVTMUQZWcVw++XhqsJL4TgXw7r8Y/Mc6zkFWvsqJDw3zxgZWo6CmPiwnqb3z9ZHQkrKSFtDDM09oyFdJW2B7ikQM8PUtw8TLtBrfyxW+2NvtHWWxL/Ylg03El6sY/G0WeugvY42oDlYmvU4GG+Q44I854i0xUfC1TDxK9IwFteAoicJFU2ztBdh2lQGkFAKX5HwY2XCSTZl+KQtKZRVsY6oKiprpIfqnBkMqrnx4Ex3UVhLAUXMV8JaxyrMD+azgwI0BSGdGZTpQJ7nxAQyYXK8aqJquByGAKJk3Rpiae/+qmS29iWSK00xNEUzR7IU6XMkibJI6ln5rU6nbg17aJp+/nIFAw8gMsdFnq48L87V0JBMSCvHvuSGDwQCR6+sNOuKIyoB/3w0hFE0Fw3DyvrzlytUXeyl9ZHsrDYlCtbkCnLoYz0AIS/rpZlkgKYTwMvxnM+T8EGO9UEv7z1MJl+ExEuZTBVrBqUjSFaRkbMma2WQLc9ZiGM8nJdkWucQFSiZAoqZiUZczkGvc6gakjAQDsAkBQFMI2qm/6xiY+/6+rbmcClGQIYxHhDRw+/Zl/f1wWRfQg7lUNwTiAs+RsFrNrV0xF2NmmfA71Kjme56LHd3x2TI5tRIjO1rohgfz7A+v5/lKMZFu8iUUk39PIwMCp2eVj7CmevyeqSzKSA2RrubeiMZF611NRiDjJoRcS4Qz/Zgo9/or491d3DqYKx9TS6Xp1vCrfGe5oS/q1Py+dZJ0J/IJnztZnN8fdf6dfl6r9ri6W/pEPPJNpIiMNIhd52DNKxIih6qjg1FxoYqD40vSM8OTZ1DqBQm5Jq/Iusca8lqjypSrs4RK1cYkX8go5hooNB6rKDJR0lhzIwohMxNG3LxCNfLh1k6BnSlK94Ge/yg2aer9AbIrkl1cJGGgWRLfBDPqQzrZSi6WhwvzfsrrXkJ+v+J6lAPNXcLUFF15g4rKlhXxGSyEEMamSqrBCVsCmTba6gQXkN11vda4wEGcjwDOC7hY3mB4akGskdnvf1vZwyXr4oikEjjZaB1MM2FnEGe55x1DhmE/F4yY5Wb7u5CuVGV1B/sQ99/4Fpb5anZ2Xl81wS95OjHt9/yI9/q5pOrvtg+fk18/OSOBYsbLPjSiuTNPz6ZWXqiNL3rl+Ntpat3tC7k8Crujc9X2n/W8myIPj3l+PZvJi6Uvv7Xafma9z960K9O//uFc3vOfvj0ha+/2vh7b0f9qeuXfLg3f3t69Lv3OLmJV35w/fKNU9rjy9CGG/+0Y/navbueff1eI/KPp3a2PDDy7r7eJ8++3XvmkcXnJm95caXN5p7wneUXbxsLCXsWsE99eTR86KWFdjoSdO7cfOCuD1aNnmq4jrWfWvKfn/904nv+N6Pf+fPLtz5xtTq86FvNW78ZXRqUIm/ALNe/5dX
oOyt2Bz955nzq4v3PnVj9/jsLD996w/Q/a3q9Y8WrPrjwVvbobcdHho7fG3zzht3pnlOK1Dv1ykcfo9Gf3P3JjVsXnXni7cU13e8dWPrqVctCJ5oOS48/1Hn6/rVdj/Xd99WeL++848nPtn26rOHacTuSb25ZqSxww4V/W3Rx4NMjf72Nnjr368deDq42zt+04tBdb02J9LYv7Pcd2VAIp361+/Oh35mZbqX14tY9Z8b+/ug3dptterrG9hlmzlM1Ntt/AUZXjK8=
\ No newline at end of file
diff --git a/docs/cassettes/configure_42647fb7.msgpack.zlib b/docs/cassettes/configure_42647fb7.msgpack.zlib
index 44b91719df5..fa44d21f4d7 100644
--- a/docs/cassettes/configure_42647fb7.msgpack.zlib
+++ b/docs/cassettes/configure_42647fb7.msgpack.zlib
@@ -1 +1 @@
-eNptU39QFGUYPhQbUhmIGZv6R3ZupBqHvdtjr+OApomOEEQ6hBsGRhM/dj9ul9vbXXe/xQO0ApnUKZlZSy2omYjjzllJpSgkMSMpayAHGkeHqRwrM5pKBRRicqDvTjAZ3b++fX887/s+z/s2hmugovKSGNPJiwgqgEH4R9UbwwrcpkEVNYX8EHESGyxyl3jaNYUfTeEQktVMqxXIvAWIiFMkmWcsjOS31tisfqiqwAvVYKXE1o4eqDf7QaACST4oquZMwkal2VMJ80IQtmyqNyuSAPHLrKlQMWMvI+FORBQxeaAgEH5IAKIaQxCgUtIQUQmBopp3vhQBklgoRAIZAWgsJGmSA7xPI9NwHYqm0iNwCPplPBjSlEgVykLtDHMQsHjsS6bEICepSO+6b5RjgGGgjEgoMhLLi179Q28dL6cSLKwSAIIG7lGEUa50wwehTAKBr4GhO1n6cSDLAs+AiN9arUpi5/xMJKqV4f1uIzI6iRkRkd6TvdCHtagWMy/ilmmHhToeIFUEeFHA3JECwC2F5Kj/5L0OGTA+jEPOq6qH7iQfvTdGUvWOQsC4SxZBAoXh9A6g+B32j++1K5qIeD/Uw66i+8vNO/8vR1tsNkt61yJgtVZk9I4qIKiw6y7Jd1MMrBVNUg6SsvUsgoZIqSUZCVfQ26ijCwQKUPQiTm+32TMOK1CV8bLCXSGchjS1MYjFgkPfhOf36wN3wYLUbwRzsGz6KQ+npRK2dMLNICKyJITNnkmnZdopYl2hp9M1X8TzQJW6PAoQ1Sqs1AsLWxFmOE30QdZwPXAfjPkjInlW78PvCspGF+Ruc2nFG9VAYVFengLp9EKQzX8SIBlB0lgS4QuEZHTYANJHiUo7nWG3MU4nTdMOSDkhpKg0J2ScjN3BVjrp9hoe6IbNYiO8kuQV4DFXLukCDAfJkiglejin/MXswnxXZxlZLFVKSCU9wKsHRUmEoRKoYBV0I1oa77UCQzi9OLtc73aydqqqislgYYbdARhAPo/XZYGeu+MHI0cRvfQGLIGCTV/FbE9+Pc4U/ZZu2Fjh++G5lXNrW7ZM/NLU/XDs7eKxM3knv9/t79gFm1rdzdKzBefyD51fMXPtzLWH6pNuz64fa+xn9r63emqw9eKtyetTJ2Zvjxe03hz/Z+I7/+eXXvlyiX+M9+hGVuKjb69pfrUkvro5fsVfAyUDSUfGPgrPVF86MXx15u+JmlObyfSyrSnCwGu/TQ+ezmnvGcnal9TzFhe6fPnJ3tjTwZTDKf1E74/jzRWtMX03hp7Y8vMzhscovb717LSntzP2wrBpX9ZI/fpCNjl3uvQcvcz37k/9S8uWX+mPXftHnJLw+9Z453Dir6adVXE79geW7whIm74orTMm8xL2tx0cWt3YQO+5cv79FSNnYzYfevoGiHGzw490p3rjbnEttXvaOro+fXy7mnqhqGX51QtF617+rK7hYvIyI/+0+4rnsYI1U/WDe8etbWhoYBX4NuPrJZM335lobPhXqH/zYFlXX/mqp0ZndzuO/HkrATM8N7fURLZuWDkdYzL9Byv+bUg=
\ No newline at end of file
+eNptVGtsFFUUXh6RthaiwUQMaCcbEB+925ndZemuMXRtS+1KH7RLbQHFu3fu7kx3dmaYudP0AYlUIU2BkMFQAsEX3e7i2lKaklSiqDxtgKRAiVqJVQMiVqBi8AHa1NvSAg3Mr5l7zvm+c77v3KmPV2FNFxV5UqsoE6xBROiHbtbHNbzawDp5OxbBRFD4aHFRqb/Z0MS+eQIhqu7JyICqaIMyETRFFZENKZGMKi4jgnUdhrAeDSh8Td/GOmsEVq8iShjLutXDsXZnunU8xepZUWfVFAlbPVZDx5o13YoU2oRM6IEfSxITwQxkKmkxAwOKQZgAhppuXfsaxVB4LNE0JEGDx8ABBCiGDWCnBKyDXUihCI6odB5iaBSftbFr4wKGPB223/JoVFB0YnbcN0A7RAirBGAZKbwoh8y2UK2opjM8DkqQ4ARtT8ajCpmJMMYqgJJYhTurgU6gKEt0LkDECKatmh8VFvlX5eWX5RbGboOa+6CqSiKCI+UZlboit45NC0iNiu8PJ0Y0AVQomZhd3vE2M4prqB0yw9qcbhu7715qCdKOY+po/NN7AypEYYoDxqw2Y7eL996bo+hmSwFERaUTIKGGBLMFahGXc8KUmiGPDGrGs4vvpxsL3qVz2Di7LbNjArBeIyOzJQglHXfc8eBOSYIa6QCsC7Bc1wRoTLQagBTKYH7I7h0XUMJyiAhmM+dw79GwrtINxm/FaBkx9Poo9RKf6o6Prd3uolfGN2FTNIe6ah70C0Y6w7qYAqgxlHgBw7k8DodnAcvkFfhbs8dI/A90qcOvQVkPUqdyx5cmjgRDDmM+kf3AdUmM3Swg8uZn9H0Vy+VFjBLVtzQYUivD4mpfdkmuXIjzDtzVRdFCUBZrR2lH6vrmOtwuxwLeEQA4EOSB0525ELjddg4E7PZM3pnJLXTyruYqEZoJzsYxIUUJSbgdBQGCSMDgtjRmPKei0FuQn91aDkqUgEJ04IchMyorMo6VYo26YSaQpBg8XX8Nx7IXgxJvhbnfzSGHk0OIDbpYJ0J28BJdm3GZ7sgQHbk7o7+BddQKjR4dm9SYtjHJMvpM4U3vlt6s1PXDjddbtu/3dTX4DrRmzWl882RK8qTcM+wFsmHJFfaCmLxs+AVf07asXek363p6mtpnzXFlXixdoaRdORL+/otzXWn/3fht6Je2J6/GH59Ztqf35c5GzTX/cMo7Rw9AbUbF9OfLMhuaG6Z/JzQ2/ylfLbh88tbez+teHxx4sUxYv62846a2dueeG/pftpZDU+Ytx+zvm5fUD56dSba+sfXsnJvdaUNTC6tbHu5RdyyrkN4f/Km7vXvn1uRzvVP7d/262pcX7Qn+7bV9nDqwuf/I7NOPda9LKX8i6UTS9g+eSr34yIWpCCataaqesaZ2yfkvvYsHTmya/+zpfDjNe+iZHHsFTDpzLPlUsfZj70P95uGs050rGdIp/sxseO9S+aLBYPm50uWTvx4Qma6VBw+d/yal88S3T9srZs2eW9XWdPSfafytpQ0AHpl3POXkHzsuNa77anebr2hXR9+15/Yfr62MWJu9nwy9Wrzoes7xoZ7o5R/eHbpW9e90i2V4eIrF9ARTHZMtlv8BHmuAkg==
\ No newline at end of file
diff --git a/docs/cassettes/configure_48b45337.msgpack.zlib b/docs/cassettes/configure_48b45337.msgpack.zlib
index d76b8245df2..51f5976b330 100644
--- a/docs/cassettes/configure_48b45337.msgpack.zlib
+++ b/docs/cassettes/configure_48b45337.msgpack.zlib
@@ -1 +1 @@
-eNqdVW1sG0UadluuQoJDOqVAdRLqYsEJQnez6931V3Cp4zjNR12ncUJT0J1vvDu2N96v7sw6cXL9cYWrQNypXVqoysePUscuJm2aNrQcbenpuCvtFXEH3J0IEhVV+VFAgBAoIEGBWce5Jmp/3f7weGbeeT+e531mtlWL0EKKoS+ZUHQMLSBhMkHOtqoFt9gQ4UcrGsR5Qy73JlP9+21LmWnOY2yicEsLMBXGMKEOFEYytJYi1yLlAW4h/00V1t2UM4Zcmtk55tUgQiAHkTdMPTzmlQwSSsdk4u2HqkppkALUkFEgQ8awMZWBwELe1ZTXMlToWtkIWt6tvyYrmiFD1V3KmZjmGZHGtpUxXFudrHJkRNiCQCOTLFARJAsYaiYpjBi6vlgmsLWah0AmZe8o5w2EnUOLC5kEkgSJd6hLhqzoOedgblQxV1MyzKoAwxrJXod1mJxaAUKTBqpShJW5U85hYJqqIgF3v2UIGfpEo1oal0x47XbNrY0m2OjYmU6SJKJdLb0lgrhOcYwQZNjDIzTCQNFVAiGtApJPxazvn1i4YQKpQJzQDTadytzhQwttDOSMJ4CUTC1yCSwp74wDS/MLRxeuW7aOFQ061VjvteEam1fD8QzHMYGpRY5RSZec8ToNxxcdhtgq0ZJBfDj72EPz+KhQz+G8s58ThAMWRCbpH/hIhRzDNtpWJlzAN89WG430QrJnnsQLntvL7YQX51R/3l5NcQEqKWHKx/oEihPCvC/MB6l1if6JWCNM/3VpmOq3gI6yhIr4PO1VKW/rBSjXYtcl/JRLOKnGTZ/0KQ1HTANBupGVMzFI980piO5qPzrXXbRh5YCujNbDOqfqzA+PjgzLki3L+eKwxoZGBV7JQFvKTjeOmJbhhiEJ0Rpy9vt57lBjZx77GqmVpTmWZrlXR2jS6FBVNIXgWf9tyBg5ZZFl2VeuNcBEd0TwVYGtf68ttLCgRkhzY191I4RCoZPXN5p3xROTUEB8dbEVgguz4XwaeuVag4aLF1g0MTJvTSuyM3MXmaSDok/MhPggHwqwPlEUWAGEIBviQhzL+WR/6M9E/IpEvLhkmoaFaQQlcmfhkjOzWgMjrs4iPCfyflJpK6XokmrLMGVn2g23BtRKmRZUDSBPxjroGJDykE7V+8+ptm/eEE10xWopkmTMMAoKfPL9JcvSaSmbzmiRbpvBw1GspyxN8aVD6YQh2pIciOfibahL797UWSxuiveYWxL2MM0FfCEuIIpikOYYluEYjjZ4vbetLbNe7tR7Ep1tfWzRH+gG4kDM6var6eSD3XhTVttUjJo+f1f/6IC4rs0uDPWW1qc5ExaVUDZbTG5WChrGMcvkt8QHbMbfNxQl1QCcj7S0UqQ3FYJvpKEQmiiEdvUhhtl5fbRSch2DCLP4NmylOsl1n9TVUiuVcsGEZAQaTCkYRjYYOpzZTTCwi4ocCchxkOhghmzWig32GcxwMj2QeyjR548XgqZtGsPRQk6IM2aiJ7cABJHjabaBg58VgvUuvJr6/5nVsUF6oeDppDn3rlV1A+lKNltJQYsIyKlJqmHL5GK3YIVw3hfd7EwHZYHNZiWBC/plnvVxdBu5Mue9/e96KLuvQhWopMeKknM0z0e8YUHgva2UBiJBP5FT/fX7fcXtST339yVo1RM3eurfMnVjIrmUW3Hyi8mR2Zu3/yxw4+37f3XXL/WO9Sti3d9MPXx59j+zB52VP5z4+MjvVp76mn1371vrTzd59iFu+b7uczWrMvZd8MrIoFGrHrxjw8Y1H1xUhbGts98cPffb5vDG6Hmxec0B5taBt6jCkbGHSrv/+LZ4x+y6j9nzRz7dfuCedU3wD5c+/9D3qfXZnn9/Nn5lYuDp8q3Lue3Hb/B8eCIkdTx7ac/K2CRa23xz9LZ7zrzR4/nrrscf23nn5b+8c+Tyc53xp9/2T6X/QX90Q8/uNWvvfeAX3S9euW1F1y0B1Hp81/uPfPW3A5nJJ9CqHS+Fz77
3+n/PTH/7+co3ey/eT8fOr/ryqWPh3Tft8O38051LdzX9/ESP58KzSz5JP//oV/2h37Davzpex4ePN1XP/fPs9N17B59punD6k+8vHTvrjPsvqmv3lZu5B+4dO7f55dPGD7mpyar25Rt3K8+3v7f8zNDUO8fWfnFSf62819rzPUH3xx+XeW46fd+7zlKP5yeHyHrs
\ No newline at end of file
+eNqdVX1sE+cdDnSsdNqg7WhHitrerFYgyDl3vvP5S+4WO1kwIbWTeCFmouH13Wv74rt7L/fh2CaglbVoKF3bK6hp1Q/RJNhtloQAUUrDR2Gj2ibaIjZ1ItCVdm01NqXrumkIqSvda8cZieCvWbLPd+/v83l+z+92FjNQ00WkLBoRFQNqgDfwjW7tLGqw24S68VhBhkYKCUORcFt00NTE6bUpw1B1b20tUEU7UqECRDuP5NoMXcungFGL/6sSLIcZiiMhNy1ts8lQ10ES6jbvT7bZeIQzKYbNa4tCSSJkSACiC6XxJY5Mg4hDoOm2GpuGJIhtTB1qtu1bamwyEqCEHyRVg2TsTtIwtTjCdrqhQSDbvAkg6XB7MQWBgFt6aiiFdMMaW1jkAcDzEPtDhUeCqCSt0WReVGsIASYkYMBhXJoCyxBYw2kIVRJIYgYWZr2scaCqksiD0nltl46UkUorpJFT4Y3Hw6XaSdy3YlgTYVxEXag2ksNoKgRt52g7PZ4ldQOIioThISWA6ymo5fOj8w9UwKdxELLClFWYdR6bb4N0a38z4MNtC0ICjU9Z+4Emc+zh+c81UzFEGVrFYOTGdJXD6+kYO+2wuw8uCKznFN7aX4b89QXO0NByJI9wDOsVamwOHwkqSSNlDdKU+1UN6iqeDfizAnYzTH3nEOYCvv3bYmVIBsJNcyR+UPW9oXrMi3U8mjJrCIojmoFGOCiHk6A5L8N4WQ/R2BwdCVbSRG9Kw8GoBhQ9galomKO9yKdMJQ2F4eBNCT9eIhx3UyofjyEJsyrSIVmpyhrpIFtn1UGG6g/PTheJtCRQxHw5rXW8zHxPPtsj8KYgpDI9MuXJs4wYhyafmKi4qBoqpcEFkbJuDTI0O1Y5mcN+GPdKkTRFUvRUltQwFJIoixjP8m9Foro15KQo6siNBgZWFRZzkaXKnxPzLTQoY9JKua+HYT0ez7GbG82FYrCJx8VNLbTS4fxqaIesH7nRoBJigNJHsnPWpChY0w/gm06WYwDnctJ0nIlzgHYybifPO+Jx2gGcXMLlfAPrXORxlBKZKtIMUoc83kdGzpqukUG2pDM/g/043KmPEBVeMgXYZsbrUakH3UeoGpQQEA7wCZIHfAqSs/NnFetjD9c1h4LDbbjIIEJpET5zYdHKzk4+0RmX/XqCozktHNmoC4w9vSnTwWyoz6wPqmZrY0NQkNrzrhTNBc0mj9xD0i6WdrjcboeHpO2UHauUpELOphArpSI9kiMUYlvr8jFRocS0XbBvyCWNpBM1mNFQw8b2bgwzn3C2bwyCSDYaVpxNdTnUGg8505nG9nadk9nopqA7nd0cyNY1uzqCsXSek1uEaCJmpDwUZW+I4RaBkfLX+gg8sCIG3V+RDYllQ5ZE4/JSc6LxEUIZGL994Yr0Eevxfg8rUs5HtJUQhvgKZNgmGtD/MFLg9F4MjJkRBb87Bto7pM0bOsXubn0zciZBc93miJnPIEHgskoItCQ6+fWmqnW75yHD0k6SqoDDUay7PJrXS/8/q5rsIOdvATKszr7IigrSFTGRKLRBDavKGuYlZAp422uwEPwR2VoXsyY8NM+wNO9h3dDjTiQ8ZADv0blo/9sZQ6VXRRFIePAyvHU4xfhtXpZlbD5CBn43hzVWft09WigNqpJ8a9Gx+/uWVpU/t+Dv118/0XpKuUh9+/hf1rlf2jOQufv0O/CHj27Z9TK3Ynp82a6ac5P3xO6sz8daPrl0q0eZ2bpqvG9Zb2/4c6t3S9XSwbeWPdb14C/t59/b3tKbi5y9OPjum5/9AHoe6n3yhQPTH3187WB2xZnT/3iPWbuD/9OaS0v6pGO+X68dEDYOT1/Z/izqs7256qfvpid2//Ebj4wibt3f+U83vR47Uf38qOtboZkPv1xc9aGZ3dNY/KL//KnP/jn6++9bUmTG3lxly7+wNnBndfyvHTWrI7nfXP7mxS+vLJk4d5IMPO4I3NVy377lKvt+IL30nu/K937
H+9Wil9/fc7TxwW2BfXd/tOa2fP/PDx26qlwLnZ5a/Xxg6oMLB3dMHZk8cdu5Teyf+/Pyi69MPrHmd0c/efr8rfS+4pKTTwHvVz/eesfA54v7Z5Zffmbr5ENndov3M6pv50zswmsn7ludH23vEsSadwrr9kbJv+32jAauXLhaPf4cvPaHty9NnH3jF+lrjTsW7/3VyVfP3N6yyrw329X33Ni/O+X+6uW7loD/iCuelaiCPzUC0dmeBy4/vvLoIW7s1MqB6ifF8/8Sv1hVouiWqgF09VwT5uu/A4Sdew==
\ No newline at end of file
diff --git a/docs/cassettes/configure_4f83245c.msgpack.zlib b/docs/cassettes/configure_4f83245c.msgpack.zlib
index 4a6b8305065..260199926f1 100644
--- a/docs/cassettes/configure_4f83245c.msgpack.zlib
+++ b/docs/cassettes/configure_4f83245c.msgpack.zlib
@@ -1 +1 @@
-eNptVWtsFFUULhIU/CMiKlEC40JCgM50Zmf21Vqx7FJobNnSXWiXh5u7d+52pzuvzmPZLaKhEklEsKPGxII8t7ultuVRVARKAgmCkYAGDFlMIEGMKCKJ0SiBgHe3W2kD82N37j3nfuc753z3THs2gTRdUOQxvYJsIA1AAy90qz2roVYT6cb6jISMmMKn6/2B4G5TE3JzYoah6uVlZUAVKEVFMhAoqEhlCaYMxoBRht9VERVg0hGFT+XeXWOTkK6DZqTbyokVa2xQwaFkAy9sqgDjBCA0IPOKRMimFEGarZSwaYqI8nZTx+u1q/COpPBIzG81qwbJUg7SMLWIkveV8S6D/3VDQ0DCiygQdYQ3DCSpOCXsmMeiKc/abAwBHid8uWRiOqbohtU/Oom9AEKE8ZEMFV6Qm62+5jZBLSV4FBWBgXowcxkVSmT1xBFSSSAKCZQZOmXtA6oqChDk7WUtuiL3FjMljZSKHjb35LMjcV1kwzroxySqasrqU7jaMsFQnJui9yVJ3QCCLOLykSLAfDJqwX5kpEEFMI5ByGInrczQ4f6RPopuddUB6A+MggQajFldQJOc3MDIfc2UDUFCVtZb/3C4ovFBOJZiGMq1fxSwnpKh1VVoxJejDiNDS5FQwRjWTjoDFSUuICv3ZzgMo+GIVEkvCqyGSbfaEoQN1aK5mIm0LnC5mpY6fLpz4aKGUFgPaouESFiohiTjsnsYl8PhYEmGoimGYkifSYVbW5OaZ+GS1jgdFms4KVXbxFeFFtY7l4nVcktESbqF+fZQi1iPElEoBhtbwi2IYpUmuQ60ur1L6huqG+mQ7EkmI0Ba4vVG/WJVBYHZmQmBr+SWsUq9BFoT82vbgn7GHpcSTVRYEhvtXkOJL/fVsgbV5mUXBVtCI+jRmCFdZOikOTedf/qHtSEiudmIWbsZ1t2tIV3F9wa9ncElM0y9PY11iM6czhYv0C7/aw8k/FzahzVpDQZjZinBuAg/NAg7becIhitn7eUsSyysC/Z6i2GCj5Tg/iC+enoUy3DBsOSzMGbKccT3eB8p9sG82HEn8/TxLSVRUlV0RBZZWb1NZMPQ5CBrfANDN4tUtGYgC22FsNZgQfWr25KreWjyfCyxWqI9bRwrRJAJoweLR1RNyYfBhEhJt3Y7HPb+omVYdz04V5pkaJJmDidJfM2RKEgCrmfhtzi+dCvtwMU+9LAD7hfCgy7LFbpBHxvpoSEJCzYf+wEM5/F4jj7aaRiKxS4el+vwaC8djWTD2CX90MMORYhdtN6bHPYmBd7KzcSLsJPnaIhQBEVRlHaxHIMflkEOYI9GgRugr/DoEyBGyTdTVTSD1BHEs9pIWblSCSTzM6aSZRysE2daQQgyFE0eBcyIT8nnoFcQqoZEBfB7vdWkF8AYIgMF/VlZX2hxVV2N94smcqSQSL869J3IyoouC9FoJoA03BirB4qKyeNhqaEMxmqoClkH3Zg95umMuu2Ig9BJzsdjaBjtf9ml85M2C0TMPQGtgRhbaSvnONZWQUig0u3EbSp8TdZl8rnKzSfHLJm+cXxJ4RkrdhyXT9ATfbduPxk/N8G2Y7L14qSWcX1t3RcCSxdsHphJbb55aUvPs1evzN0wueH4yQ2pbblcpe+9db1EM/HC4l2fzwlJ21+5+OOGU0fu/PLzxUtKmB/M/hDf8nFiYJ3bXXt7Xfu9lc+A5Z91pL+fVTpVO9HRSUW/NSyUO/pE3576cSuk2Z/4D6wvb+zkO88eOJ6bMvs779nLtq+nVPylXO+ee/edrXU/zetacCu1qWP7PObxs907Sv55ve1qJ3HsVcDt/COe3cioh268/NRJI3thx/RzH360p4/tmv5v6EoNuTbgvnb5esdv3zRd/5WJ/D54pftO46Retq+r/cw0c3zt+cDUlbK7fewM78pN10JvzOjt7jgUfNOYOHEl17Rq6/nYwdzfp1c8nb2Z7t/bsOulCXffnyVu/PSCo/wG/9bVex+cuv58Scn
9+2NL7m5bOg08VlLyH7A2NRs=
\ No newline at end of file
+eNptVXlsFGUUb8FYg4RUg2iIqduVxAQ725md2ZNU6LW1Qnd7LJTSYPn2m293pztX59ijFdGCRCIIg3IJVqBlF2stYhsuqTFeoCiJHNESQvxDMFYTbqMkgN9ut9IGJtnNzLz3/d7vvfd7bzpTUaSonCTm9nGihhQANfygGp0pBbXpSNVWJQWkhSW2p9bX4O/WFW54dljTZNVdXAxkziLJSAScBUpCcZQqhmGgFeN7mUcZmJ6AxCaG4x1mAakqCCHV7G7uMEMJRxI1s9ssczBiAiYFiKwkmERdCCDFXGRWJB5hq67ip+VLi8yCxCIevwjJGkFbbISmKwEJ+6magoBgdgcBr6Iis4YEGWeArfg0aXEtT4URYHF6F3Lye8KSqhn9EynvAxAijIlEKLGcGDI+DrVzcpGJRUEeaKgXExVRpiBGbwQhmQA8F0XJ0VPGJ0CWeQ6CtL24VZXEvmxihJaQ0f3m3nQ+BK6CqBmDPkyitLq4NoFrK5ooi52yUJ/ECVUDnMjjYhE8wHyScsb+2XiDDGAEgxDZvhnJ0cP9430k1dhTA6CvYQIkUGDY2AMUwc4MjH+v6KLGCchIldfeHy5rvBeOtlBWi3P/BGA1IUJjT6YNByccRpqSIKCEMYxdZBJKUoRDxrncvJYWGGwJCCWJKqUxHvR4xPkeuSHKxSo4ts7ZtiDm9NQtbK3iWgKKpC9ssQVtchNBORjK6nA6rTaCspAWnDNRHobWhL4gZqulyqpF2MjRkTaucYE34q22+ivraOgi+RoZvORzufyNXrHyRVQpu1wKFKOeejVQB1B9VX29FtNL6/yoIVIdb5VsdY3WJaXQWeubXweiMsW3Ly5nHKxVnWPClPUox5Z4I1RFqz8QtdlfXBimvazPW9nkrWTb2LL6Jb4oYnxtLQ0tiwOLrNVwHGcnZSfILG07yTjJ9NU/phgeiSEtbHRTVnKvglQZzw5amcSF1HS1swerE/1wPJUdot2++feEPaOnAivVGPKH9SITaTfVAMVkJa02E2V307SbsZuqavx95dkw/gcKc78fD6AaxOKsHBuEFAzrYgSxveUPHIGh9Ajg/qbp42ElUFyWVERkWRl9i4n60e1BVFcMjM4bISkhIHLtmbDGUGYWYu3xGAt1lg1HYwLpamdoLoB0GBzMHpEVKR0GEyIE1ei2knR/1jKmxl6cK0lQJEFSR+IEnn3EcwKH65n5z64w1eix4WIfut9BkyIIL7sUk+kG+fl4DwUJWMbp2PdgGJfLdfTBTmNQNHZxORxHJnqpaDwbyiqoh+53yELsJtW++Jg3wbHG8Cz80GIjaZvL6bJbqQAJSARJu9NJIRsZDFIOysY4DuNtyEGMkm6mLCkaoSKI97WWMIaLBBBPb54SmrLRdpzpHBMnQl5nUYMeqJDSOWCBywriJcDug0ECAhhGxKj+jFRFk7e0prr8wGJivJAInzz6rUiJkipywWCyASm4MUYv5CWdxStUQclyD1Ff2mQMuihIM1TA5gQuxslSFFGGl9MY2v+y60nv3xTgMfcoNAbCdInZzTC0eY5JACVOO25T5ovyejKdqxj6Jnf1M289kpO5JuPf3btrN/4ofknmr7qcmHIiNG/Wa28qked7u5o/HFnfe/yX7V+f2U90nO7Mv/yya97j/tk3N3716px3hpdefrZs73Tm2XcbpyaEs//Yaz4oODBw+edke9eFfaeOne0/44hdTAKpWJs+PTiU+3bo7Krt52bP4248puZNLTz07YnEjqLTTNPgTvsk/4ZNM44NfBdYu655+1XGO+M5hGbl+ehrBY+VHf/89uq1S77YHGTd7sbWK12b5g30r8kfWTvlie+PbinY5nGPFDpmlh7c//uJQrmW6Xpj2ZG6Ef9cbUV9cvBk3+01sZtlrX98dD73uievcf23pzYXmMteOLfmvbc2nHE3NP/w0yud01a3vTLYfXtax9YdT81kW3duW9e/9eKkDdUX8x7dtePqlIp9fx08NrPwx5y
KlcaK0OFLy59uHrk29++SX58u6jz+8eTCfy8eXb595ZUv7rT+uej8w8mC81t+29D90JMnTTuXrXN3dd16v2rv9bKRx3fonw6t6LhUfmNyutiTc26dzvfcmZST8x/1AE4b
\ No newline at end of file
diff --git a/docs/cassettes/configure_5cf53202.msgpack.zlib b/docs/cassettes/configure_5cf53202.msgpack.zlib
index 2a96d248ff3..2b39247971d 100644
--- a/docs/cassettes/configure_5cf53202.msgpack.zlib
+++ b/docs/cassettes/configure_5cf53202.msgpack.zlib
@@ -1 +1 @@
-eNptVXtsE3UcH8MgUUEJCkQTvBTlIbvurr2+NhfoujoK6zbWMcoIlOvdr71b77V7dO0Ir/GcEOBEIT54CKMlzRiMbTwm4x/kFQyJgpgJ6CAxPkZMQBNBjPhr18kWuD96/X2/39/n+/p8v9eUjAJZYUVhRCsrqEAmKRUeFL0pKYN6DSjqugQPVEakWyorfNUHNJntfYdRVUkpyM8nJdYoSkAgWSMl8vlRPJ9iSDUf/pc4kIFpCYp0vHf7cgMPFIUMA8VQgCxebqBE6EpQ4cFQDTgO4QFCInViBL6CoqYiQUDKiiEPMcgiB9JWmgJkw4olUMKLNODSorCkomajBVU1OSimbQUoxeFbUWVA8vAQIjkFQIEKeAkmBg3TWJjRtiLJAJKGaf+Q80oLIyqq3jY8lSMkRQGIDwRKpFkhrB8ON7JSHkKDEEeqIAXjF0CmUHoqAoCEkhwbBYmBW/pRUpI4liLT+vw6RRRas/mialwCT6tT6exQWB1B1TsrYBBOT35lHNZcQHAjYTdiR2OoopKswMEiohwJ40lIGf0XQxUSSUUgCJrtp54YuNw21EZU9INekqrwDYMkZYrRD5IybyU6hsplTVBZHuhJV+XT7rLKJ+7MRhw32tqHAStxgdIPZhpxYthloMpxlBIhhv45lqBEMcICvfd+IECFAkG+qDwQrGFrg3MCxVUYUeOiTcAPfJgvEvVWe+qKrQsX0VUxu8BFF5U7UdxmcuA2i8XqQHEjZsSNOIo5Of8cY71pzkJ/CRVjmGrVNN8bwGzEArbGBVSV9ou1NQruqdHCc/C4sQEPlS1g3FXOuFaplXKlbndFsadeNhbzrgYz4W60+nhigRguRGB0WpSli2qpWqu7qsEi1FviXo32WZ0RWTLFJDzgsTJOsnw+VQYwvMFmLhkantliQrFshFaMsGPpp22QGxwQwiqjH8AJ4pAMFAlOD1ibgCVTNaWpBfIQfHUxmR2j/RXznlB4QksJ5KTeU81oeQhuQyooFTFhJgLBiQKzqcCCI6Xe6lZX1k31MynYXi2TghKCNHQPUj5JMZoQAXTK9Uyy96TJDjuZDh9OKQpikqgANBuV3upHqwb2B+op6RiYLFSUw6TANmbc6j0Z1jc0xhpoSqNpJtrAY45GwswGgUaFOrNXJFlMu4EBobyiH7BhjrasZpB3KZgrhuIYiuHdMRSOOeBYnoX1zPxml5iit1hgsU8+baDCrQPXXZLIdAM7M9RCBjwkbNr3ExjC4XCcfrbRIJQZmjhslu7hVgoYGg1u4pWTTxtkIfZjSmts0Bplab33LXgI0LagzRHEYeEpm4V2OIAdd9B0KEiE7BhuxSyn4OpjKYiSbqYkyiqqAApubDWu9+bxZCy9Y4rMuMVshZkWIqxAcRoNfFqwREznoBQikgw4kaSPuN5DXSTFANSX4Z+eLIGT5vW4jvvRoURCK6SBr0VSEBWBDYUSPiDDxugpihM1Gi5LGSQgVpVzkd5ppwkcw3AToIHDHgI0WgzX0CDa/7RrSW/aJMnB2KOU3sGYiwwFBGE2FCI8WWS3wjZlvilrEulchfC5EVvf3Dw6J/OM5OY7hRvYK6f7X1u8VG4eeWlD3Ze7kJ5XR8vexftmvPR+7eYtM4zz2vpeWPN46faavOcvrd/48e7bV+5GRiBlM63IhztbG8WJf728bdXeVOnf3YF/7ScDroed/an7hdf767fdKZhxKO48J6becPecuLjhZnPNiLcp+fD1yzfqlnzXbxpf1MXsaP3jtP/WtRcnF7s2H6u9vqNqx/xJ50atnJiTc+Sht7B7wuPmMfTeLX9OOnC3krviyjFs927ajn9y8Zv2k3s87q1f/9S16rdxtVdzI+tGgdVT990823d7mT/s33H23tg9tzoebZg7dpxzyvTXuf62By1zlXurL46lnvv1/J7claf+MfcJm0wgurZr/Mp3Nx699nC297MH5jPmKyU9Y6ZPOdUp0fcNHRdy+yqMkdiuwIPZPT+jVyc
vMbXWTCvE94an7vLpTc0Hd9/79tNfdt847t55/vzmY+s/mEsVzrx8x/D9hVmzOuznzG2P+qrKyg1b9fYff9/Sjz1Ydv+h/NGFO7Ng4R8/Hpkzratb78nNyfkPIFJRJQ==
\ No newline at end of file
+eNptVXlsFGUULxCPqFFT0UQ82G40IdKZndnZs4e17LaldEuPXbSFkPrtN9/sTHeuzrHd3arYVv4weI0oEpNKeu1iKQWlYgUxHqmagBojlRQV9Q/iAcYgicYL/Ha7lTYwyR4z732/93vv/d6bvmwCabqgyEvGBdlAGoAGvtGtvqyGukykG09kJGTwCjvS3BSODJuaMHsfbxiqXuZwAFUgFRXJQCChIjkStAPywHDg/6qI8jAjUYVNzYo9dgnpOogh3V62qccOFRxJNuxl9ggSRZuEbMDWqcTxT1QxDVsUAU23l9o1RUTYx9SRZn90c6ldUlgk4gcx1SAY0k0YphZVsJ9uaAhI9jIOiDp6NMsjwOKUThXdPMIrumFNLKa5D0CIMAKSocIKcszaG0sLaqmNRZwIDDSGyckoXwRrLI6QSgBRSKDM3ClrP1BVUYAgZ3d06oo8XkiGMFIqutw8lmNP4Mxlw5pswiSq6x3NKVxP2UaTHpqk9ycJ3QCCLOICESLAfDJq3n54oUEFMI5BiEKvrMzc4YmFPopujTYC2BReBAk0yFujQJM8rgMLn2umbAgSsrKB5svDFYyXwjEk7SR9ry0C1lMytEbzRX9z0WFkaCkCKhjDGqQyUFHiArJOLrmmowNyHVGpsjb9oDvQ3bRefyjlrVlvxkFAhrViMl0fqlFrYmRYpM0Etc5sp/hGgva6aKfX52PcBE1SJM6ZqDdDBt1c16lIzqivfmMqInVQzTBc19jA1DJiWCATDfUh3ddYw7W2mu4gUtvWyc1NnBxYX90AIw0BMaShQFuQbE9Wp9zORMuGuDvUBTtCAlPdqdVHNRALAX96Y7Mn2F5uw5TNhMBWJmtJ0q02JJy6DMNdUF3DC2l6bXojG6+JSeFgROl6MBzkw+2tvpYFnCnKQ1AF2h7K5aNy18S8YkQkxwzeGqYp324N6SqeF9SfwYU0TL1vBKsTHfs4WxicoaaGS8K+bSSIlWodifBmqY3y2BqBZnNSTreN9pQxTJnbbatrjIwHCmEiVxTmaxENyDqHxVkzPwhZyJtyHLFjgSuOwJHcCOD+5ujj0SRQUlV0RBRYWeNtROvcxiDqgwfm5o1QtBiQhXQ+rHUkPwvd6WQ3C02W5RPdEuVPuxghikzITRaOqJqSC4MJEZJuDTNO70TBMq/GMZwrRdAUQdGHkoSGSyEKkoDrmf8urC3dGsHlp6YudzDwpsELLuvKd4N6Z6GHhiQs41zsSzAuv9//9pWd5qEY7OL3eg4t9tLRQja0U9KnLncoQAxR+nhy3psQWGv2HnzTwXm8yIO8XhhlIeeNAhcHnSwLvBTnpwDyM2/h3SdAjJJrpqpoBqEjiHe0kbJmSyWQzG2eSoZ2Mx6cablNkKFosihsRoNKLge93KZqSFQAuw9yBASQR8Sc/qxssH19dWN94GAbsVBIRJM6937IyoouCxyXCSMNN8Yag6JisniFaigTqCVaq9utST8NGRfN0ZwPeZiojyXW4OU0j/a/7EZy+zcLRMw9Aa0DPFNpL3O5GHu5TQKVPg9uU/4t0pvJ5SrHppfsXbnt2qL8tQx/Ll58qvWo/BV189tnVh+s+KoXffTGmYbeO7ctvap4eeU9Wx/YHn+SmL53ak/F91scq1bv6Ms84Kg4dvYGbsffrUWrYjM3viBM7vT89edMokcnPt/y1Ndffvbtl+lzp3ue/Ca7r/fu/pfRVb9sGbQemtk66Ekvr1smFXe+d3ajTB49zR3evLdn6N5t9x/9edX5mek0ufnkF4+0kCf6Xy+JPTdzo3x90eMvXfjk9v7pujf6p8/eKljtJ3YnfigpevHjWFDgPhrq3z27dsV1fYPP/jucWnqmdG3//rqBd8u43/64lny3/7bQrl/f3zy1Zri8Vl1608slz5/46YPjzu8H4cDqrZ/+vaTqFSY9Bocqb/rn9qqp76ZeLe7MPHd+f3Oksqe06LHfD/946niYbrljtPivp0vu2LFn+8CKN6tKHNe
0rp1cee409/szm1DL5PmK2VHnSRdfd3oq+U173S1DO40L5cd6TmVPkccvwqriDx9ePrkhxA+kNiXvbNipnO+AP9z1R++t8FD31au7dv5W1Vbx46e7JmrP3XL9wZn3tq4IhyaqBi7senZ6Zb4ny4oyzKFhP27Qf7FZXVw=
\ No newline at end of file
diff --git a/docs/cassettes/configure_61a67769-4a15-49e2-a945-1f4e7ef19d8c.msgpack.zlib b/docs/cassettes/configure_61a67769-4a15-49e2-a945-1f4e7ef19d8c.msgpack.zlib
new file mode 100644
index 00000000000..231ce06cb0e
--- /dev/null
+++ b/docs/cassettes/configure_61a67769-4a15-49e2-a945-1f4e7ef19d8c.msgpack.zlib
@@ -0,0 +1 @@
+eNqdVWtwE9cVtovbuCmQtKGY0OmgKAmQ4JV3tbJerpzIsuWY2JaxZPzg4V7tXkkr78t7d23JxpNCMm1TG6dbm2nKQDLBD4HiBxQSHFI8GVIaaGknkISODW1DJo/OZMiESaZJhhL36uFgD/yqfkjavd95fd855+5KdEAFcZKYO8aJKlQAo+IHpO9KKLBdg0h9alSAakRih+t8/sCQpnAzD0dUVUbOoiIgcyZJhiLgTIwkFHVQRUwEqEX4v8zDtJvhoMTGZ3Mf6TYKECEQhsjo3NJtZCQcSlSNTmMjNliHDGoEGjohwD+KgRMNfu8jxkKjIvEQQzQEFWPPtkKjILGQxy/CskpYJELgRA6jkKpAIBidIcAjWGhUoSDjKlRNwbakicRvJInPhFXjcsphSBPTRWLjb/46u40iEFKnYai2ZlPBABYiRuHkDMZYCdWFqZowQAYKtsPEoZQPWcF8KCoH00+8xIB579nYOFtODBt7enB5mF9OgSxO7SYSl5lFSsEoZFSM7NnWk4hAwOIQzwxHJKTqE4uJnwQMAzEnUGQkFnvXx8NdnFxoYGGIBypMYrZFmC5TT7ZBKBOA5zrgaMZKPwxkmecy4YuiSBLHsuoQqURuPU6m9CCwlKKqH/PhJNxVRXVx3CGigTJZKRN1OEYgFXAijxUneIDzGZXT568uPJAB04adENnu00czxhMLMRLSR2oA4/MvcgkUJqKPAEWwWo4ufK9oosoJUE946m4Nlz28GY42UWaT/cgixyguMvpIupGOLzKGqhInGAn70F8gJ+b54aEYViP6EE05DioQybjf4ZOj2EzV0K5hrAU8dyaR7fsDvsfnRfxnTsFwOdZFPxmIaIUG0mqoAYrBTJqLDZTVSdNOC22orAmMebJhAreV4UhAASIKYSkq5mVPMBFNbINs0nNbwU+mBMfVpNLHo0XAmCwhSGSz0seaiPrMxBNV5Ucz3UVIShiIXFc6rH4yrXxnV6yTZTSWjXR0CqSjy0JzQagxoWNZEzwCqTA4IUJAmBwrOZE9mec+iWslCYokSOpEjMCzCnlO4DCf6e/s2kH6cDFJklO3AlSpDeIFlbCQ6c/0QoQCBSxaKvZNNxaHw/GH24PmXdEY4rAVn1iMQnBhNpRZQFO3ArIuDpBoLDaPJjhWn3kAP7SabRaKtNtIyk4DlmSDdiugg5AGVruZCdqLLa+k9gGDvaTElCVFJRBk8I5V4/pMoQBiqTlz0VQxZpEkS/BqZHiNhX4tWC6lakAlBlmBvATYSSZEMICJQCLTf3qivLnWXVPlSfpxkh5JauPgb2ZzV7W2MqHWoOBSZU+V2xoHQnu8vbjGjTRLwL7JXS/5N1eihvYyjxm1uDvaGqjI5hqCwkWYbXa7mSYoE2nCU0qwgUoEo22g1WNTWpHVFmDZOMNuDDTEvU1Nzc1as6MOVfg2bWLKKnzmmKUOKqQnYAGo3RGtkarVliaOlWJ2WfXxUTcdEDyq6tjULlVX14r18VhluLjM29q5WeVsSK3AJeJl6yoqMeCGxfsSubJjQ+CxIVJDY3OS80NTYmDTxLhMi1dkieExfGf5RD5eYvCnGIb4F+9tP6dCV60kwplBTIzWwbEu2ttIosdpqoGOhsrsUm2jucsW0hyVDc0+r6XFvtlnCtdvjJV7y7xoATN2i40gs+RYSYs93Zo3U/8/s3q5iVi4BQifnLmcE6KERC4UGvVDBU+VnmR4SWPxtlfgqMdL1Lub9WMOiqEtFCgO2c1BOmiHRBneo/PevtkZw6mrIgF43HgdjH40QruMTouFNpYYBOCyW/GMpa/wnaOZi+v0t5as6c3PSX+W9PlrpEvk0pPXG/N/8oZn+u7u6ZfO3DkWOQ5c/OvmVcnqSxvZswP/Hhd+PVd6ppZf9/HPvvpp/o4dz7w/dOyuVUt+5z716OC+8oYXP3X2rxFLC0ufOHH9P9e+tnad/WT7J1xNdLzz4y/Ia0s/m6pwr7v4I5Bsua9qqKSp6f2ntms
rifBn6/s/6l15sPrnf37rV3tPjXx3wyFof/5vv71iuTN8Td+2+qE33aem7+ijXpbmpn37r9zzAO90VDys9hfkVz3/x1VNQzvyptCF7/89byB3hfee6L9OiIOrc9m+jd9ujK6/3HP9fJzs3bCn9MZ/lRvvbCU+atwy+8Hx0JaZL7e9Yh7q2v85+od10Nby+drdn5Y+t3UFs9Qy2SLuHPzaW/vg7PeczGvC+cDBs8eXX1+2/v6pgsuP/n7nHRMzuz9oeWzdue8sl/9Umje5/4m+viPP2vpfHGLe+PKF1p3mCuGvQ6/KK5RDIxv8vZdfGti9bHntO7PON/ddvTvaltxbby55e3vHvdoBcuvR3H3j/YZ734v+cnLlQOPbF5Y+/ZeO0zfGf7B18Nll5650XFs7EMrLGzo7J35YfFXsOtQ7p6/Jue/w2nMXZoNrT3/1mmdvxVz+3J6L3ZetS5dfLXqIfPDpi9L9r++zJbtXv/vcj8GeD+sLIm0bLn2RG99f23je+J5U8NYP837x7l1Y7bm5JTnanIMn8nJy/gcqPgKj
\ No newline at end of file
diff --git a/docs/cassettes/configure_71248a9f.msgpack.zlib b/docs/cassettes/configure_71248a9f.msgpack.zlib
index 2f748b18fbd..9235afc59a1 100644
--- a/docs/cassettes/configure_71248a9f.msgpack.zlib
+++ b/docs/cassettes/configure_71248a9f.msgpack.zlib
@@ -1 +1 @@
-eNptU29QFGUYx5wIUyY/ZMWHar0ZKp3bu13uuD9gxHHkpUgccBkmgu/tvtwtt7e77b6LdxglSM2U0swOqEzK9MfjLq9LITGaTPtQFs1kMjZDkijqJHxIbGrSGG2g987DZHQ/vfv8+T3P8/s9T1usCcoKJwoLEpyAoAwYhH8UrS0mw1dVqKD2aBAiv8hG3BXVnv2qzI3m+hGSlAKjEUicAQjIL4sSxxgYMWhsoo1BqCjAB5WIV2TDo7u26oIgVI/EABQUXQFBU3lmPaGbC8KWjVt1sshD/NKpCpR12MuIuBMBJU0eyPNEEBKAaMQQBPCKKiK8EMiKrmVTEkhkIZ8MZHigspA0kX7ABVQyD9ehTJQ1CYdgUMKDIVVOVqEMVEvMDwGLxz6fsTTiFxWk9d81yiHAMFBCJBQYkeUEn/apr5mT9AQLG3iAYBz3KMAUV1o8AKFEAp5rgtFbWVofkCSeY0DSb2xURCGRnolEYQne7Y4nRycxIwLSBh1zfRjdYcy8gFs2WQxUX4hUEOAEHnNH8gC3FJVS/qN3OiTABDAOmVZVi95KPnhnjKhoveWAqaieBwlkxq/1AjloMR++0y6rAuKCUIs53XeXSzv/L2cy0LTB2j8PWAkLjNbbAHgF9t8m+XZKHGtlIikLSdGD86AhksMkI+IK2ofUwTkCeSj4kF/bT5vtH8tQkfCywu1RnIZUpS2CxYI/DsXS+/VRRdmc1DsjpVg27ZjHr+oJ2kpUMIhILglBmwtMeQUmK+Eq9ySc6SKee6rU75GBoDRgpZ6f24oY41eFAGTjznvuQzx9RCTHal/hdz1F01V0CWdRnSG6zOYoAeH1DkuTy30kRDK8qLIkwhcIydSwIaSNEnZLvs1szwdsvt1M51tsXjND2/OslB2Txlqphv1NHNDitIEmfKLo4+Eh52rSCRg/JKtTlGix0g0vOsrXOBM1ZJXoFZFCeoBPiwiiAKPVUMYqaPFUabzXMozi9CrHBm3Axpqphgav18bCPAtgAFmC12WOntvjR5JHkbr0ViyBjE0nFmx5ckdWRupbuK6yPnC2eMnsynjdX5fazTlouneYHlpdWJB5bnPOxYnOlpffz/3ldG7+8vGWM+c2V7SN1y/qoZv1/T9P9B2fGWl5bebPy+oXN29M28fEolVTT5y3VtR80PFs+coqNRu0bo8skuKVJ/tBa09obY13V+31gTHD9h3BOmFk75bfp25kLk60d5zp+zc8s8f/d0F4svLSu5mvPGObLhe2+bhFjTmbu2qvhk+Nf5NVFwKmQ9J7npUvfTLYN1kYr11/32JzieRqLjvw9pVG+Y/fBtx9F648UtSara3qLs593HH14sPktoUdKzqGfW8NudZ8P3Ry49kHm05X9zzd3LXkCHt9afY+hy6WuWldUceFLxPa3mVjnQ7X8Gf7iJFO7uLyE7ujv1JTEzX6YepRPel2vX68+c099ffH17zx2NABavcLVx7a/cB3X7efLnvqYFbZTyuWdU+e6vLtHPMVdg98+87la58X/3D9OXT4WO0MZnh2dmHG0WulN/9ZkJHxH+xyZNQ=
\ No newline at end of file
+eNptVH1QFGUYhxzDRi3I0rLUnQOyyXuP3dvz4BgtTzBkEA7hciATfG/3vbuVvX2X/bjghBoVZgJMW5qxKSYy77ijG0IuFC0/pkadMZWZJv8orBynj2HUP0qzHM2il09ldP/afZ/n+f2e5/d73t0eCyJFFbCU3CNIGlIgp5EP1dgeU1CtjlStKRpAmh/zkVJXuTusK8JQpl/TZDU3KwvKggVKml/BssBZOBzICjJZAaSq0IfUiAfz9UNtW00BWFet4RokqaZchrbazKbJFFPuxq0mBYvIlGvSVaSYzCYOkyYkjRy4kShSAURBagsppqAH6xrlQVBRTY2bCAbmkUjSOBHqPAIs8EOhRgdWQkCzdDaB0lBAJvNoukLwaQvdGPMjyJNhLyalRfxY1YzEfQPshxyHZA0gicO8IPmMT30hQTZTPPKKUENx0p6ExhQy4jUIyQCKQhD11wFVg4IkkrmAJgQQadX4pMTlri4o3LCmJDoOavRBWRYFDo6WZ21RsdQzMS3Q6mV0fzg+qgkgQkmaccg52WZWaT2xQ6Joi81hofvupRYh6Tgqj8WP3BuQIVdDcMCE1UZ0vLj33hysGl3FkHOVT4OECuc3uqASsNumTano0uigRiyv9H66ieBdOtbCWC05iWnAar3EGV1eKKooMeXBVEmcGMkC2g5o5tA0aKQp9YDDhMH4mO6dFFBEkk/zG2GGdXQrSJXJBqMdUVKm6er2CPESnTsdm1i7fa6iyU3YGcknrhrH3H7dTNF2qhgqFCFeTjH2XJbNteVQBcXunrwJEvcDXUq4FSipXuLUmsmliXF+XapBfDzvgesSn7hZQOCNo+S9mmaKvF5vbQUKWiX7entdTijEhoLlzs/v6oIVH5SE0BjtaN1QBuuws8t51gOQx8sDmyMnGzgcVgZ4rNYc3pbDZNt4ezgoQCPOWBjKh7FPRPs5L+Ag50dgXBojll9Z4iwuzOupAGXYgzUVuKHPiEhYQtFypBA3jDgnYp0n66+gaN7LoMxZaRxwMBxrYzia53mbjeOsYDVZm0mZpmSIjN6dsd/ANmKFQo5OJbctaZuVNPbM4A1nzY+r5jSPtMbPVh1t6y9Z+EfmO+cfLnseFT32kye6eIj9q0JZ8YE++NnIse8FqmvptuM3b2RoTpw4+eLAK2/82dXx7dlrhzF+aeX80PBx+5Lfd33E5H2Y8qr7ibDwCPtspnnuvPDuLRXMiSrzTHPi0ecqL8zf6Gt4b+Cfhjc7Ftxa6a2YvSzhufXvdfv6yzd/3lTUkrJQmPfVly5r+p32XXuNNG/hby2uO4ODm5uzv7g+6/y+1rLXryWX3Fl7sNBYtWjw0rn3q2K2/Nuwdgl/Ib/UdyO5c1WBa757755Oai5dr2+mbqd6n3p8x3B/U//w38sDKSlPdxfO7sx0preAupnh1NP5zRdTnvHSja2dPyxa8e5q1zeJdZc6Th1IpDYUFPVsDLLmK/7XZgxc6V56pOrYiau754TPlL7N76P3rBWXFaf/Up16OTPtu6bMWKp13VXfmdmhxe0bXlhUe/J69HSf/WA4vvjr7gW9/z35K27I4EZ87Tcah48frhohqo+MzEhKOed+i30oKel/M3dwnw==
\ No newline at end of file
diff --git a/docs/cassettes/configure_7ba735f4.msgpack.zlib b/docs/cassettes/configure_7ba735f4.msgpack.zlib
index 5e90141aaf1..9fdad0a088a 100644
--- a/docs/cassettes/configure_7ba735f4.msgpack.zlib
+++ b/docs/cassettes/configure_7ba735f4.msgpack.zlib
@@ -1 +1 @@
-eNqdVWtsU1Uc3+SDssSoUUNUkMvEaHD39t7e29eWGreWsSmjZS2w8rCcnnva3vW+eh9dO/CD0xgRFS8xSqKMx0qrdQ4IExA3o6JEIxoFNRlGo4mPmPhAIxFExXO7TrbAJ++H3p7z/5//4/f7/84dKOeQpguKXD8syAbSADTwQrcGyhrKmkg3Hi5JyEgrfDEcikSHTE2YWJQ2DFVvdjiAKlCKimQgUFCRHDnGAdPAcOD/qoiqYYoJhS9MPLahUUK6DlJIb2wm1mxohApOJRt40agKMEMAQgMyr0iEbEoJpDU2EY2aIiLbbup4/cA6vCMpPBLtrZRqkCzlIg1TSyi2r4x3GfzWDQ0BCS+SQNQR3jCQpOKWsKMdi6boB8ppBHjc8JZiWtENa2RmC3sBhAhHRzJUeEFOWS+n+gW1ieBRUgQGquC6ZVQFyKpkEFJJIAo5VJo8Ze0DqioKENh2R6+uyMO1PkmjoKJLzRW7NxKjIhvWaAgX0drpCBcw1jLBUJyXovflSd0Agixi8EgR4HpKatX+2nSDCmAGByFrPFqlycMj030U3drTBWAoMiMk0GDa2gM0yc0dmL6vmbIhSMgqB8KXpqsZL6ZjKYahPPtnBNYLMrT2VGk4NOMwMrQCCRUcw9pFj0zhIyI5ZaStIYb1vqAhXcWTgx4q4WOGqQ8UMRfo+Lvl2gjtDt03ReKXdXOKQcyLNR5Nm00E4yFC0CCctJMjGK6ZdTazLLGkKzocqKWJXpaG/VE8fHoSU7F4ivYyTJtyBvGVwGUJH7cJx93Y5eM5JVFeVXRE1qqyhnvI7kntkJ3BA5PTRSpaCshCfzWtNV5lvq8/38dDk+fTuT6J9vVzrJBAJkyO1o6ommKnwQWRkm4NOX3OkZplCvsK7pUmGZqkmSN5Eg86EgVJwHhWf2sC1q2ii6bpw5c6GEoGYamXObr6vD7dQ0MSJs3OfTEM5/P5xi7vNBWKxS4+j+fITC8dTa+GcUr64UsdaiF20/pwfsqbFHhrYiFexHm3h3ZBt8uFmKTXyXsTTg4kII+c0OWBnkTyVSx+AeIoNpmqohmkjiC+rYyCNdEkgbytMz/LuFg37rSFEGQomjyKmImgYvegtxCqhkQF8HsD7WQAwDQiI9X5s8rB2LLWrs5AJYKLDChKRkBbT9XPisdhMp6Q/HRHpA/mvWpvFHa3i+YyJpFd7PH0rHAFdfeSju5YXI9qHUIiLrRDkvE4fYzH5XKxJEPRFEMxZNCk4tlsXvMtWZ7N0HGxk5MKS3v41tiSsHul2C73JpS8V2hzxnrFMMoloRhd1RvvRRSr9MhdIOsNLA93t6+iY7Ivn08AaXkgkAyJrbgbYKT9jhYCz6aA8fXXFEJihZC2PlzN9JQ+Wgi+ioGfmnkbthAd+KIPyWKhhYjYYCL8BhKKCAbyL1NkNPE0xsDMCbyfW8kqYQlkc21L+6MhxpmRcj1UXBJXOQOY1tXBpaxB9QfYjmhvbBoINMaBruHgpjlvdQovlv4/qzrYQ04XPBlSJ79oZVnRZSGZLEWQhgVkVaComDy+2DVUwpx3t8asUS/P0ckk4GiIEAehm2zDV+ZUtP+uh6L9VSgDEc9YDloH0qy/sZnj2MYWQgJ+rxvLqfrde7Bkz6Sceqc+PH/zVXXVZ5b41JvLjtLXBn851/D+98Xegd1t8kfXv3DVplva7tS2dyXUsZHx1qcrF558viSFN/wKBlO5XN91cO2KsfVrDw7wKVO7MHqqaccHv60ZEz1bzvz00rl15IlDN95tHPkuq3715/UL561jN23/9J6GD+rvu3KAKJY2/0it+6YwePXOow3GPtczW5+LDYZ/+Wzb8ZNN87eTi2YPzvnRn3h8/xenH3myNDD//n2FjdHm0/dzb2w+ecexw4F5N1BjqwYfXnDrjl3BwPpHTv2QaJj7yficXdt+eOXWM38LW48da1573trx++qbvv38xIOn022VO0Y37jz/6P4tYOifeKc8L/fMvW856AV
01zXZvzYenT33pgWz/daBs/f2xOpn7/n7G2Jtw0Sbmftw6L3ks7ftXTz3YKT0asemYwt/PjS4864Muln97MWv3z56+x8fz8fQXbgwq27pE9edBVfU1f0LdzdoqQ==
\ No newline at end of file
+eNqdVXlsVNUaLzQIUZOnL/G5gV5G8OVp78zdZm0mtp12Smk7085MKYVoOXPumc5l7ta7TGeK+CLU5UUN3qYxkryEpe2M1AI2rYKFGtG4gGiCC6EaMS4Ed1/Ce7iDZ6ZTaQN/vfvHzD3323+/7/vOlnwaabqgyAtGBdlAGoAGPujWlryGuk2kG305CRlJhR9qCUdjg6YmTN+VNAxV9zkcQBXsiopkINihIjnStAMmgeHA76qIim6G4gqfnc5ssklI10EX0m2+9ZtsUMGRZMPms6kCTBGA0IDMKxIhm1IcabYKm6aICEtNHZ8231thkxQeifhDl2qQrN1JGqYWV7CebmgISDZfAog6qrAZSFJxBViKrSk7tTmfRIDH5W0bSiq6Ye2bn/B+ACHCHpEMFV6Qu6y9Xb2CWkHwKCECA43gNGVUhMMaSSGkkkAU0ig3Y2U9B1RVFCAoyB0bdUUeLZVFGlkVXS4eKVRDYgxkw5oI4ySqGxwtWYysTNB2F22nn8uQugEEWcRQkSLA+eTUovzQXIEKYAo7IUusWbkZ431zdRTdGm4GMByd5xJoMGkNA01yceNzv2umbAgSsvKBlsvDlYSXwrF2mrF7xuY51rMytIaLJByYZ4wMLUtCBfuwdlH7ZvERkdxlJK1BmqGe0ZCu4j5BW3PYzDD1LUOYC3T8zXypYXaHG2dJPF1241At5sWaiiXNCoJyEc1AIxiKcRK0y8eyPs5J1DfHRgOlMLEr0jAWw82mJzAVdbO052HSlFOIHwlckfCpAuG4mkL6uDFJlFEVHZGlrKzRtWRkZlLIhtrxme4iFa0LyEJvMaw1VWS+pzfTw0OT55PpHony9nKsEEcmTEyUTFRNKYTBCZGSjsHxuPeVJLPYj+BaKZKmSIqezJC4z5EoSALGs/hbGlfdGnJSFHXwcgVDSSE82HmOKj4vzdXQkIRJK8S+5Ibzer2Hr6w064rFKl63e3K+lo7mZkMzkn7wcoWSi92UPpqZ1SYF3ppegQ+dLpZ1cpDj4jDOMF4WxIsviEpAlmHdHs+LePIFiL0UyFQVzSB1BPFuMrLWdIUEMoU587O0k3XhSisJQYaiyaOoGa9VCjXolYSqIVEB/H6YICGASUTO9J+Vr+0IVTc3BEaiOMmAoqQE1P/hgps6O2GiMy75s/VaeyYRDMqNQTWaFnpqBb7V093U4wm2tm2sFzrjmmK2dToTTrWDpN0czeBkGSdJ2yk7nlIykIRM1mzqcbbQNQ0ybBfYVLfQ3hRKhRqYWF0rC72U2KyC1WGvN9YekutWoTrV69WgnA5G9HgrQJH6SMToMatbYyiaashsVJyt7cy6auhpCTe2grRKi71rA5ybZwolAiPpd1QSuGEFDLq/NDYkHhuyMDRuHzU7NJUEXwTGb5+/IiuJVXjXh2UxW0lECwgj/A8kFBUM5A8pMpoewMCYaYH3h1J07cZYPO10rWpLsiE+HKrrCNXx3XxNZF04jbhwd2e0c218DdMA5yDjoV0kVQLHRXGeYmteSv3/zOqFteTcLUCG1ZlLLS8ruiwkErko0vBUWSNQVEweb3sN5QJBMlLdYU14achydJx1JRKUh6dpsgbv0Vlvf+6MocJVkQcibrw0tMaTrN/m4zjWVklIwO9x4RkrXn0P5gqNKne9tuDB2x9bUlZ8yh/vp5VXqOse+vG3q9/Sq1YM/BWlFr+/q+m7qrY2a8zxrz1w/flFNyy/sOlMX//OyJ7Gh3/9furwoXO0rW91tV6TfftJ95rm28a/PSMcuG/42QPfgnOTnxw9++rmQ/cN3Lqt6vOd1EfL1N9am18URhcOBH98unLDkiPOjtMrv2Leerdu8S13VC3qQN3MDvudpyb3bj/eb3SvORnU/sP9/Yfrl/ctPaS8+cyi+0/8+/hnO1aXnx6/OvmAYOvzDdY8xPwwXJ+zXi//InjHl/b05NLyxeibjiW59cPv/O+9M+FjsZPb995zrnFqYN0vk+rLh08c+WBwbGI
w/+iT2+Td5+9a8fw73N+u4Xbs3Ar6/5s2Tn1a9h4b2Nr0BHfup4c7lpdtjz0wcd2xm/dXX3sMbBq94d3Hq7hbfu55av3p309F2i+O3V52dsP1tWDZkqP7zy77cGnf4vPtH6HX7t3w9ZHvVj6ycOVB9aoFK1Nt/+z9+i+37frH6j13qyePfn/h4+3hE26M9cWL5WXjb4CnLiwsK/sDSUeFGQ==
\ No newline at end of file
diff --git a/docs/cassettes/configure_927297a1.msgpack.zlib b/docs/cassettes/configure_927297a1.msgpack.zlib
index e70d9a6a14b..39069ea00f7 100644
--- a/docs/cassettes/configure_927297a1.msgpack.zlib
+++ b/docs/cassettes/configure_927297a1.msgpack.zlib
@@ -1 +1 @@
-eNptU3tsE3Uc3wRFGLKpOIcQuTTDsLhre2vXbSUIpYPNwVjdinW8xq93v/Zuvd7d7q6jHS66Bw8zEjglEubGa31R92oQpkNChCwBo5g4CA4nM8BgoCQoII9g8NeyIQTur99935/P5/utD1VDUWJ4LrGd4WQoAlJGP5JSHxJhlQdKcmPQDWWap/yWkjJrm0dkBmbRsixIRo0GCIwacDIt8gJDqknerakmNG4oScAJJb+dp3wDO9ap3MBbIfMuyEkqI0Zos/SZmGosCFlWrFOJPAvRS+WRoKhCXpJHk3ByzGQTGRliAJNoXpQxgYduDNh5j4zZIRAlVe2qWDGegmwsmGSBh4K4DqcB4/LgWaiXVqfNiZWUoVtA4GSPGOukVWtrQzQEFIJ+LiHFT/OSrESfgtMFSBIKMg45kqcYzql0OGsYIROjoIMFMoygOTkY50uJuCAUcMAy1TD4MEvpBoLAMiSI+TWVEs+1j+LCZZ8An3ZHYvBxxAonKz2msTk0Fh9in0Mj6wxqbbcXl2TAcCziD2cBGikoxP2HHncIgHShOvioskrwYXLn4zG8pASKAVlS9kRJIJK0EgCi26Df/7hd9HAy44ZKyGx5ut2o8/92OjVBqHOiTxSWfBypBByAlWD0EcmPUiJIKx2uNeBaoueJ0lAWfTjJow7KHm3nGIEs5JwyrbQR2bqwCCUBLSxsCKI02SPV+5FY8IfjodEd21uyeEzqzf58JJty2Ep7MjEiByshZSy2JBihN+qyjHodVlBsbTePNrE+U6WoVQSc5EBKLRzbihBJezgXpCLmZ+5DZPSQcIZSvkXvCi2xoDTHvIQp9HIOU3nVkvwam6m0Sqo84MVJlvdQuIyuEOJxsF5ZGcD0kIR2u94BCSo3R59DGewIuJ7IgiBP58jNJtuqGaBECDWBOXneycIu8yLcDEga4mVxSpRQfvlSU/G75vYP8FLezssSbgVOxc/xHAyWQRGpoETirdFeizCI0ktN5cpXuZRe63BQlCFXn62zG/LwBWhdxuh5BN8fO4r4tdchCURk6ku8PrPpxYT4Ny5fKeF/1U7uOzJpz2J10VbDsUl3vgYf1T23SjPOmFzH9h9MbbpZWLna8ueh5KLBa+lba2ruz7060HdymfqKMDg02NJ//87N4VsTh2rfPHvst89Gzgxvv3HypC71tciGxNa1R8uY8afDqcdb1n4yp2Xl8NmMi1N6Nl5LO5/B87d7uv7Z+G/rjU6YlS6d7rn8esGxoeFrw+HBgqmN4ZT5BakNOwgTnTtdn5b/ZXNlb/I8C37wzOcTssEbmGXCW9Nqi7fMKPUnsvkj5ZbiuZlVV9V7V6eszZ57vf7nyxMvdYc3vpM0+3a1oj+adLf5nFhU13t+Vlh38FLTlWnpJd7ozEad59DRWztnTV54dv/8BtXLTfypqP70eHNpRX9DN3C9vWZqrhT8rmth+oHCjoyWFMbWXztj0ZwjjS/tDrxA0sLFfTeya4+betMK69btXlPeza5PSqCydnZ+vGukfmVq3YJw74aMmb90XOjyfT+F6HP9/eFfrfYt09eX2ye9OtJ878d5EanSey+5+73C509tjxoLMpYC0ybbJiI0f/rh4MA384YOl124Oztt24llU/bNWZ4q/rH+/E+bb9pO7Ct63xfIyzMWBX5vpJd1bNthsyx/peLTXa3NtnYK0u1t2yqW16sou3VFEpL2wYNxCaGA84vLzyUk/AexLZt7
\ No newline at end of file
+eNptVGtMFFcUxtCSKlqosRHtD4bVPmKZ3ZndZdndii0siiiPhd2UajV4d+ayM7A7M8zcQYFaK7aagFoHbNWaGpF1wQ0iBEWRWpVq1dZXGm3VWLRNg9EIplKD9VF7QVCJzq+Ze875vnO+79ypqC+BssKLwqhGXkBQBgzCH4pWUS/DYhUq6POgHyJOZAPObJe7TpX5i29yCEmK3WAAEq8HAuJkUeIZPSP6DSW0wQ8VBXihEvCIbOnFL8t1frAkH4lFUFB0dpoymuN1wyk6+8flOln0QZ1dpypQ1sXrGBE3ISB8kCfzCBKAUDhRRoQkQj8BPKKKCA8EsqJbuhDjiCz04VTGB1QWkiaSA3yRShoxCWWiEjEcgn4Jz4RUGXNQemppPQcBiwfuCnstwIkK0lqeG2IXYBgoIRIKjMjyglfb6S3jpXiChQU+gGAItyjAQZW0UBGEEgl8fAlsXUIqCPCCD89GIt4Pcavajqxsd35a+oczs4KPQbVmIEk+ngED5YZCRRQahyYmUakEnw+HBnQhsVgC0vYmD7dpcJZiSwSC0ptteqr5WWofwB0HpcF4x7MBCTBFGIccslsLPi5uejZHVLTtmYDJdo2ABDLDaduB7LeYR0wpq8LAoFq9w/k83VDwKZ1JTxv11pYRwEqpwGjbC4BPgS1PPHhSEsJGmkjKQlL03hHQEMmlJCNiBq2WahoW0AcFL+K0OtpsapChIuEthiuCuAypSkUAewlPHq8fWr1t2XOHN2F1IBW7qh1wc2o8QVmITCATmDiBoC12k8meYCLSMt2NjiES9wtdanHLQFAKsFMzh5emnuFUoQiyIccL1yU0dLtIntW+w+/5FO12leQIljkSci2WzB7RmZPqRIUZ7U91EWUvEPiyQdqBuotTTTaLKYE1eUjoKWBJs82aSNpsRpr0GI1W1mylE82spa6EB1qI1tOEVxS9PrjLMYt0AIaDpGtQGq0+dV5Wcma6o/EjMlf0iEgh3cCrBQRRgEEXlLEbWojxiSqL11+GQVyemzxP222jGZOZZm3QVkCbPFYjmYLXZlimJzIEBu7O4K9gObZCxkdHR/XGVr0SNviEs9XZ4jkq+ujBn45P1Hedb+uYsP4N4u/sadOkDyq2flWTMb71RMbGSa3jzn+6+K/dkSHrvC39fZv7p15pv7Cvc82lnsSJefce9N03xAp38vqLux72HuxtkqZbb/m2UK6GyXbUNTqqNNpdN59QutZcvnp1TF5UXMK+S033oqN4tb39sr9ts/d0xzcn1p18/Urv6bUr9/7bI0/oJcekuF/ZmlR1+Ig1pcE6yR2TWlU9Z/+r7zvJX/unRBV2nkveGpEx/l71jOKYKeqipJSfKwpv/BGzqmD5/OQFG9APaest46gv6sq4ms/kosjc0bEnNvxTeYx21hx4Se28NN3ZPPVuau3ps/a3y06N3z92T7yzdvaRmzF7zjgSDlMhw/VZF5rbVnE5i7pTF76VU7jAEhvdek29Wbb5zLhw7vsVtPtC39m71V0BInNs4FBPrrnfEee+s+74Eu5r3y0uJy2Ljasua+iG22I+STf0RRxq+bPgv7zMa8U3NnWr6Ya1+yoDrvUb61aWvzP27NV1jm+THngqQ7MTujOWnfpx9c7fr3csLpze8/IKY3tnXEL53JTeZOeDTbeL8pyTI279cvu3yvcm8nPaEmcQO97Vt3Wdr+0tzo+MqMq4f+xQd2Pkyu5la2qS2srL7A9HhYU9ehQepj6sCEaEh4X9D/5zr+U=
\ No newline at end of file
diff --git a/docs/cassettes/configure_97538c23.msgpack.zlib b/docs/cassettes/configure_97538c23.msgpack.zlib
index 96c977e0541..cded8d36251 100644
--- a/docs/cassettes/configure_97538c23.msgpack.zlib
+++ b/docs/cassettes/configure_97538c23.msgpack.zlib
@@ -1 +1 @@
-eNqdVX1QFOcZP9EkpU1rRDM41Oh6UTDIHrt3e19cz3ocCIzCAXfgSWKve7vv3a3sF7t7B4ehMWiNtmPsoqnVdho/4I4wfEigiDE4dqqpydBMJtEOpB+2aWNorNJqM62Jgb53HBVG/+r+cXvv+z7v73me3+95nm2NR4AkMwK/oJvhFSCRlAIXstoal0BDGMjKnhgHlJBAt1e63J5TYYkZzw0piigX5OeTIqMTRMCTjI4SuPwInk+FSCUf/hdZkIRp9wt0dPzwTi0HZJkMAllbgDy7U0sJ0BWvwIV2q8QoACEROSRICiIKgENIvxBWED8gJVmbh2glgQUJy7AMJG3LdrjDCTRgE1tBUUENOiOqhCW/kLDl4S4O37IiAZKDiwDJygBuKIATYXLQMIGF6cwt8RAgaZj6wfaQICtq7/xk+kiKAhAd8JRAM3xQ7Qk2M2IeQoMASyqgC2bAgyRValc9ACJKskwExGZuqadJUWQZikyc5++QBb47lTGqREXw4HFXIjcU8sMr6qALBuEoy6+MQtZ5BNcRFh12ugmVFZLhWUgjypIwnpiYPD8390AkqXoIgqYUVWMzl3vn2giy2lFOUi73PEhSokJqBylxJmJg7r4U5hWGA2rcWfmgu9ThfXcGHY7rzP3zgOUoT6kdSRnOzLsMFCmKUgLEUE9gvbP8sIAPKiH1FE5YOiUgi7CGwO4YvKaE5dZ2qAUYvRxPFdNJ1+ZZEf+oyWwvgrqoI55QOA/BzYiLUhA9picQnCgw6AsIK1JS7ul2ptx4HipDv0cieTkApSielT1OhcJ8PaC7nA8VfCQhOMwmET6sUxQ0iYIM0FRUarcXrZ7pIrSsaGCmulBBCpI805x0q44klW9sbmqkqTBNhyKNHGZtJgyMH4SpwGDqiigJCTcwIJST1Xaj0WLoTR3Nkt8Fk8VQHEMx/I0mFFY6YBmOgYQmf1O9nLiLYdjwgwaKUA9g18cJLPmcn2shAQ6qlnB+H4awWq1vPtxoFsoATaxm4xvzrWQwNxpcz8nDDxqkIE5icnfTrDXK0Or4Grjw4Sag12MGvQGzGHHaBEgDbcIIU4DETAHcSBjOwu5nKIiSUFOEYwWVAQUHlxJVx/M4sinRaHYDbjSYYKY2hOEpNkwDd9hfJCRykG2IKAFWIOk+5ybUSVIhgLqTBajGi7ZVOMrLnF1uGKRTEOoZ0PbhgoU+HxXw+Tl7hc9fy9T5S32F1RhR66T1wAvcmLs+Uu4p21Fo2rqNrm6y8GwEgqC4WW/FzUajyYriOkyH63AUc7DeUl2DvnSrt4hqCoU8ir6q3IeZiRqm1gkUhfYKdbUyXlYbDpbiUV0jHthSEyqudkTDleEStqS42FVY1iDpCjlno4Eobja5OaJGCMJsSCVkz7chsDgZyK891SIobBE00SDGAmy2QWwIneTArps/Dm1IKZz5Lp6N2hB3gkwA3yQH3HB42ysEHowfhhyEIwxtr6PqTMXVjUa+wRgtD9Nuk6NeEvVNIu4rM4UcZEUVtQVgeKPZUDSXBINRj2IpHqCYlmQV3g/9/4xqyIvO7XjUJc583OK8IPNMIBBzAwk2kNpFsUKYhpNdAjGoebVjmzpooQksEAAWykpSlgCg0UI4M2fR/jcf2hOfhTjJwhqLUOpAyGDXFhCEQWtDONJuMcF2Sn4CX4wlapIPXkpzrfrhVzTJZyHb5hJ+hz1+afLusj35IxbH+wh67Nl+cuMZ8saUxj7qvHB5sHtT7Dm1N3PZl7d2r4j/7fR36F2uyWBbs2vprtFXxrP07lPHYp99cOPgq6vOD37x2NkD525tn75564aTO3IzJ2jbqX789pKhf9P/mjh29L3Bn195ZMPl7IzctZ9+dPvejoazddvzHjuRvvYuPdBTcWTgqufc8Nc/uPvR88aCG396vGxor2+pZlf8z1vsUtkLrxtH71Adq5cMHu7x6RbQ+H88b1de3K+t//GrRUu9+18aEpYPZncWrqlat79uo+5E4/n
Rr02tPL1k4/LrE1rHx+uysnqJd48MtE7+7LeV4SWVxy8Ob7721it/Z3sQW0lb5r2B7LGMTRPq5h+ktUifdcZ6CxYP5hx98rl/LD7w4s19Z35xSx471Lq21nV89V8fufbWcfpK//dbJsZWejd8K6NUc+Cp33z1PDnx1PpLhgVu2+Qd4vrKHWc6Lncezry3IcOUvjT3R6aCoWDa2F9ettSs9xQQT144VJjz3nefX7O4M/26dmxfttp7dk/p+23W3Nbs6vSekkPIqsJFR0w3F33+RIs1svugQlxZzvx+2Us98RVZb+ZUBW83ZP7q0WDfNbz6anpsRcMnX3pf+F763sVDB8j1p6aq+1bYXjOrlWnxJ/btNX3zyifpFyZX7/rDt4++8+HyY7XmrMjF9GHvSnH8mds5//zUv8y47vOtsbs1U4/y7raTd27n/vKow+2Ovv5uTl/gi+m4sPobd6xD01XZTz9zaGSd8afv9Hf8RHv819s3uKbSNJrp6YWakrrXrmYs0mj+C91Z35g=
\ No newline at end of file
+eNqdVXtQE3cexwd3Fp8z0mp91DTYuaGyYTebhBBEB0JA5CmJAvYc3Oz+kqzJPtxHgOATtVpRe+uJelrbUZA4FLCeUK1o1XpqK56jrcphxbNnLa3a4mOQ01O8X2I4YfSv25k8dn/fx+f7+Xy/3y33e4Eg0hw7oI5mJSAQpARvRKXcL4CFMhCllTUMkFwcVZ2bY7VVyQLd9q5LknjRFBtL8LSG4wFL0BqSY2K9WCzpIqRY+J/3gGCYajtHlbbxZWoGiCLhBKLa9F6ZmuRgJlZSm9T5Ai0BFaESXZwgqXgOMCrCzsmSyg4IQVTHqAXOA6CdLAJBvXhejJrhKOCBD5y8hOAaPSLJgp2DdqIkAIJRmxyERwSL/S5AULCsD6tdnCgpDf2B7iVIEkB/wJIcRbNOpd7po/kYFQUcHkICtRAeC4I0KLVuAHiE8NBeUPPcS/mM4HkPTRKB89gFIsfWhcpBpFIevHxcG8COwNpZSWnMgSCS0mNzSyGjrArTGDAN9lkJIkoEzXogRYiHgHhq+OB5c98DniDdMAgSUkupee7c0NeGE5XdWQSZY+0XkhBIl7KbEBiDbn/f54LMSjQDFL859+V0ocMX6XANptUY9/ULLJaypLI7SPmBfs5AEkoRkoMxlJ1oQy8/HsA6JZdShWHaPQIQedgfYEUNdJNksbwaagHOfu0PNcqunIxeEa+FjalOgbooR2wuOUaFGlRZhKDSolq9CjOYcNyk16vSsmx15lAa2ytl2GcTCFZ0QCksvbL7SZfMugFVa36l4EcCgsNqAvBhGyKghOdEgIRQKXUFSN7zCUHSU/Y/7y6EE5wES/uCaZUjQeWLfSXFFClTlMtbzKDxPh1O24FMOhpDLrzABdJAQAgjKtWYNl7bEDrqJb8WFosiGIqg2KESRIBceGiGhoQGv0NzCn31KIoefNlA4twATrRfhwavL/taCICBqgWSvwiji4+PP/xqo95QODSJj9Mf6m8lgr5oMC0jHnzZIBRiFyrWlfRaIzSltE2GN0V2vcGoi3Po4wzxFEUSqENH2YFdq9WSlFZnB9QXcNBpEkYJqMnDpYGIgIRLSSpV2mIYoiQwaIk4pscNsNIEFc2SHpkCVtmewgVqEBNUvAA8HEHtNaciZoJ0AcQabEDFn1KYnZSVbq61QpBmjnPTYOOVAWOLikhHkZ1JTPXN0ZuLc7LF/NI4S7bsJswsmeop8aVnWniLU2P1YLIXnSkXoq4sBIvTYdo4oxHXI5gG1cAxRdLlTAnLTVvAMVq7MX1uqY0pQnNJa1pWBp6Ke6y0xpuRnikasyyOvDxZnwL4gplsbo6DNWcnZZC2DLMnUwDmghRNYUlSqV7rnTXbrc9cSBZl0njSAiHdLhDOTCLeNzfXkFIISyQkV2Jsggp2LA1JTwzNDQLnBglMTZwJ7Z2aBBUVJCZR039HJqhmwCWfw3pKE1TWAMMA/hIMsMJ9nZjNsaBtEyRG9tJUYkmqRqPnM7xakSWtC0k+2UX7sBm+uZTb4mSsKTZu4RxristamGec1YcZFOJBQ+QYUJ0x2JovoP+fqD4vQPquASSHf/4287OcyNIOR40VCHCqlFrSw8kUXPcCqIGNkJdUqDTGYySuw4ABcziMuN1IIclwkfZG+9/SqA68K/yEBzael1T2u/BEtUmnw9UJKoZINBrgjAXfectrAo3KOk8OvDOpYkhY8BoEP8+erbN9v3Z82qjFrfmRD6ZOefivk7cXt4xb9Jph8N7jr80aVf/buKzT7bPNs/+efcrx9lHlny1bVrLfbNxs/NOYebeExx9Z5z2VB5z/2/mryTfKnNNG/nLnt59+6vF3c3fbbz79w69LlnTd4HvugGc/nGiPfXhf/m7ziGtPNlz7UkYSO97eaamZd2Wo6US5796TtpM3H2zf/umn276egFZ+/ut8Z37LbXw8OOHsGLOl9T/nPq5KY9gfWsPDjrU+LjTmHZu+7T0jd9SirT2EGA6sVi9LNVbOuGGr2hb
lvkJ+i7+Zt/vp5mO+lctWdEVcvbR04tDapvmad/jI+VM2JDfvmqQ6zXC4jahSDWmua4qOj2h5ffHay+a4CGP4zXNnDgyaKKYuX69acnxgyoCLg2l3YkHnsMNR6Fvvr5xfvqfl9fqfR+ywzdn+45OOmSBq6qyn+q4LEZfLlqxpDN9hd9LvH8yPxCnHzFV3BcuksnUDjVVf/Xi2u33rlbyx4drp81O14VlTm6b4Ms4nfJOJZ1pGzK5f+uzRluQtJbUXpz005nd1NZ6N+ktFdWXhB5Gyxt28q3jf/RsXHnZkYavemGk+FbbzMlLxhaljuH9X9/Ws5aMvblVvPD0x7vejMyJ2n+veIHdmZEab7hf80o5P1o1tkkb+fLtt7erj0Wv+XN69FnCFkWMHN308aHTymOhlK5KursubQM24NyCja3TxmXeio+p6bEzzowjH2fGbFpz+XbtL3LnIXtNJRrR8tz36H+tvnXGc3/rttJ6Sg7c+OLDibmr3I/PhisimoqEm8G7r3PWraxyTxoQdvVQ57hNPOj6k4/jlU58oEddPXhrREK+5R98/Oc3t0eyY3BD7fef1+kHbKtJzNv/13rnusRvqTw/3kmsWbSz4aE9P3I5OonNT2pWeSf8mvyqbMOyEb9SwC9MrF8WMjvnw6tLKiX9sWDneUrrmrdY3Zp2ZPNDLZjZ3+C8UDn9wqKLR8ubky3F7sh8vvDcy2NKDwoatcuwtDw8L+y+SEjoQ
\ No newline at end of file
diff --git a/docs/cassettes/configure_9a9ea077.msgpack.zlib b/docs/cassettes/configure_9a9ea077.msgpack.zlib
index 99363426144..92c21c13d8b 100644
--- a/docs/cassettes/configure_9a9ea077.msgpack.zlib
+++ b/docs/cassettes/configure_9a9ea077.msgpack.zlib
@@ -1 +1 @@
-eNrtVUFrJEUUZvGPFIUgyPRMz0zPTNKSQwhhV9wQxSjIbmgq1a+7a9Nd1VtVnclsmINxrx7aX6AmJEvYVQ/iRRc8evAPxLv/w1fdMxuNkV32IAgODNPzXr33vvfqe18fnx+ANkLJW0+FtKAZt/jHfHF8ruFhBcY+PivAZio+ub25c1JpcflmZm1pwl6PlaJrCmGzbs5kyjMmZJeroidkok73VDz7+TwDFmP6xxcfGdDeegrS1t+7002cV856frff7Q9Wvl3nHErrbUquYiHT+ln6SJQdEkOSMwtnrbv+jpVlLjhzGHsPjJIXG0pKaDDXF/sApcdycQBPNJgS24DPzoxltjLHp5gXfv3lvABjWApfb7+3BPf5yceC1RcIg6RKpTk85QonIa1nZyX8veQZtoIzq8+rA8GVlj84bMZ4iMRqlXtb7NB1Wp+Mff+na771PFdTb6sZqKm/evubjUWpuyBTm9UnwcroxxtjtrVIhay/vCQ3ujc0xJhHsNzUp1ZXcBrj2OrnO1nVIf0J2eaWDPxBQPpBOByEwxG5vbXzjDOegcfbTPUTqbzGcr6eW+/DA15fdrPhGg2DYEjfIQVbG4xWB77vd7KhN1i9wfH8GrbNw1IZ8O60g8Z+b57Hlb+hzad4Zxo58Nut34/ogp00pH530h0HtEPxNgCvNoLDUujmXiIrCqChrPK8Q/eY5VmE8UjeCHtLRErDI1phRFHlVpRM2whkXCokPA3dsDrUcJZDVJXRQyMeQYT10xQ0Dfuu3SuvtJlGsCbKBRIY3eOlM1ZTGUkoSju7ig7Q69ItTze5XhiivZkFQ8OBvzrpjwb+vEOFRLpKDhGyPjUONq4MLqWFiIkI91HPEDrbyyFeIlc6jTiCauYQC7NwJsgE11emppG1eVSJZYDFHccOBegorhbzi9msqZYrmbr9wQRBAzZT2i4M/QABGmAap3sNw1TpfVO6tIarEiKHScgD0bS3RDKMjFUad++v0fP5P0vN2sukBr9oNT2dFz1M7ZVa4Q30nGQY+78GvZ4GrQTBv6JBwX9Bg964e0RblkUZMxnq0MgPggFL+sMhjPujyRgmwWg45uPRaMwhgX7CeD+YcBb7w0ESDCeTvbHPg+EYxjzme2NABSuYFAky1K2cwD24R1/QGr0tiQ0+ocXizwb+vN8Yd1BgHBfpLsogx53EdUaCICocICKuOK4YRuxPmW71Y8E1fL73SrXuVAhuqw163Zpt0pc1tzjVoa9bxi4jQvqJqgjTQJgkzBjhRNSSRGnS6ApujcekmYK7UWKZ2TddgmpAbAZ4yt2/c5QCkBhEJUQDXj6g6pFmDQ8tsYq0GZqYZdYueTchM6wdK/mWJftSTRt/e7RDHlTGEsNmaGT22sElAg1ADLgNdMULdiiKqsAMMXFS8qd0DgsXBrr35QeL+iE5WkKZk/tyowWL1gVsZ1xvgkPqXi5lZaMDpoWTX8cIuox299+GuPEvBxvhBAtkRUgTr10HOsfP7iunmuMbAw4ZZmvO7M7/ACbG6BE=
\ No newline at end of file
+eNrVVk1vG0UY5uPGkV8wWiEhIa+9ttfr2iiHKFRtoVGLaqqitlqNZ1/vDtmd2c7MxnEjHyg9Iy2/oCVRUkUtRQVxgUocOfAHwoHfwjtrO6VO2lThhGXL9vs1z/v1zN7b3wSluRRvP+bCgKLM4B/93b19BXcK0Ob+XgYmkdHOhfODnULxww8SY3LdbzRozus64yapp1TELKFc1JnMGlyM5O5QRpPf9xOgEYa/f/CFBuWuxiBM+bO1rvzcfNLw6s160+8+XWUMcuOeF0xGXMTlk/guz2skglFKDezN1OWPNM9TzqjF2PhKS3GwJoWACnN5sAGQuzTlm/BIgc4xDfhmTxtqCn1vF+PCn3/sZ6A1jeH7K58twP391vu/2fBauxjMKJm6q2kqx+56lbcuH360+wliKJ8PkqJGvICsU0VaXqtDmkG/3e77XXJhffDriTGuKB5zUT74acAzTGtJ+mSNsgQWLuWzvBhidjWS0S0XQa4E3v5qatxrm6w8rCftFafv+23nY9SvtDq9lud5taTttnonKJ4vwTm/lUsN7sVZzpjTyTm/0O9ep2pS7s2g/mCtsHnuZRCxScqdoNv6ZSnAOoLGDqPO83auc1oeYGdJLGWcwtMbrrWuYHDsTfnQ27uqaJzR8pGQLrNleHbDxTLTSMbuAMcQ3EtReUg6TW/YG3UpbfqtJgPai+g51qI96nm9YdRrHZIT81hTECFeTlNd7hpVwONFBoNJDsfnaH8BbN7kJvmUCtLsdT3ief3qbZtcjfXXOFMKm/nXOw+2nfn2OH3Hq/fqncCpOVzgzAkGIY5urJ3+tjNM5TDURmLGEIKgwxQip29h1ZZ1WGzAYNfaGCjCcmgwIWzRLE9Bh1mRGp5TZZaDnG6Bq4fLbSCkPMS9VpNlA6nikCmoShJGXM+VI6wganM6ybB6y045Zi8FTUP01se9dPtVWWugiiXHpIkch8akYcEXImNHITQcVBgVao6OTqqyplLEdtvR38dVsO7KzAVNf1pzxlJt6NwG0EzmYFGGXGxyA/oI411tohBpK8fu206+jAmDDKlBpNhvJEM0FCMe28MLDS9VO8olEuhRJoymEBZ5eEfzu4gftygGhbC8CuhCK0yCJY90mCI9oHMzWCgjORahgCw3kxfePmptuIV1FetIEA4nVWItr9dtdlredPreq1l85TQWxw9KdUOlWQM76OYKa2Qalo21+R/R+7enkftZiHs3eu2FEFiuODN1P2ZzqjInUtV/ZvZjZH7O959UFOyy+U10RMpvSq+vuwz2cDiQJsv9YpMzqcTy5fAyqb57eduZzV6YUJ0gF3Y832/RUbPdhqDZ6QbQ9TvtgAWdTsBgBM0RZThijEZeuzXy293uMPCY3w4gYBEbBoBMmlHBRzi3dnE5rvZN52jYUTsbbY2/UGLwaw2/rlbCAW6gnVDnds1JGa4cMhJ2BVFhqRBxwZDf0GNjTNWM6+cTiL9vvtFZFwsEtz5zOuuZs6CnJTe3qjlnPcYsPPrOl7IgVAHBS5IibdoLz5CRVKRiGxxVlwo9BttRgpfYhq4T5AhiEkArO0JWkXPAoSFyRBRg8wGJm1Szv2WIkWQWofJZRK2TSyMywbMjKT40ZEPIcaWfmdbIV4U2RNMJCqlZMlwgUABEg90Aezg+avGsyDBCRCzB/CucxcK4hvot8fn8/D7ZXkCZkltibQYWpXPYVrhaOferB4G8MOEmVdzeKHYinIW37f/MxZZ/UdgQK5jhVPSdkTtbB2eKr9tvHGo6ffEsgDa3p/8A34xtTA==
\ No newline at end of file
diff --git a/docs/cassettes/configure_9f6a7c6c.msgpack.zlib b/docs/cassettes/configure_9f6a7c6c.msgpack.zlib
index f90bd15ffdd..e592958ccb3 100644
--- a/docs/cassettes/configure_9f6a7c6c.msgpack.zlib
+++ b/docs/cassettes/configure_9f6a7c6c.msgpack.zlib
@@ -1 +1 @@
-eNptU31QFGUYP7TMCRoZnWkmamA7hj8y9thlz+Ug+8BDTAyOuAMVMXlv973b5fZ21913CVJHjtLGUpu1j5mmsJLjLk8SL3EcE5uIGG1qJE0swhz+0JE0p8bJyGaQ3juBYHT/evf5+D3P8/s9T2u0EWq6qMgpnaKMoAY4hH90szWqwY0G1NFrkSBEgsKHK11uT7uhiUM5AkKqXpSXB1TRBmQkaIoqcjZOCeY10nlBqOvAD/WwV+Gbh97dZA2Cpg1ICUBZtxYRNJVvzyWsU0HYsm6TVVMkiF9WQ4eaFXs5BXcio4TJAyWJCEICEA0YggBexUCEFwJNt25ZnwBSeCglAjkJGDwkGVIAYsAg83EdiqEKEnAIBlU8GDK0RBXKRm2JChDweOyLlvSwoOjIjN81ShfgOKgiEsqcwouy3/zM/4qo5hI89EkAwRjuUYZJrsxYAEKVBJLYCCN3ssxDQFUlkQMJf16DrsidkzORqFmFd7tjidFJzIiMzKPFU33kVTZj5mXcMsPaqENNpI6AKEuYO1ICuKWImvQfn+lQARfAOOSkqmbkTvLBmTGKbnaUA87lngUJNE4wO4AWZO2HZ9o1Q0ZiEJpRZ+Xd5Sad/5djbDRtK4jPAtabZc7s8AFJh/FpkqdTYlgrhqRYkqKPzoKGSGsmOQVXMD+hDk4RKEHZjwSznbYXfqpBXcXLCl+N4DRk6K1hLBb8/lR0cr/2uVZNSb0zXIJlM094BCOXoAsIF4eIxJIQtL2IyS+y08SKck+nc7KI554qxT0akHUfVmr51FZEOcGQA5CPOe+5D7HJIyJF3uzB7w0UXRqQ/QXLlWoBVi8p4dwVqwur1wjBI00kJykGTyJ8gZBMDtuEzCGCc3gdPtpBF7KQKXSwNM+ydi/DUpwdMA6Gge2NIjBjtI0m/Iril2CXs5R0Ak6ApDtJiRktWVtRXL7S2bmGrFK8CtJJD/CbYVmRYcQNNayCGUuWxnutwQhOrypea3Y7eDvl8/FLHLCwgPGyheQyvC5T9EyPH04cRfLSQ1gCDZv6U4ysN+dbkt/cF15cFRh+Lv32k28f+8f66LMPzlu/rMxdd6q3+/XHrnrZX8yX2wd37nv/yIGMiTFrR2jvvC+vpOn9o/FvalwnLzdMtGX9PbCh5vpP1984fy06/tburIn6EIvssecH2+an/p6zq8W9q2H3Q6lSv/vrhQcufR695cusk25sPlG3tVZ+b13uoYWst+/GlZvZZ0bHRttKdzx1VYyMjGyrW9B1Nv3c2T7i2NI/Qx/1WV5q8i3qUh+pXlzdPqaODHiOdd73sNAycvqvvVWLhn4dvfgtjNT3Dj29ee617GYqVFbbeqE3p9byQIY74+Pv0syBPadN1x/B1J9XrjAyfyvOP7dnyGlX5+34MKObvmVvCRwufYYc2ZFW27r98px4Zjmz4AnWfn77xoKT3tC/++vlUO2NHwdTe8YXV5Z/wbkqtsVLvFu7LzC3s9+5mL9x+RqtYv9XS7dEeryXbMNnwo/X5K7+YE7O8MQPjfrx8fstlomJuZabLY6ysRSL5T/v1GTL
\ No newline at end of file
+eNptVG1MFFcUXZUooj/aWpM2mjputZbK7M7sLAtLo5UuYlBZiCxksa307czbnZHZeePMGwTFJkLVVtRkaGPSxqrIsttuQaTYGENpsZZW0baoxEg3MTZ+tRqjqfUj2kgfCCrR+TXz7r3n3HvOfVMTq4CaLiFlTLOkYKgBHpMP3ayJaXCVAXX8YTQMsYiESGFBka/R0KT+2SLGqp5ltwNVsgEFixpSJd7Go7C9grWHoa6DENQjASRU9dettYZBZRlG5VDRrVks43CmWUdSrFnvrLVqSIbWLKuhQ82aZuURaULB5MAHZZkKQwpQK0kxBQLIwFQAAk23rnuPYCAByiSNl4EhQJqjRSCVG7SDEDAck0GgMAyrZB5saASfsTHrYiIEAhn2rOX5iIh0bLY9NUAr4HmoYhoqPBIkJWS2hNZIaholwKAMMIyT9hQ4pJAZL4dQpYEsVcD2SlrHQFJkMheNpTAkrZpfeQt8ZYvyShZ6ow9BzX1AVWWJB4Pl9pU6UpqHp6VxlQqfDscHNaGJUAo2D2SPtGkvrCJ2KBRjc7ptzL4nqWVAOo6qQ/GOJwMq4MsJDj1stRl9WLz3yRykm035gC8oGgUJNF40m4AWdjlHTakZyuCgZsxT+DTdcPAxHWdjHbbMtlHAepXCm01BIOuw7ZEHj0rixEiOZlw0wx4YBQ2xVkXziDCYDczeEQFlqISwaDaynPtLDeoq2WBYGyVl2NBrIsRLePxIbHjt9hQsGdmELZEc4qrZ6RONNIpxUflAowhxOsW6sjguK52lFuX7mj3DJL5nutTm04CiB4lTC0eWJsaLhlIOhbjnmesSH75ZtCSY35H3MoZd5k33ZLCOVeWoeKVRIvBVyFfqCR18rAvSQkCR1gzRDtb1z+LcLi5d4AI0DAQF2unOzKDdbgdLBxyOTMGZyWY4BVdjhQTMOGtjqRBCIRm28kGaB7wI6YfSmLGcUm92fp6n2U8vQwGEddoHQmZEQQqMFkGNuGHGeRkZAll/DUY9ufSy7FJzv5vlOScrpLNAcHOBTAf9NlmbEZkeyRAZvDtDv4H1xAqNHHWP2TyjLtky9IwTzOy6xILJGwY2x5fYvDeUlNTv0/79JvXTefv3NTm7l4YuHO/jj35xYhY7c6DzTGLD9p1J986e+qR3au3h8Zuq5asdqwsO35yy6djfV28+OHU+NLejbp3T3+B99dc3evqnT5y79IXLG4+6hfRm/5/UNqtLbEzddjrxbUaD/er9ew/CnS2r/Q1TSwLdlxK3jD0n4W1b06F5s5dD5kbt0ppdJ6fg+vfrT06/21X2X5K3smnS7+rO4lJ514xzXdePfl4/se9U0tkdf8HFiyIoeGfFb19PvrJ1fteE3hePrE/xv5zck3x59yuTLzx3PokHydX1lSnV1XmJrhW5V3q2zEntzQMTsg+9nuMoBcknuiceL9TOnR8/v/7HBb3tO2bidukitXHnJf9b14P+vqLlY09fkagD73YeStxJae8585qj9KVpsypatv90d4Jwr/gjGhye/XPKsX8+u7R5/S97WhYX7Gg7mCgRt97q+yMnt1a937r7g4sfb7m/K+/ED292XLt2e5rFMjAwzpJ7fZKTG2ux/A/oUIK0
\ No newline at end of file
diff --git a/docs/cassettes/configure_c09fac15.msgpack.zlib b/docs/cassettes/configure_c09fac15.msgpack.zlib
index e4c3ce24645..3c81f5444de 100644
--- a/docs/cassettes/configure_c09fac15.msgpack.zlib
+++ b/docs/cassettes/configure_c09fac15.msgpack.zlib
@@ -1 +1 @@
-eNptVWtsFFUULgKRRxQwBDBRGDZFA3S2Mzuzr9aGlC2Fhpbdbtc+VFzv3rnTne68Oo/ttgQTSklDJMqg6A8CEbrsmlJasKQ8Skl4Ki9REyGVBCT8wMRQiCJqYoJ3t1tpA/Njd+45537n9Z0z7ek40nRBkSf1CLKBNAANfNCt9rSGmk2kGx0pCRlRhUsG/DWhLlMThpdFDUPViwoLgSrYFRXJQLBDRSqM04UwCoxC/K6KKAuTjChc6/CODTYJ6TpoRLqtiHh3gw0q2JVs4IMtIMAYAQgNyJwiEbIpRZBGgIgSRwRlKyBsmiKijJ2pI822cT2WSAqHxIyoUTVIxu4kDVOLKBlbGUtp/K8bGgISPvBA1BEWGEhScWrYMINF2b0b01EEOJz4rbzZyaiiG1bvxGT6AIQI4yMZKpwgN1oHG9sEtYDgEC8CA3XjDGSULZXVHUNIJYEoxFFq9JZ1CKiqKECQ0Rc26Yrck8uYNFpV9Ky6O5MdiesjG9YRPw6itKIw0IqrLhO0nfXYqUMJUjeAIIu4jKQIcDwpNasfHK9QAYxhEDLXUSs1erl3vI2iW/urAPTXTIAEGoxa+4Emudj+8XLNlA1BQlbaF3jWXU751B1jp2m7+/AEYL1Vhtb+bCOOTriMDK2VhArGsPZSKagoMQFZw7+Hw5APR6QSak1NC0x41KYQDJaL5jo60rzK7a5/21mmu1avCTaE9ZC2RoiEhXJI0m6Hl3Y7nU6GpO2UnbbTZJlpDzc3JzTv6urmGBUWK1iptbKeK21YHXDViuVyU0RJeISVjoYmMYDiPBRDdU3hJmRnlHq5CjR7fNWBYHkd1SB7E4kIkKp9Pt4vlhYTODozLnAlbC2jBCTQHF9Z2Rby046YFK+3hyWxzuEzlNg7ZZWMYW/zMWtCTQ3jwqNwhFQuQhfFeqjM0zvGDRHJjUbU6qJZ11ca0lU8P2hzCpfMMPX2JOYhuvJtOjdI+/xrn1J4XrIMc9IaCkXNAoJ2E35oEA7KwRI0W8Q4ihgnsboq1OPLuQk9l4KHQ3gEdR7TcNUY5dMwasoxxHX7nkv2oQzZcScz4eMpJVFCVXRE5qKyeurJ4OgGISvK+kcni1S0RiALbVm31lCW9S1tiRYOmhwXjbdIlLeNZYQIMiF/JHdF1ZSMGxwQKelWF+Nw9OY0Y7zrxrlSJE2RFH0iQeIxR6IgCbie2d/cGtOtpBMX+9izBrhfCC+8NJvtBnVqvIWGJEzYjO+nMKzX6z35fKMxKAabeN2uExOtdDQ+Gtoh6ceeNchB7KP0nsSYNSlw1nA+PoQB74q4OMR4XQxDcR7ew7p5wHo5ikFOnoGe43j1CRCjZJqpKppB6gjinW20WsMFEkhkdkwJQzsZF860mBBkKJocqjEjZUomB72YUDUkKoDr85WTPgCjiKzJ8s9KlzWsK62q8A3Uk+OJRPrV0e9FWlZ0WeD5VA3ScGOsbigqJoeXpYZSGCtY2mAd8XAsxfMR2gsgz0LoIlfiNTSG9j/tkplNmwYijj0Orf4oU2IrYlnGVkxIoMTjwm3KflU2pTK5yo3nJ4UWfTQtL/tMFrdflc9QszsetM64fG/KHGLV1YOd7T9rv+4Nbule5vhuxzHz5IVb3y86N1g3fVLw9PnOf97q7x+cueDusntL7349N3/5ga7Bb87zA6/evvZF38xPZo38JhzfffXA7XsXH6NzZy9sOjBru2G8lF9bO3kp+2LB69Pa33Du+Vi+K+zIr5i57Eqpe+BMkF2YP+cnecsP1MIlu/vLlpz+47OhqdvogdjtVNfFO7vS4oKFO2ecnD/14aNHW+mRgHvG/FvL/Wdmz7oW2LeYku7vEhZ56m+8z56vPGimH/g/Zeddv3Op5d9k387qU1dem/ZJquLPjs+PGouox7fk0JPlFw+evRQs/3t6avq2Nzs9H54Cs+LbtneMXKxILCm4vKnjx1Mts+1bChavKO4sVfovWzdsL99cWvNecsr6V/5asefLS9c
vzf2lp+b+1ptrRzavwOV78mRynvbBw17wQl7ef6XuMgw=
\ No newline at end of file
+eNptVWtsG1UWdgiPwv6gCIraCi2uQQiBrzMPj2OnhG5qO2lIY8eJQxJKa13fuc5MPK/Mw45TWtqEh4BdYKBQIfEojWO3aUiBPiClRS2IR0XDQ4BEgIVKkP2xoouAVVeobLvXjrNN1M4Py3fOud/5zjnfOTNUzGDdEFWlalxUTKxDZJKDYQ8VddxvYcN8oCBjU1D5fFu0Iz5i6eL0bYJpakZdTQ3URI+qYQWKHqTKNRm6BgnQrCH/NQmXYfJJlc9Nb9nokrFhwF5suOrWbXQhlURSTFedq01EaSd06lDhVdmpWHIS606YVDPYSbncLl2VMPGyDKy7Nq13u2SVxxJ50auZgPVwwLT0pEr8DFPHUHbVpaBkYLfLxLJGMiFWcpvyBDYVBQx5kuZ3jsV5QTVMe2Ih9b0QIUwwsYJUXlR67Vd6B0XN7eRxSoImHiOEFVwujD2WxlgDUBIzuDB7y34VapokIliy1/QZqjJeSRCYOQ1faB4r5QNINRTT3h8lJBqaa9pypMaKk/b4aA/96gAwTCgqEikakCDhU9DK9rfmGzSI0gQEVPpnF2YvT8z3UQ17tBWiaMcCSKgjwR6Fuuzz7pv/XrcUU5SxXQy2XRiuYjwfjvXQjMf/2gJgI6cge7TchjcWXMamngNIJRj2y1QBqWpaxPbXVVckEiiVSMr1uSa9ayDV2Ki0NGodGTEbEvmYv39t1t8Y6+xrEhNJXbU6E1yK03oAXeulmVq/n+EA7aE8JGcQFBCTs9ZmuTZ6dbOCukQ23S92rY2kI81MPBxjUYCSWjV4VzQQiHdFlPAaHNYCAR0pmcZ2IxmDuL2pvd3MWg2xOO5INw/0qVysi7mnAfnboi0xmNFoabA76K3lGWOlk1C2MiJfH0nTob54MsP51nQKbISPRsI9kTDfz69uvyeawd5of6Ij0Z28m2lG8zj7aR+gKrR9lNdPlZ6JOcVIWOk1BXuEZvy7dGxoZIbwcIEU0rSMoTxRJz7xYbEyTDujLeeFfX0+RJRqH4kLlttJ+ZytUHcyFMM5aV8dy9Z5fc6m1vh4sBImflFhvhYng2ikiDjDc4NQRIKlpDE/FrzoCBwpjQDpb4k+GVaABzTVwKDCyh7vBu2zWwQ0h/bNzhtQ9V6oiIPlsPaR8ixkBweyPLJ4XshkZSow6GXFJLZQan/liqarpTCEEJANe4ShvBMVy5wax0iuFKApQNGHBgCZfSyJskjqWf6trDLDznOk2G9e6GCqaUyWXtFb7gb19nwPHctExqXY52G8gUDg8MWd5qBY4hKo5Q4t9DLwfDY0IxtvXuhQgdhJGeMDc95A5O3pm8khkfKxqJZJJiFLQ4ZPMhxXy9DkmPLXYl+AqZ0k21BEBKXUTE3VTWBgRPa2mbOn3TIcKG2eepbmWB/JdKVTVJBk8bjDSobUUg5E4JqOJRXye1EKIIgEDGb1ZxdDPZGG1ubgwW4wX0ggqs1+M4qKaihiKlXowDppjD2GJNXiyQrVcSHYCNobeuz9ARqxXjoJ/QHM+XmaBqvJcppD+7/s8qX9W4QS4Z5B9j6BrXfVeb2sa6VThvV+H2lT+cuytVDKVel9r2rLjY8tcpSf6r8+9ZEyTC0O/+e+B2e4Ry6ZqsbTu8ZeP4VinUv0n6q+XfrNgYfvvG7m+9uvvSrUOqp0n3hu4yT7QzC06JmhiStnlgQ3rOMmP379zKkhfPUTv4LJv292nz4arspG90xNPfR+A8zsPfpLEzdTOHhV2/JN73xbFVw0cbzvxfwed8t2sOtvjiX86HvHvzTpY++fOsE/m38s0rPs+LHPudzji1b8+OlmUoSTT08cd5/d/cXJ+tu3Tpy5ZfUu9rb4rb9tGOSXi3f8RfEOKeiTLnRyh3DHn46ZI/GuVafvby/s//in+z44M7jiwL87/7UlNzK8pwmJLU9cs6LmVGiqZ03fC7vBP4TTO4Yhs2r6M/hw9dqVD13+3HLx2V8u23evg9n9h7BiG7th69Jt757uvim
/bKvg/fVn9/bDUdSy/uwHmx3PD4cXT1rOm1e9lBWMxX/+KiH/99Fvjn4xvHH7Dfq2k+8su/S68Vjx845zM9f88/CThy7tfUH6+fsXf1+6qdrhOHeu2jE0tXPH2Uscjv8BZV1LvA==
\ No newline at end of file
diff --git a/docs/cassettes/configure_e4ee9fbc.msgpack.zlib b/docs/cassettes/configure_e4ee9fbc.msgpack.zlib
index 0ff10fc4205..44cb27578a0 100644
--- a/docs/cassettes/configure_e4ee9fbc.msgpack.zlib
+++ b/docs/cassettes/configure_e4ee9fbc.msgpack.zlib
@@ -1 +1 @@
-eNptVX1sE2UYHwMTBFRCAP9Q4Kya6dzb3bXXdt1YYOsGm6PrtnZjQ7Be7972br2v3Uc/BhjcFAXGx4EkSvwD99GOOcYWFj4FEghiAE1MJDhQZoSEqCAqJigxwbddJ1vg/uj1fZ7n/T1fv+e5tmQEKioniVP6OVGDCkVr6KAabUkFtuhQ1d5NCFBjJaa7xuP1dekKN5LLapqsFubnUzJnlmQoUpyZloT8CJFPs5SWj/7LPEzDdAckJj6yY61JgKpKhaBqKsTeWGuiJeRK1NDB5IM8jwkQo7BmKYxeAUnXsACkFNWUh5kUiYcpK12Fimn9GiQRJAbyKVFI1oDVbAOargSklK2IpAR6q5oCKQEdghSvQiTQoCCjxJBhCgs3O9YnWUgxKO1rWbO7WUnVjIHJqRygaBoifCjSEsOJIWN/qJWT8zAGBnlKg30ofhGmC2X0hSGUAcVzEZgYu2UMUrLMczSV0uc3q5LYn8kXaHEZPqruS2UHUHVEzRj2oCBKKvNr4qjmIkaYyQIzPhgDqkZxIo+KCHgKxZOQ0/rjExUyRYcRCMj000iMXR6YaCOpRo+boj3eSZCUQrNGD6UIdvLgRLmiixonQCPpqnnUXUb50J3VTBBmx9AkYDUu0kZPuhGHJ12GmhIHtIQwjE/xBC1JYQ4aI3/6/XTQHxCKq/2BBm5VoMJfWoeTDS7GAhuhF/eGI25fZXOpfWUTUxcrEPlIU3UJIBwWJ+Gw2exOQJhxM2EmAF7CN1aYWywVKxvL6BjL+jRLrduPO8h6rsEFNY1plFY1qERlgx6qIOLmKBFcUc+W15XE9Rp9Ob+8vNxTWtmimEsFV9RKlrfavQJZL4WKMBSdHuGY4lX0Knt5XdQmttjibp3x2kvCimyJyYS/0s6WUNW19AqIE1GHtWxieFabBeCZCO04WYCnnoFxbvBQDGms0UWQZK8CVRlND2xPoJJputrWjXgIL36ZzIxRp6fqIYXnd5chThonfKyehxEOzENrmAW3kBhBFlothTYcW+729bsybnyPpeCQT6FENYhoWD5O+STN6mIYMn2ux5L9RIrsqJOp8NGUAhiTJRWCTFRGfyOoG9sfoLLs4NhkAUkJUSLXmnZrnEizPtoaizK0zjBsJCrgzlbSygWgTgeHM1dkRUq5QQEBQTW6SCs5kNGM864P5YoDAgc4cSwG0JhDnhM4VM/0b2aJqUa3DRX7yKMGGto6aN0lyXQ38JMTLRQoIMKmfD+EIZ1O5+ePNxqHsiITp8N+bLKVCidGQ1gE9cijBhmITlztj41bA44xRl5CB78tYLMU4EwAWi023A6DpA1xK2CF0BZwWgnoPIpWH0cjlFQzZUnRgApptLG1uDGSJ1Cx1I4pthI2qx1lWoRxIs3rDPTqgTIplYNahMkK5CWKOeBaBlwUzULgTfPPSJahSXNXug41golEAh557GuRFCVV5ILBhBcqqDFGH81LOoOWpQITCKuupMkYLmBIAvkNEiiFgiBkQClaQ+No/9OuO7VpkxSPYo/QxkHWWmwqJEmrqQgTqOICO2pT+pvyTiKVqxg6O2Xtoi3Ts9LPVL52qPoMPvf7u/Oeyc8Dzdk9nTNzS2dN3zRlWumG3Mvxre4Fp368l+3rfbBt58ZDH5gX/rbu5vG70ey9qwe+2NtxKXD+/L2cy44793YlO0avn17zw3u3Ni9smWNeJAqtna1b24ISe2OlzvT2L97cNZhwl4HvjiS6buUteOr9ZAjU1m07aczb/df27RvX77vecb+dfC7yetXX2GfE4pysrNHbI8ws59v7iFmtpyo2XTy21HZtdXaNsa79la9uNWqzL13dubHtLZZecuXlG3OffiLY++KZnF+m80/+8/ybp6uOLO0KL5u9pWrw/tSra/HlH7avufDB3T2699Vvr0X/bt71jbe2fE/LOen3ozO33SY7gzPcC+f3+S4MzNlwObLlvqe4vjz3tY9nAJzc+PMfV6qFy6MveArrKoZHfxrpPXx
O2NPF3oyHquDw0EfS7nYmeeWB415T/69Ha/5VKwtzzlbNz0uqz87oqNdu3xn2DuxchKr84MHUrP07pi35JDsr6z8PST+u
\ No newline at end of file
+eNptVQtsU+cVDgLRVlPaTBQQQms8066Q5rfv9fUzWZSmdpI6iZM0NiGhm9Lf//3te+P7yn04thmtgE6jQCXuKgErj6rE2DRNwrOUhoQ92CrYoy+1SEm39N2qXdVpQ5Xo6Ep/O85IBFfy495z/u9855zvnLs1n8SqxsvSohFe0rEKkU5uNHNrXsUDBtb0J3Mi1jmZzXZ2hCNDhspPVXG6rmg1djtUeJusYAnyNiSL9iRtRxzU7eS/IuAiTDYqs+kpYZNVxJoG41iz1jy6yYpkEknSrTXWCBYEi4gt0NIvJ8hPVDZ0SxRDVbNWW1VZwMTH0LBq3fzzaqsos1ggD+KKDhibC+iGGpWJn6arGIrWmhgUNLw5z2HIkpRmyiqynKzp5thCmscgQpggYAnJLC/FzdF4hleqLSyOCVDHw4SchItFMIcTGCsACnwS52ZPmcehogg8ggW7vV+TpZFSMkBPK/hm83CBPSCZS7p5uoOQaAjaO9OknpKFtrlpG308BTQd8pJACgQESPjklKL93HyDAlGCgIBSr8zc7OGx+T6yZh4JQdQRXgAJVcSZR6Aqup2n5j9XDUnnRWzm/Z03hysZb4RjbLTD5j2xAFhLS8g8Uiz6ywsOY11NAyQTDPN5KodkOcFjc3rRbX19KNYXFeuaMt0u/2BHu7Yh7WlsNxLQL6EmIZUJtjUqjXFbWKCNJNVi9FJcCNAeJ+3weL2MC9A2ykZyBkGjTac7m/tl0RH1BjemI2If1YnCzaFWpokRwrwt2Rps07yhxlhXl+EKYKWnRersiEn+9oZWFGn1C20q9vcEbL2phrTLkXxkfcLVNoD62nimoV8NRlUYb4O+zMZOd6C31kIoG0merUs12WwupTXp0CQUHkDKQxyfoR/ObGQTjXExHIjIA93hABfu7fI+Mo8zRbkBVaLtppxeqnCNzSlGwFJc58whmvIeVbGmkHnB23KkkLqhbc0SdeK/XsyXBudwR+sNYa/IBohSzckIZ1RbKLclBFWLg3K4LLS7hmFqXC5Lcygy4i+FidxSmCciKpS0GBFn49wg5BFnSAnMDvtvOQKThREg/S3QJ6MJcEqRNQxKrMyRHtA1uzFAMHBqdt6ArMahxGeKYc3J4iwMZlKDLDJYlksOipQv42T4KDZQ7HTpiKLKhTCEEBA1c8jh842VLHNqHCa5UoCmAEWPp4BKSiHwIk/qWfwurS3NzJLyU2dvdtDJpiELLu8sdoM6P99DxSKRcSH2DRinz+ebuLXTHBRDXHwe1/hCLw3PZ0M7RO3szQ4liMOUNpKa8wY8a07dS276fJjC0EVHfYybdUVdvhhiPZQbuukoZijswK+Q3ccjglJopiKrOtAwIjtaT5tT1SJMFTZPHUO7GDfJtNbCS0gwWBw2ogG5kINWa1FULMiQPYZiAEHEYTCrPzMf6G1vCAX9Z3rAfCGBDmX2/ZCXZE3iY7FcGKukMeYwEmSDJStUxTl/E+hq6DVP+2jEOGkcY1inj4l6WfAQWU5zaP+XXbawf/NQINyTyDzFMXXWGqeTsdZaRFjndZM2Fd8iW3KFXKX4nxaNV+68vax4LSaf69d3dYUSq+mKyX8d2/eFcNu7q5d91Mq3b9nzxuUjtidPXnzry5aXutdkp8Z/fPW3K0frL+fKPz58YdOVQzO/mLi77A/9J5Y8v4t9Z6nnfF91d1/lXy69bH+xo/Lcu5V3TTx+deK7787uF+pH/rj2Ae7KD5Y/F/HsmH5/N/hm8ai15dUvaybPHXztqxU71ZllgNd7Ly+5Z6/nCj044P/oku6uryrv/WWw+oMXyspSn7934M3Et2v2UKsO7l4R/nX5jk9euf3BgLrqh4779vdkVgxtiXy8anPlJvFQw6PlQoV77RrB+megL0ruCd11f7Mwc6Guauq+JduOP3PHvrVM7dF1W39/9av/bNtu7GWlNwZf+9G//+c7MfSTwHR2zROvJv4pOtYHfnPxM/2pddsP/WNlWf21S/qGv+2
oeKH8Z7T4ZvwCnz6+7IJtqZo88Oxj0c+XTvz0qbG/Vz1dPdq68pnlLVXbMp13JnYfPKM33L2y7vrqT5+Y+dX5UXkmWl/RAh97e/32o6PjUmrtzn32r186eWXXtd9Z4bfi8m6B/rSOG8Gf3em59y0tKrw4/d+laPuZ+uzjr1/70F5s0OIyZnDa3Uy69T3LDll/
\ No newline at end of file
diff --git a/docs/cassettes/configure_e75ae678.msgpack.zlib b/docs/cassettes/configure_e75ae678.msgpack.zlib
index c4ca45fe3f9..171b19f2169 100644
--- a/docs/cassettes/configure_e75ae678.msgpack.zlib
+++ b/docs/cassettes/configure_e75ae678.msgpack.zlib
@@ -1 +1 @@
-eNptVX1sE2UYHyAIxOAiqAn+sUvBRHDX3vWuXbs5ktExQDa6jwIrhpS3773XXntfu4+uHRKyOUgIDDgJMVFAga7FOccmhI8pKhINGBMSwI8ZIcAfEgNBEREVyXzbdbIF7o/23ud53t/z9Xue68gmkKYLijyhV5ANpAFo4INudWQ11GIi3ejMSMiIKly63t8UOGBqwtD8qGGoernDAVTBrqhIBoIdKpIjQTtgFBgO/K6KKA+TDitcaujNdTYJ6TqIIN1WTry2zgYV7Eo28MFWL8A4AQgNyJwiEbIphZFGgLCSQARlKyVsmiKinJ2pI822fg2WSAqHxJwoohokY3eRhqmFlZytjKU0/tcNDQEJH3gg6ggLDCSpODVsmMOi7NT6bBQBDid+uag4HVV0w+obn8whACHC+EiGCifIEevDSJuglhIc4kVgoB6cgYzypbJ64gipJBCFBMqM3LL6gaqKAgQ5vSOmK3JvIWPSSKnoUXVPLjsS10c2rCN+HETVUkd9ClddJmg767FT/UlSN4Agi7iMpAhwPBk1r/94rEIFMI5ByEJHrczI5b6xNopuddcB6G8aBwk0GLW6gSa52cNj5ZopG4KErKyv/lF3BeVDd4ydpu1lA+OA9ZQMre58I46Nu4wMLUVCBWNY+6gMVJS4gKyh30MhyIfCUiW1pKkVJj1qLAAba0RzOR1uWVRW1rzCVa27Fy9pDIb0gLZECIeEGkjSZU4vXeZyuRiStlN22k6T1aY91NKS1LyLG1riVEhcykqp2mauKri43r1SrJFjYSXpERY6gzGxHiV4KAZWxUIxZGeUZrkOtHh8DfWNNauooOxNJsNAavD5eL9YVUHg6MyEwFWyKxmlXgItiYW1bQE/7YxLiWZ7SBJXOX2GEl9dXcsY9jYfsyQQC44Jj8IRUoUI3RTroXJP3yg3RCRHjKh1gGbdBzWkq3h+0BsZXDLD1DvSmIfomzPZwiDt9y97SOHn0tWYk9bJQNQsJegywg8Nwkk5WYJmyxlnOcMSi+sCvb6Cm8BjKTgQwCOo85iGi0Ypn4VRU44jrsf3WLKfzJEddzIXPp5SEiVVRUdkISqrt5lsHNkg5NLqwyOTRSpaBMhCW96tdTLP+ta2ZCsHTY6LJlolytvGMkIYmZA/UriiakrODQ6IlHTrgMdL9xU0o7zrwblSJE2RFD2YJPGYI1GQBFzP/G9hjelW2oWLffxRA9wvhBdels13g/p0rIWGJEzYnO+HMKzX6/3k8UajUAw28Za5B8db6WhsNLRT0o8/alCA2E/pvclRa1LgrKG5+BDyeHBbEcWHUZgHTJhy8U6Ogy7K5QE8T1HMCbz6BIhRcs1UFc0gdQTxzjZS1lCpBJK5HVPJ0C7GjTOtIAQZiiaHmsxwtZLLQa8gVA2JCuAO+WpIH4BRRDbl+Wdlq4PLq+qW+o42k2OJRPrVke9FVlZ0WeD5TBPScGOsHigqJoeXpYYyGKuxKmgd8XAsxfMgzEIXw0LoJhfiNTSK9j/t0rlNmwUijj0BrcNRptJWzrKMrYKQQKXHjduU/6q0Z3K5ypEvJzSUbJlalH8miTto/2mqeOO9f6ff+DkdW+hbdnTG1M7ptU9N3XfG7Nt8aqBrvTV9xXCFNvvH9xK3f1vwWeuv8vP0lI86Otqjns3f9XB/9/8z+ODqpUtiy7vkMceuttWV117mh+7emXjtCnXj6Vl/XH976oXVG+d2LVh7dcfMTReNvZfSd145F6mM1m2c/0Ns2dlv5/oP6vGdtRdKS3Z3WRMrZ90kw1sH1gxv2jb4BTlLnD08j2w7F++89tdmWq2ZPPn4vfMzv3/i9rTpB6aZoYp3trefrrueeilT0T1l/Ybd6VppT/B+e8ndrovny7p9t7pmvL9368a9t+Z8dfl+1+CGF0+VX1g9/+zr+2e0/3Kz+MnsC9uufH6meNUc73ZH565nn3nr6+0zJnZsmfLBTn12l7uiJDXvWv+ei3eDr7Jz24f/7JROWIM
1kT03/T+FtJIHE4uKhocnFT0oPrUW4Pf/ANGuMu4=
\ No newline at end of file
+eNptVXtsU2UUL6AZQWVoFBON8VogGbjb3UfbtcUZt3Udc6zd1sKYaJqv3/3a3vW+dh/tHszgUOKDMa4CYkRA2FqzjDkdkzeKBsWIkaB/OCCQaGKEGaPxFcPLr10nW+D+cXPvPef7nd8553fO7c4kkarxsjRjkJd0pAKo4xfN7M6oqNVAmv5iWkR6XOb66gPB0B5D5ceWxHVd0TwlJUDhbbKCJMDboCyWJOkSGAd6CX5WBJSD6YvIXPvY2k6riDQNxJBm9azutEIZR5J0q8daz8MEAQgVSJwsEpIhRpBKgIicRARlLbaqsoCwl6Eh1dr1XLFVlDkk4A8xRSdZm4PUDTUiYz9NVxEQrZ4oEDRUbNWRqOBMsBWfpmxUVyaOAIfTvGCZ1xeXNd0cmk79fQAhwphIgjLHSzFzb6yDV4oJDkUFoKMBTFhCucKYAwmEFBIIfBKlJ06Zw0BRBB6CrL2kRZOlwXyCpN6uoFvNA9l8SFwNSTf3BTCJ8pqS+nZcY4mgbU7aRg+3kZoOeEnARSMFgPmklZz98FSDAmACg5D5/pnpicNDU31kzeyvAzAQnAYJVBg3+4EqOu0jU7+rhqTzIjIzlfW3hssbb4ZjbTRjc30wDVhrl6DZn2vD/mmHka62k1DGGOa7VBrKcoJH5tkZBeEwjIYjYll7tdrUFvX5pFqfEkzyKS/PNbhal6dcvoYVLdV8OKLKxoqwI+pQmkm61E4zpS4X4yBpG2XDOZOVcci0G8tTjnq6okaCTTybaOWblvsT/homVNXAQjcl1Cng6YDbHWryS1XLUJXidqtQSvoatUgDQI3VjY16yihvCKFgoqatRXY0NDHPlENXfaC2ASQVWuhYVWkv5RhtKYEpG0meK/MnaG9LKJJ0OJetiLN+LuCvavZXca1cReMzgSSyB1rDwfCqyEqmBk7h7KKdJJWn7aTsLip7DU0qRkBSTI+be2jG9Z6KNAXPEFqXxoXUDa27D6sTnTqZyQ/T7kDtTWHP7/NipZpHQ3GjmKCcRB1QCYZiHATt9LCsx+4kqutCg5X5MKHbCvODEB5ELYrFWTU5CBkYN6QE4gYqbzsCR7MjgPubpY+HlURtiqwhMs/KHFxFNk5sEbLGOzIxb6SsxoDEd+TCmkdzs5DqaEtx0OC4eDIlUu4OO8tHkAGj+/JHFFXOhsGESFHDxXExQ3nLpBoHcK4USVMkRR9qI/HsI4EXeVzP3D2/yjSzz4GLfeBWB11OILz0MvZcN6hjUz1UJGIZZ2PfhLG73e4jt3eahGKxi7vUeWi6l4amsqEZUTtwq0MeYjelDbZNepM8Z44txC9hdykHWQcCkYg9iqIMa3c5KQo5aQTcTpaNMgfxNuQhRsk2U5FVndQQxHtbbzfHikXQlt08ZSztYPExainBS1AwOBQ0Il45mwMWuKIiQQbc+zBKQgDjiJzQn5nxNvvL62oqP1pFThUSGVAm/hkZSdYkPhpNB5GKG2MOQEE2OLxCVZSu9JGN5c3mPjcNWTsdKYWcq9TF0TRZgZfTJNr/suvL7t8MEDD3JDRH4myZ1WO3s9alhAjKXE7cptyf5YV0NlcpdmJG96OvzbbkrlkbXg/Xfkbdf+LilcUVOw6D8eubjy0uerWQ8Xq9T289b7zF3/f66nc2dKVqzu19xP9bf+E/qW8uHfwn+uC9Ff273O+u+XpT08qeoZFfLl1fYfvwzROf9ZwtenIk3PvN/tR58a8HetZ9Wlz6ye+7HtKCzXcW9Uh06pN53Qlm58JLT3x1pnnWkgVP3dnMt+ruHRuHk7XsoWVn+MyzD395/NvtR6penPdh4thje+YP97sPvly4Zc6R2KLTV7/3zjZ8L81BF2p7ly/o6TzZ4ztd94v+5B1zV47GHts2tOXy5Ss/7ty8eC9be/F32LVo9M/xT2f84Sto6v38zPpfF1RcePuVy29s+s4TbC4+vWbt3PVf9o3uuTa3c1vy7nsYx+lTP7DRlyxc47/HK4pia+7Z3fT
3OTi6KWrZffzkxyPb/5C+2Hroam9XoavgNWLO+FPLnvfIF38inhj++OzGx6WZwcJfg2pRffddsXnDkdGWQODa/ur3Xt3y3SLXOKfd+OnUlZ8LLJYbN2ZZNj9EJK7PtFj+A1XHTc0=
\ No newline at end of file
diff --git a/docs/cassettes/configure_f33f3cf2.msgpack.zlib b/docs/cassettes/configure_f33f3cf2.msgpack.zlib
index 1341db8257f..35956698baf 100644
--- a/docs/cassettes/configure_f33f3cf2.msgpack.zlib
+++ b/docs/cassettes/configure_f33f3cf2.msgpack.zlib
@@ -1 +1 @@
-eNrtVVFv3EQQVsUfWa2QkND5zufz3SUmrRRFUVuVEFACUhUia8+es7exd93ddS7X6B4IfeXB/AIgUVJFLfCAeIFKPPLAHwjv/A9m7bsEQlCrPtfSyXczOzPfzH7z3dHZPijNpbj1nAsDikUGf+ivj84UPC5Bm6enOZhUxsd317ePS8Uv3k2NKXTQ6bCCt3XOTdrOmEiilHHRjmTe4WIsT0Yynv52lgKLMf3T8081KGc1AWGqn+zpOs4pph233W13vaUfVqMICuOsi0jGXCTVi+QJL1okhnHGDJw27upHVhQZj5jF2HmkpThfk0JAjbk63wMoHJbxfXimQBfYBnx5qg0zpT46wbzwx+9nOWjNEvhu88EC3FfHn3FWnSMMkkiZZPA8kjgJYRwzLeC/JU+xFZxZdVbu80gq8bPFprWDSIySmbPBDmyn1fHAdX+95lvNMjlxNuqB6urb979fm5f6EERi0urYX+r/cmPMpuIJF9U3F+RG95qCGPNwlunqxKgSTmIcW/VyOy1bpDskm5Ehnuv5pOsHPS/o9cndje0XEYtScKImU/VMSKe2nK1mxtnaj6qLdtq7TQPf79EPSM5ue/1lz3XdVtpzvOUbHC+vYVs/KKQG514zaOz35nlc+WvafIF3ppADf97665DO2UkD6raH7YFPWxRvA/BqQzgouKrvJTQ8BxqIMstadMRMlIYYj+QNsbcxT2hwSEuMyMvM8IIpE4KIC4mEp4EdVovqiGUQlkX4WPMnEGL9JAFFg65t98orTKoQrA4zjgRG92DhjOVEhALywkyvon302nSL03WuS0M4mhrQNPDc5WG377mzFuUC6SoiCJH1ibawcWVwKQ2EjIe4j2qK0Nkog3iBXKokjBBUPYeY67lzjEywfaVyEhqThSVfBBjcceyQgwrjcj6/mE3rapkUid0fTODXYFOpzNzQ9RGgBqZwutcwTKTa04VNqyNZQGgxcbHP6/YWSHqhNlLh7v07ejb7f6lZe5XU4AetuqOyvIOpnUJJvAEny1jOOlY4tHmrRG+mREvDt0p0qUTv7B3ShmthynSKajRY7jEvHvV9fwi9UX8pcgcuG7DI9Yb9pTgajrvMPj7AwOv6cdcbjsZDd4Q4u/2lQc9FHcuZ4GNkqF08jtuwQy/Jjd6Gyhq/ocXgaw1fH9fGbZQZy0W6i2IY4WbiUiNBEBUOEBGXES4aRuxNmGpUZM41/L7zWrXulQhuowl605pN0lc1Nz/Vom9axiwiArpz/6Ot7d2Vla2HW3fukIeyJEwBYYIwrbkVVkPGUpFaa3CHHCb0BOz9EsP0nm4T1AZiUsBTlg3WUXBAmhA5JgqQCoBKSOqlPDDESNJkqGMWWdvk/phMsXYsxXuG7Ak5qf3N0RZ5VGpDNJuikZlrBxcIFADRYPfRFs/ZAc/LHDPExArLP9JZLBHX0F5Z6TRdfy4+mQMJyOEC0wzNaw1qtM7xW+NqnSUgO516dNT+AxWlCfeZ4lajLWHoIoulRxNqb2cx9xBHmiNpAjp2mm2hM3x2XzvVDP9W4IBhtvrM7uxvcr/ytw==
\ No newline at end of file
+eNrVVk1vG0UY5uPGkV8wWiEhIa+99vojNilSFKq2QGhRTNUqRKvZ3de70+zObGdm47iRD5SekZZf0JIorqKWooK4QCWOHPgD4cBv4Z21nVInbapwwrJl+/2a5/16Zu9OtkEqJvibjxjXIGmg8Y/67u5Ewu0clL53kIKORbh36WJ/L5fs6L1Y60z1ajWasapKmY6rCeVREFPGq4FIa4wPxL4vwtHvkxhoiOHvHX6pQNorEXBd/GysSz87G9Wcar1ab3aerAQBZNq+yAMRMh4Vj6M7LKuQEAYJ1XAwVRc/0ixLWEANxtotJfjhquAcSszF4RZAZtOEbcNDCSrDNOCbA6WpztXdfYwLf/4xSUEpGsH3Vz+dg/v7jXd/M+GVsjGYliKxV5JEDO21Mm9VPPhg/2PEUDzrx3mFOG2yRiVpOI0Wqbd7rttrdsiltf6vp8a4KlnEeHH/pz5LMa0F6eNVGsQwdymeZrmP2VVISndsBHmh7UxWEm2vbwfFUTV2L1i9ZtO1PkT9hUar23AcpxK7dqN7iuLZApyLO5lQYF+e5ow5nZ7zc/3+dSpHxcEU6g/GCptnfwY80nGx1+40flkIsIagscOoc5y964wWh9hZEgkRJfDkhm2sSxgMe1M8cA6uSRqltHjIhR2YMjy9YWOZaSgiu49jCPaVsDgi9S64IQ0dGviNptsMO92u67ZD120N/G7Q8Y/IqXmsSggRL6OJKva1zOHRPIP+KIOTczSZA5s1uU4+oRwP7zjEcXrl2zS5HOuvcaYkNvOvt+7vWrPtsXqWU+1WW22rYjGOM8cD8HB0I2X1di0/Eb6ntMCMwQNO/QRCq2dgVRZ1WGzAYOsuBgqxHAq0Bzs0zRJQXponmmVU6sUgZ1vg6uFya/Ao83Cv5WjRQMjICySUJfFCpmbKAVYQtRkdpVi9RacMsxecJh56q5Neyn1Z1gqoDOIT0lgMPa0TL2dzkTaj4GkG0gtzOUNHR2VZE8Ejs+3o38RVMO5SzwT15rhiDYXcUpkJoAKRgUHpMb7NNKhjjHeUDj2krQy7bzr5IiYM4lONSLHfSIZoyAcsMofnCl6odpgJJNDjTAKagJdn3m3F7iB+3KIIJMJySqBzLdcxljxUXoL0gM719lwZiiH3OKSZHj33bqLWhJtbl7GOBZ4/KhNrON1OvdVwxuN3Xs7iq2exOH5QqmoySWvYQTuTWCNtJwlNac1wstL/I5L/9iyKPw9974evvBbahjHOTeCPghlh6VMJ6z/z+wlKX+q0HpdEbAez++iYml+XZF91JRzgcCBZFpN8mwVC8sUr4kVqfXtr15pOoBdTFSMjtrsubYR+q9nsgOu3lgKn7dA2DZxGp7UUBp1BnZpXE6DdqDfDeqPjDzqOj2Wtt5baroN8mlLOBji3Zn0ZLviGdTzyqJ0OuMJfKNH4tYpf10phH/fQTKi1WbGSABcPeQm7gqiwVIg4D5Dl0GNrSOWU8WcTiL83XuusyzmCW5s6nffMadCzkptZVazzHqPnHj1r48rn6/3N5eX1m+sffURuipxQCQQvTopUai5BTQZCkpKBcHBtytUQTH8JXmxbqkqQMYiOAa3MQBlFxgBHiIgBkYCjAEjmpNyEHU20INMIpc88apVcGZARnh0K/r4mW1wMS/3UtEJu5UoTRUcopHrBcI5AAhAFZh/M4fj4xdI8xQghMXTzr3AGS8AUVJeXa9Osv+JfzID0yO4c0xjFq1PUKJ3hN8KVMkqPbNTK0pVPC1muvW0qmbl2zMBY8yhmPKaupjvzuntY0hSHpmcN7Om2WGN8bb52qPH4+QMD2myO/wFK03fT
\ No newline at end of file
diff --git a/docs/docs/concepts/few_shot_prompting.mdx b/docs/docs/concepts/few_shot_prompting.mdx
index e48d76905dc..ad11df48255 100644
--- a/docs/docs/concepts/few_shot_prompting.mdx
+++ b/docs/docs/concepts/few_shot_prompting.mdx
@@ -30,7 +30,7 @@ At a high-level, the basic ways to generate examples are:
- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples).
- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves.
-Which approach is best depends on your task. For tasks where a small number core principles need to be understood really well, it can be valuable hand-craft a few really good examples.
+Which approach is best depends on your task. For tasks where a small number of core principles need to be understood really well, it can be valuable to hand-craft a few really good examples.
For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input.
**Single-turn v.s. multi-turn examples**
@@ -39,8 +39,8 @@ Another dimension to think about when generating examples is what the example is
The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
-One more complex type if example is where the example is an entire conversation, usually in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
-This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where its useful to show common errors and spell out exactly why they're wrong and what should be done instead.
+One more complex type of example is where the example is an entire conversation, usually in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
+This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
## 2. Number of examples
@@ -77,7 +77,7 @@ If we insert our examples as messages, where each example is represented as a se
One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated.
- Some models require that any AIMessage with tool calls be immediately followed by ToolMessages for every tool call,
- Some models additionally require that any ToolMessages be immediately followed by an AIMessage before the next HumanMessage,
-- Some models require that tools are passed in to the model if there are any tool calls / ToolMessages in the chat history.
+- Some models require that tools are passed into the model if there are any tool calls / ToolMessages in the chat history.
These requirements are model-specific and should be checked for the model you are using. If your model requires ToolMessages after tool calls and/or AIMessages after ToolMessages and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy ToolMessages / AIMessages to the end of each example with generic contents to satisfy the API constraints.
In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models.
diff --git a/docs/docs/concepts/streaming.mdx b/docs/docs/concepts/streaming.mdx
index 8a02537102b..c2dc400e23e 100644
--- a/docs/docs/concepts/streaming.mdx
+++ b/docs/docs/concepts/streaming.mdx
@@ -91,7 +91,7 @@ For more information, please see:
#### Usage with LCEL
-If you compose multiple Runnables using [LangChain’s Expression Language (LCEL)](/docs/concepts/lcel), the `stream()` and `astream()` methods will, by convention, stream the output of the last step in the chain. This allows the final processed result to be streamed incrementally. **LCEL** tries to optimize streaming latency in pipelines such that the streaming results from the last step are available as soon as possible.
+If you compose multiple Runnables using [LangChain’s Expression Language (LCEL)](/docs/concepts/lcel), the `stream()` and `astream()` methods will, by convention, stream the output of the last step in the chain. This allows the final processed result to be streamed incrementally. **LCEL** tries to optimize streaming latency in pipelines so that the streaming results from the last step are available as soon as possible.
@@ -104,7 +104,7 @@ Use the `astream_events` API to access custom data and intermediate outputs from
While this API is available for use with [LangGraph](/docs/concepts/architecture#langgraph) as well, it is usually not necessary when working with LangGraph, as the `stream` and `astream` methods provide comprehensive streaming capabilities for LangGraph graphs.
:::
-For chains constructed using **LCEL**, the `.stream()` method only streams the output of the final step from te chain. This might be sufficient for some applications, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output. For example, you may want to return sources alongside the final generation when building a chat-over-documents app.
+For chains constructed using **LCEL**, the `.stream()` method only streams the output of the final step from the chain. This might be sufficient for some applications, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output. For example, you may want to return sources alongside the final generation when building a chat-over-documents app.
There are ways to do this [using callbacks](/docs/concepts/callbacks), or by constructing your chain in such a way that it passes intermediate
values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
diff --git a/docs/docs/how_to/configure.ipynb b/docs/docs/how_to/configure.ipynb
index 8b67a4a3f8b..f6e24d082e7 100644
--- a/docs/docs/how_to/configure.ipynb
+++ b/docs/docs/how_to/configure.ipynb
@@ -21,7 +21,7 @@
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
- "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n",
+ "- [The Runnable interface](/docs/concepts/runnables/)\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Binding runtime arguments](/docs/how_to/binding/)\n",
"\n",
@@ -62,6 +62,163 @@
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "9d25f63f-a048-42f3-ac2f-e20ba99cff16",
+ "metadata": {},
+ "source": [
+ "### Configuring fields on a chat model\n",
+ "\n",
+ "If using [init_chat_model](/docs/how_to/chat_models_universal_init/) to create a chat model, you can specify configurable fields in the constructor:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "92ba5e49-b2b4-432b-b8bc-b03de46dc2bb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chat_models import init_chat_model\n",
+ "\n",
+ "llm = init_chat_model(\n",
+ " \"openai:gpt-4o-mini\",\n",
+ " # highlight-next-line\n",
+ " configurable_fields=(\"temperature\",),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "61ef4976-9943-492b-9554-0d10e3d3ba76",
+ "metadata": {},
+ "source": [
+ "You can then set the parameter at runtime using `.with_config`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "277e3232-9b77-4828-8082-b62f4d97127f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Hello! How can I assist you today?\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = llm.with_config({\"temperature\": 0}).invoke(\"Hello\")\n",
+ "print(response.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "44c5fe89-f0a6-4ff0-b419-b927e51cc9fa",
+ "metadata": {},
+ "source": [
+ ":::tip\n",
+ "\n",
+ "In addition to invocation parameters like temperature, configuring fields this way extends to clients and other attributes.\n",
+ "\n",
+ ":::"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fed7e600-4d5e-4875-8d37-082ec926e66f",
+ "metadata": {},
+ "source": [
+ "#### Use with tools\n",
+ "\n",
+ "This method is applicable when [binding tools](/docs/concepts/tool_calling/) as well:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "61a67769-4a15-49e2-a945-1f4e7ef19d8c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[{'name': 'get_weather',\n",
+ " 'args': {'location': 'San Francisco'},\n",
+ " 'id': 'call_B93EttzlGyYUhzbIIiMcl3bE',\n",
+ " 'type': 'tool_call'}]"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_core.tools import tool\n",
+ "\n",
+ "\n",
+ "@tool\n",
+ "def get_weather(location: str):\n",
+ " \"\"\"Get the weather.\"\"\"\n",
+ " return \"It's sunny.\"\n",
+ "\n",
+ "\n",
+ "llm_with_tools = llm.bind_tools([get_weather])\n",
+ "response = llm_with_tools.with_config({\"temperature\": 0}).invoke(\n",
+ " \"What's the weather in SF?\"\n",
+ ")\n",
+ "response.tool_calls"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b71c7bf5-f351-4b90-ae86-1100d2dcdfaa",
+ "metadata": {},
+ "source": [
+ "In addition to `.with_config`, we can now include the parameter when passing a configuration directly. See example below, where we allow the underlying model temperature to be configurable inside of a [langgraph agent](/docs/tutorials/agents/):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9bb36a46-7b67-4f11-b043-771f3005f493",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "! pip install --upgrade langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "093d1c7d-1a64-4e6a-849f-075526b9b3ca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "agent = create_react_agent(llm, [get_weather])\n",
+ "\n",
+ "response = agent.invoke(\n",
+ " {\"messages\": \"What's the weather in Boston?\"},\n",
+ " {\"configurable\": {\"temperature\": 0}},\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9dc5be03-528f-4532-8cb0-1f149dddedc9",
+ "metadata": {},
+ "source": [
+ "### Configuring fields on arbitrary Runnables\n",
+ "\n",
+ "You can also use the `.configurable_fields` method on arbitrary [Runnables](/docs/concepts/runnables/), as shown below:"
+ ]
+ },
{
"cell_type": "code",
"execution_count": 2,
@@ -604,7 +761,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.1"
+ "version": "3.10.4"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/custom_tools.ipynb b/docs/docs/how_to/custom_tools.ipynb
index 0e1262841a1..81853e17a0d 100644
--- a/docs/docs/how_to/custom_tools.ipynb
+++ b/docs/docs/how_to/custom_tools.ipynb
@@ -492,7 +492,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
"id": "1dad8f8e",
"metadata": {
"execution": {
@@ -504,13 +504,14 @@
},
"outputs": [],
"source": [
- "from typing import Optional, Type\n",
+ "from typing import Optional\n",
"\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
+ "from langchain_core.tools.base import ArgsSchema\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
@@ -524,7 +525,7 @@
"class CustomCalculatorTool(BaseTool):\n",
" name: str = \"Calculator\"\n",
" description: str = \"useful for when you need to answer questions about math\"\n",
- " args_schema: Type[BaseModel] = CalculatorInput\n",
+ " args_schema: Optional[ArgsSchema] = CalculatorInput\n",
" return_direct: bool = True\n",
"\n",
" def _run(\n",
diff --git a/docs/docs/how_to/trim_messages.ipynb b/docs/docs/how_to/trim_messages.ipynb
index 505e7b90195..9c6553c7e6d 100644
--- a/docs/docs/how_to/trim_messages.ipynb
+++ b/docs/docs/how_to/trim_messages.ipynb
@@ -30,7 +30,8 @@
"1. The resulting chat history should be **valid**. Usually this means that the following properties should be satisfied:\n",
" - The chat history **starts** with either (1) a `HumanMessage` or (2) a [SystemMessage](/docs/concepts/messages/#systemmessage) followed by a `HumanMessage`.\n",
" - The chat history **ends** with either a `HumanMessage` or a `ToolMessage`.\n",
- " - A `ToolMessage` can only appear after an `AIMessage` that involved a tool call. \n",
+ " - A `ToolMessage` can only appear after an `AIMessage` that involved a tool call.\n",
+ "\n",
" This can be achieved by setting `start_on=\"human\"` and `ends_on=(\"human\", \"tool\")`.\n",
"3. It includes recent messages and drops old messages in the chat history.\n",
" This can be achieved by setting `strategy=\"last\"`.\n",
diff --git a/docs/docs/integrations/chat/contextual.ipynb b/docs/docs/integrations/chat/contextual.ipynb
new file mode 100644
index 00000000000..00cdbd4ae58
--- /dev/null
+++ b/docs/docs/integrations/chat/contextual.ipynb
@@ -0,0 +1,253 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "afaf8039",
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
+ "source": [
+ "---\n",
+ "sidebar_label: ContextualAI\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e49f1e0d",
+ "metadata": {},
+ "source": [
+ "# ChatContextual\n",
+ "\n",
+ "This will help you get started with Contextual AI's Grounded Language Model [chat models](/docs/concepts/chat_models/).\n",
+ "\n",
+ "To learn more about Contextual AI, please visit our [documentation](https://docs.contextual.ai/).\n",
+ "\n",
+ "This integration requires the `contextual-client` Python SDK. Learn more about it [here](https://github.com/ContextualAI/contextual-client-python).\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "This integration invokes Contextual AI's Grounded Language Model.\n",
+ "\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
+ "| [ChatContextual](https://github.com/ContextualAI/langchain-contextual) | [langchain-contextual](https://pypi.org/project/langchain-contextual/) | ❌ | beta | ❌ |  |  |\n",
+ "\n",
+ "### Model features\n",
+ "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
+ "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
+ "| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | \n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access Contextual models you'll need to create a Contextual AI account, get an API key, and install the `langchain-contextual` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Head to [app.contextual.ai](https://app.contextual.ai) to sign up to Contextual and generate an API key. Once you've done this set the CONTEXTUAL_AI_API_KEY environment variable:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "if not os.getenv(\"CONTEXTUAL_AI_API_KEY\"):\n",
+ " os.environ[\"CONTEXTUAL_AI_API_KEY\"] = getpass.getpass(\n",
+ " \"Enter your Contextual API key: \"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
+ "metadata": {},
+ "source": [
+ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
+ "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
+ "metadata": {},
+ "source": [
+ "### Installation\n",
+ "\n",
+ "The LangChain Contextual integration lives in the `langchain-contextual` package:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-contextual"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a38cde65-254d-4219-a441-068766c0d4b5",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our model object and generate chat completions.\n",
+ "\n",
+ "The chat client can be instantiated with these following additional settings:\n",
+ "\n",
+ "| Parameter | Type | Description | Default |\n",
+ "|-----------|------|-------------|---------|\n",
+ "| temperature | Optional[float] | The sampling temperature, which affects the randomness in the response. Note that higher temperature values can reduce groundedness. | 0 |\n",
+ "| top_p | Optional[float] | A parameter for nucleus sampling, an alternative to temperature which also affects the randomness of the response. Note that higher top_p values can reduce groundedness. | 0.9 |\n",
+ "| max_new_tokens | Optional[int] | The maximum number of tokens that the model can generate in the response. Minimum is 1 and maximum is 2048. | 1024 |"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_contextual import ChatContextual\n",
+ "\n",
+ "llm = ChatContextual(\n",
+ " model=\"v1\", # defaults to `v1`\n",
+ " api_key=\"\",\n",
+ " temperature=0, # defaults to 0\n",
+ " top_p=0.9, # defaults to 0.9\n",
+ " max_new_tokens=1024, # defaults to 1024\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2b4f3e15",
+ "metadata": {},
+ "source": [
+ "## Invocation\n",
+ "\n",
+ "The Contextual Grounded Language Model accepts additional `kwargs` when calling the `ChatContextual.invoke` method.\n",
+ "\n",
+ "These additional inputs are:\n",
+ "\n",
+ "| Parameter | Type | Description |\n",
+ "|-----------|------|-------------|\n",
+ "| knowledge | list[str] | Required: A list of strings of knowledge sources the grounded language model can use when generating a response. |\n",
+ "| system_prompt | Optional[str] | Optional: Instructions the model should follow when generating responses. Note that we do not guarantee that the model follows these instructions exactly. |\n",
+ "| avoid_commentary | Optional[bool] | Optional (Defaults to `False`): Flag to indicate whether the model should avoid providing additional commentary in responses. Commentary is conversational in nature and does not contain verifiable claims; therefore, commentary is not strictly grounded in available context. However, commentary may provide useful context which improves the helpfulness of responses. |"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "62e0dbc3",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "# include a system prompt (optional)\n",
+ "system_prompt = \"You are a helpful assistant that uses all of the provided knowledge to answer the user's query to the best of your ability.\"\n",
+ "\n",
+ "# provide your own knowledge from your knowledge-base here in an array of strings\n",
+ "knowledge = [\n",
+ " \"There are 2 types of dogs in the world: good dogs and best dogs.\",\n",
+ " \"There are 2 types of cats in the world: good cats and best cats.\",\n",
+ "]\n",
+ "\n",
+ "# create your message\n",
+ "messages = [\n",
+ " (\"human\", \"What type of cats are there in the world and what are the types?\"),\n",
+ "]\n",
+ "\n",
+ "# invoke the GLM by providing the knowledge strings, optional system prompt\n",
+ "# if you want to turn off the GLM's commentary, pass True to the `avoid_commentary` argument\n",
+ "ai_msg = llm.invoke(\n",
+ " messages, knowledge=knowledge, system_prompt=system_prompt, avoid_commentary=True\n",
+ ")\n",
+ "\n",
+ "print(ai_msg.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2c35a9e0",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "We can chain the Contextual Model with output parsers."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "545e1e16",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "\n",
+ "chain = llm | StrOutputParser()\n",
+ "\n",
+ "chain.invoke(\n",
+ "    messages, knowledge=knowledge, system_prompt=system_prompt, avoid_commentary=True\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For detailed documentation of all ChatContextual features and configurations head to the Github page: https://github.com/ContextualAI/langchain-contextual"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/chat/deepseek.ipynb b/docs/docs/integrations/chat/deepseek.ipynb
index b00408c1afe..5004dc85212 100644
--- a/docs/docs/integrations/chat/deepseek.ipynb
+++ b/docs/docs/integrations/chat/deepseek.ipynb
@@ -38,6 +38,12 @@
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
"\n",
+ ":::note\n",
+ "\n",
+ "DeepSeek-R1, specified via `model=\"deepseek-reasoner\"`, does not support tool calling or structured output. Those features [are supported](https://api-docs.deepseek.com/guides/function_calling) by DeepSeek-V3 (specified via `model=\"deepseek-chat\"`).\n",
+ "\n",
+ ":::\n",
+ "\n",
"## Setup\n",
"\n",
"To access DeepSeek models you'll need to create a/an DeepSeek account, get an API key, and install the `langchain-deepseek` integration package.\n",
diff --git a/docs/docs/integrations/chat/groq.ipynb b/docs/docs/integrations/chat/groq.ipynb
index 7f5bd28b3ed..a1696fba7a4 100644
--- a/docs/docs/integrations/chat/groq.ipynb
+++ b/docs/docs/integrations/chat/groq.ipynb
@@ -85,21 +85,10 @@
},
{
"cell_type": "code",
- "execution_count": 3,
- "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
+ "execution_count": null,
+ "id": "3f3f510e-2afe-4e76-be41-c5a9665aea63",
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n",
- "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
- "Note: you may need to restart the kernel to use updated packages.\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"%pip install -qU langchain-groq"
]
@@ -116,7 +105,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
@@ -124,7 +113,7 @@
"from langchain_groq import ChatGroq\n",
"\n",
"llm = ChatGroq(\n",
- " model=\"mixtral-8x7b-32768\",\n",
+ " model=\"llama-3.1-8b-instant\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
@@ -143,7 +132,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 2,
"id": "62e0dbc3",
"metadata": {
"tags": []
@@ -152,10 +141,10 @@
{
"data": {
"text/plain": [
- "AIMessage(content='I enjoy programming. (The French translation is: \"J\\'aime programmer.\")\\n\\nNote: I chose to translate \"I love programming\" as \"J\\'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.', response_metadata={'token_usage': {'completion_tokens': 73, 'prompt_tokens': 31, 'total_tokens': 104, 'completion_time': 0.1140625, 'prompt_time': 0.003352463, 'queue_time': None, 'total_time': 0.117414963}, 'model_name': 'mixtral-8x7b-32768', 'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop', 'logprobs': None}, id='run-64433c19-eadf-42fc-801e-3071e3c40160-0', usage_metadata={'input_tokens': 31, 'output_tokens': 73, 'total_tokens': 104})"
+ "AIMessage(content='The translation of \"I love programming\" to French is:\\n\\n\"J\\'adore le programmation.\"', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 55, 'total_tokens': 77, 'completion_time': 0.029333333, 'prompt_time': 0.003502892, 'queue_time': 0.553054073, 'total_time': 0.032836225}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-2b2da04a-993c-40ab-becc-201eab8b1a1b-0', usage_metadata={'input_tokens': 55, 'output_tokens': 22, 'total_tokens': 77})"
]
},
- "execution_count": 5,
+ "execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -174,7 +163,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 3,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
@@ -182,9 +171,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "I enjoy programming. (The French translation is: \"J'aime programmer.\")\n",
+ "The translation of \"I love programming\" to French is:\n",
"\n",
- "Note: I chose to translate \"I love programming\" as \"J'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.\n"
+ "\"J'adore le programmation.\"\n"
]
}
],
@@ -204,17 +193,17 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 4,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "AIMessage(content='That\\'s great! I can help you translate English phrases related to programming into German.\\n\\n\"I love programming\" can be translated as \"Ich liebe Programmieren\" in German.\\n\\nHere are some more programming-related phrases translated into German:\\n\\n* \"Programming language\" = \"Programmiersprache\"\\n* \"Code\" = \"Code\"\\n* \"Variable\" = \"Variable\"\\n* \"Function\" = \"Funktion\"\\n* \"Array\" = \"Array\"\\n* \"Object-oriented programming\" = \"Objektorientierte Programmierung\"\\n* \"Algorithm\" = \"Algorithmus\"\\n* \"Data structure\" = \"Datenstruktur\"\\n* \"Debugging\" = \"Fehlersuche\"\\n* \"Compile\" = \"Kompilieren\"\\n* \"Link\" = \"Verknüpfen\"\\n* \"Run\" = \"Ausführen\"\\n* \"Test\" = \"Testen\"\\n* \"Deploy\" = \"Bereitstellen\"\\n* \"Version control\" = \"Versionskontrolle\"\\n* \"Open source\" = \"Open Source\"\\n* \"Software development\" = \"Softwareentwicklung\"\\n* \"Agile methodology\" = \"Agile Methodik\"\\n* \"DevOps\" = \"DevOps\"\\n* \"Cloud computing\" = \"Cloud Computing\"\\n\\nI hope this helps! Let me know if you have any other questions or if you need further translations.', response_metadata={'token_usage': {'completion_tokens': 331, 'prompt_tokens': 25, 'total_tokens': 356, 'completion_time': 0.520006542, 'prompt_time': 0.00250165, 'queue_time': None, 'total_time': 0.522508192}, 'model_name': 'mixtral-8x7b-32768', 'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop', 'logprobs': None}, id='run-74207fb7-85d3-417d-b2b9-621116b75d41-0', usage_metadata={'input_tokens': 25, 'output_tokens': 331, 'total_tokens': 356})"
+ "AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 50, 'total_tokens': 56, 'completion_time': 0.008, 'prompt_time': 0.003337935, 'queue_time': 0.20949214500000002, 'total_time': 0.011337935}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-e33b48dc-5e55-466e-9ebd-7b48c81c3cbd-0', usage_metadata={'input_tokens': 50, 'output_tokens': 6, 'total_tokens': 56})"
]
},
- "execution_count": 7,
+ "execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -269,7 +258,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.9"
+ "version": "3.10.4"
}
},
"nbformat": 4,
diff --git a/docs/docs/integrations/chat/openai.ipynb b/docs/docs/integrations/chat/openai.ipynb
index 35d8bf295e9..f8300a34f2b 100644
--- a/docs/docs/integrations/chat/openai.ipynb
+++ b/docs/docs/integrations/chat/openai.ipynb
@@ -322,7 +322,7 @@
"source": [
"### ``strict=True``\n",
"\n",
- ":::info Requires ``langchain-openai>=0.1.21rc1``\n",
+ ":::info Requires ``langchain-openai>=0.1.21``\n",
"\n",
":::\n",
"\n",
@@ -397,6 +397,405 @@
"For more on binding tools and tool call outputs, head to the [tool calling](/docs/how_to/function_calling) docs."
]
},
+ {
+ "cell_type": "markdown",
+ "id": "84833dd0-17e9-4269-82ed-550639d65751",
+ "metadata": {},
+ "source": [
+ "## Responses API\n",
+ "\n",
+ ":::info Requires ``langchain-openai>=0.3.9-rc.1``\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "OpenAI supports a [Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions) API that is oriented toward building [agentic](/docs/concepts/agents/) applications. It includes a suite of [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses), including web and file search. It also supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses), allowing you to continue a conversational thread without explicitly passing in previous messages.\n",
+ "\n",
+ "`ChatOpenAI` will route to the Responses API if one of these features is used. You can also specify `use_responses_api=True` when instantiating `ChatOpenAI`.\n",
+ "\n",
+ "### Built-in tools\n",
+ "\n",
+ "Equipping `ChatOpenAI` with built-in tools will ground its responses with outside information, such as via context in files or the web. The [AIMessage](/docs/concepts/messages/#aimessage) generated from the model will include information about the built-in tool invocation.\n",
+ "\n",
+ "#### Web search\n",
+ "\n",
+ "To trigger a web search, pass `{\"type\": \"web_search_preview\"}` to the model as you would another tool.\n",
+ "\n",
+ ":::tip\n",
+ "\n",
+ "You can also pass built-in tools as invocation params:\n",
+ "```python\n",
+ "llm.invoke(\"...\", tools=[{\"type\": \"web_search_preview\"}])\n",
+ "```\n",
+ "\n",
+ ":::"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "0d8bfe89-948b-42d4-beac-85ef2a72491d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "tool = {\"type\": \"web_search_preview\"}\n",
+ "llm_with_tools = llm.bind_tools([tool])\n",
+ "\n",
+ "response = llm_with_tools.invoke(\"What was a positive news story from today?\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c9fe67c6-38ff-40a5-93b3-a4b7fca76372",
+ "metadata": {},
+ "source": [
+ "Note that the response includes structured [content blocks](/docs/concepts/messages/#content-1) that include both the text of the response and OpenAI [annotations](https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#output-and-citations) citing its sources:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "3ea5a4b1-f57a-4c8a-97f4-60ab8330a804",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[{'type': 'text',\n",
+ " 'text': 'Today, a heartwarming story emerged from Minnesota, where a group of high school robotics students built a custom motorized wheelchair for a 2-year-old boy named Cillian Jackson. Born with a genetic condition that limited his mobility, Cillian\\'s family couldn\\'t afford the $20,000 wheelchair he needed. The students at Farmington High School\\'s Rogue Robotics team took it upon themselves to modify a Power Wheels toy car into a functional motorized wheelchair for Cillian, complete with a joystick, safety bumpers, and a harness. One team member remarked, \"I think we won here more than we do in our competitions. Instead of completing a task, we\\'re helping change someone\\'s life.\" ([boredpanda.com](https://www.boredpanda.com/wholesome-global-positive-news/?utm_source=openai))\\n\\nThis act of kindness highlights the profound impact that community support and innovation can have on individuals facing challenges. ',\n",
+ " 'annotations': [{'end_index': 778,\n",
+ " 'start_index': 682,\n",
+ " 'title': '“Global Positive News”: 40 Posts To Remind Us There’s Good In The World',\n",
+ " 'type': 'url_citation',\n",
+ " 'url': 'https://www.boredpanda.com/wholesome-global-positive-news/?utm_source=openai'}]}]"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "response.content"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "95fbc34c-2f12-4d51-92c5-bf62a2f8900c",
+ "metadata": {},
+ "source": [
+ ":::tip\n",
+ "\n",
+ "You can recover just the text content of the response as a string by using `response.text()`. For example, to stream response text:\n",
+ "\n",
+ "```python\n",
+ "for token in llm_with_tools.stream(\"...\"):\n",
+ " print(token.text(), end=\"|\")\n",
+ "```\n",
+ "\n",
+ "See the [streaming guide](/docs/how_to/chat_streaming/) for more detail.\n",
+ "\n",
+ ":::"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2a332940-d409-41ee-ac36-2e9bee900e83",
+ "metadata": {},
+ "source": [
+ "The output message will also contain information from any tool invocations:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "a8011049-6c90-4fcb-82d4-850c72b46941",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'tool_outputs': [{'id': 'ws_67d192aeb6cc81918e736ad4a57937570d6f8507990d9d71',\n",
+ " 'status': 'completed',\n",
+ " 'type': 'web_search_call'}]}"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "response.additional_kwargs"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "288d47bb-3ccb-412f-a3d3-9f6cee0e6214",
+ "metadata": {},
+ "source": [
+ "#### File search\n",
+ "\n",
+ "To trigger a file search, pass a [file search tool](https://platform.openai.com/docs/guides/tools-file-search) to the model as you would another tool. You will need to populate an OpenAI-managed vector store and include the vector store ID in the tool definition. See [OpenAI documentation](https://platform.openai.com/docs/guides/tools-file-search) for more detail."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "1f758726-33ef-4c04-8a54-49adb783bbb3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Deep Research by OpenAI is a new capability integrated into ChatGPT that allows for the execution of multi-step research tasks independently. It can synthesize extensive amounts of online information and produce comprehensive reports similar to what a research analyst would do, significantly speeding up processes that would typically take hours for a human.\n",
+ "\n",
+ "### Key Features:\n",
+ "- **Independent Research**: Users simply provide a prompt, and the model can find, analyze, and synthesize information from hundreds of online sources.\n",
+ "- **Multi-Modal Capabilities**: The model is also able to browse user-uploaded files, plot graphs using Python, and embed visualizations in its outputs.\n",
+ "- **Training**: Deep Research has been trained using reinforcement learning on real-world tasks that require extensive browsing and reasoning.\n",
+ "\n",
+ "### Applications:\n",
+ "- Useful for professionals in sectors like finance, science, policy, and engineering, enabling them to obtain accurate and thorough research quickly.\n",
+ "- It can also be beneficial for consumers seeking personalized recommendations on complex purchases.\n",
+ "\n",
+ "### Limitations:\n",
+ "Although Deep Research presents significant advancements, it has some limitations, such as the potential to hallucinate facts or struggle with authoritative information. \n",
+ "\n",
+ "Deep Research aims to facilitate access to thorough and documented information, marking a significant step toward the broader goal of developing artificial general intelligence (AGI).\n"
+ ]
+ }
+ ],
+ "source": [
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "openai_vector_store_ids = [\n",
+ " \"vs_...\", # your IDs here\n",
+ "]\n",
+ "\n",
+ "tool = {\n",
+ " \"type\": \"file_search\",\n",
+ " \"vector_store_ids\": openai_vector_store_ids,\n",
+ "}\n",
+ "llm_with_tools = llm.bind_tools([tool])\n",
+ "\n",
+ "response = llm_with_tools.invoke(\"What is deep research by OpenAI?\")\n",
+ "print(response.text())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f88bbd71-83b0-45a6-9141-46ec9da93df6",
+ "metadata": {},
+ "source": [
+ "As with [web search](#web-search), the response will include content blocks with citations:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "865bc14e-1599-438e-be44-857891004979",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[{'file_id': 'file-3UzgX7jcC8Dt9ZAFzywg5k',\n",
+ " 'index': 346,\n",
+ " 'type': 'file_citation',\n",
+ " 'filename': 'deep_research_blog.pdf'},\n",
+ " {'file_id': 'file-3UzgX7jcC8Dt9ZAFzywg5k',\n",
+ " 'index': 575,\n",
+ " 'type': 'file_citation',\n",
+ " 'filename': 'deep_research_blog.pdf'}]"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "response.content[0][\"annotations\"][:2]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dd00f6be-2862-4634-a0c3-14ee39915c90",
+ "metadata": {},
+ "source": [
+ "It will also include information from the built-in tool invocations:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "e16a7110-d2d8-45fa-b372-5109f330540b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'tool_outputs': [{'id': 'fs_67d196fbb83c8191ba20586175331687089228ce932eceb1',\n",
+ " 'queries': ['What is deep research by OpenAI?'],\n",
+ " 'status': 'completed',\n",
+ " 'type': 'file_search_call'}]}"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "response.additional_kwargs"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6fda05f0-4b81-4709-9407-f316d760ad50",
+ "metadata": {},
+ "source": [
+ "### Managing conversation state\n",
+ "\n",
+ "The Responses API supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).\n",
+ "\n",
+ "#### Manually manage state\n",
+ "\n",
+ "You can manage the state manually or using [LangGraph](/docs/tutorials/chatbot/), as with other chat models:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "51d3e4d3-ea78-426c-9205-aecb0937fca7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "As of March 12, 2025, here are some positive news stories that highlight recent uplifting events:\n",
+ "\n",
+ "*... exemplify positive developments in health, environmental sustainability, and community well-being. \n"
+ ]
+ }
+ ],
+ "source": [
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "tool = {\"type\": \"web_search_preview\"}\n",
+ "llm_with_tools = llm.bind_tools([tool])\n",
+ "\n",
+ "first_query = \"What was a positive news story from today?\"\n",
+ "messages = [{\"role\": \"user\", \"content\": first_query}]\n",
+ "\n",
+ "response = llm_with_tools.invoke(messages)\n",
+ "response_text = response.text()\n",
+ "print(f\"{response_text[:100]}... {response_text[-100:]}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "5da9d20f-9712-46f4-a395-5be5a7c1bc62",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Your question was: \"What was a positive news story from today?\"\n",
+ "\n",
+ "The last sentence of my answer was: \"These stories exemplify positive developments in health, environmental sustainability, and community well-being.\"\n"
+ ]
+ }
+ ],
+ "source": [
+ "second_query = (\n",
+ " \"Repeat my question back to me, as well as the last sentence of your answer.\"\n",
+ ")\n",
+ "\n",
+ "messages.extend(\n",
+ " [\n",
+ " response,\n",
+ " {\"role\": \"user\", \"content\": second_query},\n",
+ " ]\n",
+ ")\n",
+ "second_response = llm_with_tools.invoke(messages)\n",
+ "print(second_response.text())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5fd8ca21-8a5e-4294-af32-11f26a040171",
+ "metadata": {},
+ "source": [
+ ":::tip\n",
+ "\n",
+ "You can use [LangGraph](https://langchain-ai.github.io/langgraph/) to manage conversational threads for you in a variety of backends, including in-memory and Postgres. See [this tutorial](/docs/tutorials/chatbot/) to get started.\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "\n",
+ "#### Passing `previous_response_id`\n",
+ "\n",
+ "When using the Responses API, LangChain messages will include an `\"id\"` field in their metadata. Passing this ID to subsequent invocations will continue the conversation. Note that this is [equivalent](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#openai-apis-for-conversation-state) to manually passing in messages from a billing perspective."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "009e541a-b372-410e-b9dd-608a8052ce09",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Hi Bob! How can I assist you today?\n"
+ ]
+ }
+ ],
+ "source": [
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(\n",
+ " model=\"gpt-4o-mini\",\n",
+ " use_responses_api=True,\n",
+ ")\n",
+ "response = llm.invoke(\"Hi, I'm Bob.\")\n",
+ "print(response.text())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "393a443a-4c5f-4a07-bc0e-c76e529b35e3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Your name is Bob. How can I help you today, Bob?\n"
+ ]
+ }
+ ],
+ "source": [
+ "second_response = llm.invoke(\n",
+ " \"What is my name?\",\n",
+ " previous_response_id=response.response_metadata[\"id\"],\n",
+ ")\n",
+ "print(second_response.text())"
+ ]
+ },
{
"cell_type": "markdown",
"id": "57e27714",
diff --git a/docs/docs/integrations/chat/tongyi.ipynb b/docs/docs/integrations/chat/tongyi.ipynb
index 8940489d39d..02ee96c1e86 100644
--- a/docs/docs/integrations/chat/tongyi.ipynb
+++ b/docs/docs/integrations/chat/tongyi.ipynb
@@ -26,22 +26,9 @@
},
{
"cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "collapsed": false,
- "jupyter": {
- "outputs_hidden": false
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Note: you may need to restart the kernel to use updated packages.\n"
- ]
- }
- ],
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
"source": [
"# Install the package\n",
"%pip install --upgrade --quiet dashscope"
@@ -49,8 +36,12 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {
+ "ExecuteTime": {
+ "end_time": "2025-03-05T01:11:20.457141Z",
+ "start_time": "2025-03-05T01:11:18.810160Z"
+ },
"collapsed": false,
"jupyter": {
"outputs_hidden": false
@@ -66,8 +57,12 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 2,
"metadata": {
+ "ExecuteTime": {
+ "end_time": "2025-03-05T01:11:24.270318Z",
+ "start_time": "2025-03-05T01:11:24.268064Z"
+ },
"collapsed": false,
"jupyter": {
"outputs_hidden": false
@@ -266,6 +261,52 @@
"ai_message"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Partial Mode\n",
+ "Enable the large model to continue generating content from the initial text you provide."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2025-03-05T01:31:29.155824Z",
+ "start_time": "2025-03-05T01:31:27.239667Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "AIMessage(content=' has cast off its heavy cloak of snow, donning instead a vibrant garment of fresh greens and floral hues; it is as if the world has woken from a long slumber, stretching and reveling in the warm caress of the sun. Everywhere I look, there is a symphony of life: birdsong fills the air, bees dance from flower to flower, and a gentle breeze carries the sweet fragrance of blossoms. It is in this season that my heart finds particular joy, for it whispers promises of renewal and growth, reminding me that even after the coldest winters, there will always be a spring to follow.', additional_kwargs={}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': '447283e9-ee31-9d82-8734-af572921cb05', 'token_usage': {'input_tokens': 40, 'output_tokens': 127, 'prompt_tokens_details': {'cached_tokens': 0}, 'total_tokens': 167}}, id='run-6a35a91c-cc12-4afe-b56f-fd26d9035357-0')"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_community.chat_models.tongyi import ChatTongyi\n",
+ "from langchain_core.messages import AIMessage, HumanMessage\n",
+ "\n",
+ "messages = [\n",
+ " HumanMessage(\n",
+ " content=\"\"\"Please continue the sentence \"Spring has arrived, and the earth\" to express the beauty of spring and the author's joy.\"\"\"\n",
+ " ),\n",
+ " AIMessage(\n",
+ " content=\"Spring has arrived, and the earth\", additional_kwargs={\"partial\": True}\n",
+ " ),\n",
+ "]\n",
+ "chatLLM = ChatTongyi()\n",
+ "ai_message = chatLLM.invoke(messages)\n",
+ "ai_message"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
diff --git a/docs/docs/integrations/document_loaders/agentql.ipynb b/docs/docs/integrations/document_loaders/agentql.ipynb
new file mode 100644
index 00000000000..0cdfcc39b81
--- /dev/null
+++ b/docs/docs/integrations/document_loaders/agentql.ipynb
@@ -0,0 +1,265 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "wkUAAcGZNSJ3"
+ },
+ "source": [
+ "# AgentQLLoader\n",
+ "\n",
+ "[AgentQL](https://www.agentql.com/)'s document loader provides structured data extraction from any web page using an [AgentQL query](https://docs.agentql.com/agentql-query). AgentQL can be used across multiple languages and web pages without breaking over time as pages change.\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "`AgentQLLoader` requires the following two parameters:\n",
+ "- `url`: The URL of the web page you want to extract data from.\n",
+ "- `query`: The AgentQL query to execute. Learn more about [how to write an AgentQL query in the docs](https://docs.agentql.com/agentql-query) or test one out in the [AgentQL Playground](https://dev.agentql.com/playground).\n",
+ "\n",
+ "The following parameters are optional:\n",
+ "- `api_key`: Your AgentQL API key from [dev.agentql.com](https://dev.agentql.com). **`Optional`.**\n",
+ "- `timeout`: The number of seconds to wait for a request before timing out. **Defaults to `900`.**\n",
+ "- `is_stealth_mode_enabled`: Whether to enable experimental anti-bot evasion strategies. This feature may not work for all websites at all times. Data extraction may take longer to complete with this mode enabled. **Defaults to `False`.**\n",
+ "- `wait_for`: The number of seconds to wait for the page to load before extracting data. **Defaults to `0`.**\n",
+ "- `is_scroll_to_bottom_enabled`: Whether to scroll to bottom of the page before extracting data. **Defaults to `False`.**\n",
+ "- `mode`: `\"standard\"` uses deep data analysis, while `\"fast\"` trades some depth of analysis for speed and is adequate for most use cases. [Learn more about the modes in this guide.](https://docs.agentql.com/accuracy/standard-mode) **Defaults to `\"fast\"`.**\n",
+ "- `is_screenshot_enabled`: Whether to take a screenshot before extracting data. Returned in 'metadata' as a Base64 string. **Defaults to `False`.**\n",
+ "\n",
+ "AgentQLLoader is implemented with AgentQL's [REST API](https://docs.agentql.com/rest-api/api-reference).\n",
+ "\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | Local | Serializable | JS support |\n",
+ "| :--- | :--- | :---: | :---: | :---: |\n",
+ "| AgentQLLoader| langchain-agentql | ✅ | ❌ | ❌ |\n",
+ "\n",
+ "### Loader features\n",
+ "| Source | Document Lazy Loading | Native Async Support |\n",
+ "| :---: | :---: | :---: |\n",
+ "| AgentQLLoader | ✅ | ❌ |"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "CaKa2QrnwPXq"
+ },
+ "source": [
+ "## Setup\n",
+ "\n",
+ "To use the AgentQL Document Loader, you will need to configure the `AGENTQL_API_KEY` environment variable, or use the `api_key` parameter. You can acquire an API key from our [Dev Portal](https://dev.agentql.com)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mZNJvUQBNSJ5"
+ },
+ "source": [
+ "### Installation\n",
+ "\n",
+ "Install **langchain-agentql**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "IblRoJJDNSJ5"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain_agentql"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "SNsUT60YvfCm"
+ },
+ "source": [
+ "### Set Credentials"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "2D1EN7Egvk1c"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "os.environ[\"AGENTQL_API_KEY\"] = \"YOUR_AGENTQL_API_KEY\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "D4hnJV_6NSJ5"
+ },
+ "source": [
+ "## Initialization\n",
+ "\n",
+ "Next instantiate your model object:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "oMJdxL_KNSJ5"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_agentql.document_loaders import AgentQLLoader\n",
+ "\n",
+ "loader = AgentQLLoader(\n",
+ " url=\"https://www.agentql.com/blog\",\n",
+ " query=\"\"\"\n",
+ " {\n",
+ " posts[] {\n",
+ " title\n",
+ " url\n",
+ " date\n",
+ " author\n",
+ " }\n",
+ " }\n",
+ " \"\"\",\n",
+ " is_scroll_to_bottom_enabled=True,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "SRxIOx90NSJ5"
+ },
+ "source": [
+ "## Load"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "bNnnCZ1oNSJ5",
+ "outputId": "d0eb8cb4-9742-4f0c-80f1-0509a3af1808"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Document(metadata={'request_id': 'bdb9dbe7-8a7f-427f-bc16-839ccc02cae6', 'generated_query': None, 'screenshot': None}, page_content=\"{'posts': [{'title': 'Launch Week Recap—make the web AI-ready', 'url': 'https://www.agentql.com/blog/2024-launch-week-recap', 'date': 'Nov 18, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Accurate data extraction from PDFs and images with AgentQL', 'url': 'https://www.agentql.com/blog/accurate-data-extraction-pdfs-images', 'date': 'Feb 1, 2025', 'author': 'Rachel-Lee Nabors'}, {'title': 'Introducing Scheduled Scraping Workflows', 'url': 'https://www.agentql.com/blog/scheduling', 'date': 'Dec 2, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Updates to Our Pricing Model', 'url': 'https://www.agentql.com/blog/2024-pricing-update', 'date': 'Nov 19, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5', 'url': 'https://www.agentql.com/blog/data-rest-api', 'date': 'Nov 15, 2024', 'author': 'Rachel-Lee Nabors'}]}\")"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "docs = loader.load()\n",
+ "docs[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "wtPMNh72NSJ5",
+ "outputId": "59d529a4-3c22-445c-f5cf-dc7b24168906"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'request_id': 'bdb9dbe7-8a7f-427f-bc16-839ccc02cae6', 'generated_query': None, 'screenshot': None}\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(docs[0].metadata)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "7RMuEwl4NSJ5"
+ },
+ "source": [
+ "## Lazy Load\n",
+ "\n",
+ "`AgentQLLoader` currently only loads one `Document` at a time. Therefore, `load()` and `lazy_load()` behave the same:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "FIYddZBONSJ5",
+ "outputId": "c39a7a6d-bc52-4ef9-b36f-e1d138590b79"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Document(metadata={'request_id': '06273abd-b2ef-4e15-b0ec-901cba7b4825', 'generated_query': None, 'screenshot': None}, page_content=\"{'posts': [{'title': 'Launch Week Recap—make the web AI-ready', 'url': 'https://www.agentql.com/blog/2024-launch-week-recap', 'date': 'Nov 18, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Accurate data extraction from PDFs and images with AgentQL', 'url': 'https://www.agentql.com/blog/accurate-data-extraction-pdfs-images', 'date': 'Feb 1, 2025', 'author': 'Rachel-Lee Nabors'}, {'title': 'Introducing Scheduled Scraping Workflows', 'url': 'https://www.agentql.com/blog/scheduling', 'date': 'Dec 2, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Updates to Our Pricing Model', 'url': 'https://www.agentql.com/blog/2024-pricing-update', 'date': 'Nov 19, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5', 'url': 'https://www.agentql.com/blog/data-rest-api', 'date': 'Nov 15, 2024', 'author': 'Rachel-Lee Nabors'}]}\")]"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "pages = [doc for doc in loader.lazy_load()]\n",
+ "pages"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For more information on how to use this integration, please refer to the [git repo](https://github.com/tinyfish-io/agentql-integrations/tree/main/langchain) or the [langchain integration documentation](https://docs.agentql.com/integrations/langchain)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/docs/docs/integrations/providers/ads4gpts.mdx b/docs/docs/integrations/providers/ads4gpts.mdx
new file mode 100644
index 00000000000..fd82f96f8eb
--- /dev/null
+++ b/docs/docs/integrations/providers/ads4gpts.mdx
@@ -0,0 +1,82 @@
+# ADS4GPTs
+
+> [ADS4GPTs](https://www.ads4gpts.com/) is building the open monetization backbone of the AI-Native internet. It helps AI applications monetize through advertising with a UX and Privacy first approach.
+
+## Installation and Setup
+
+### Using pip
+You can install the package directly from PyPI:
+
+```bash
+pip install ads4gpts-langchain
+```
+
+### From Source
+Alternatively, install from source:
+
+```bash
+git clone https://github.com/ADS4GPTs/ads4gpts.git
+cd ads4gpts/libs/python-sdk/ads4gpts-langchain
+pip install .
+```
+
+## Prerequisites
+
+- Python 3.11+
+- ADS4GPTs API Key ([Obtain API Key](https://www.ads4gpts.com))
+
+## Environment Variables
+Set the following environment variables for API authentication:
+
+```bash
+export ADS4GPTS_API_KEY='your-ads4gpts-api-key'
+```
+
+Alternatively, API keys can be passed directly when initializing classes or stored in a `.env` file.
+
+## Tools
+
+ADS4GPTs provides two main tools for monetization:
+
+### Ads4gptsInlineSponsoredResponseTool
+This tool fetches native, sponsored responses that can be seamlessly integrated within your AI application's outputs.
+
+```python
+from ads4gpts_langchain import Ads4gptsInlineSponsoredResponseTool
+```
+
+### Ads4gptsSuggestedPromptTool
+Generates sponsored prompt suggestions to enhance user engagement and provide monetization opportunities.
+
+```python
+from ads4gpts_langchain import Ads4gptsSuggestedPromptTool
+```
+### Ads4gptsInlineConversationalTool
+Delivers conversational sponsored content that naturally fits within chat interfaces and dialogs.
+
+```python
+from ads4gpts_langchain import Ads4gptsInlineConversationalTool
+```
+
+### Ads4gptsInlineBannerTool
+Provides inline banner advertisements that can be displayed within your AI application's response.
+
+```python
+from ads4gpts_langchain import Ads4gptsInlineBannerTool
+```
+
+### Ads4gptsSuggestedBannerTool
+Generates banner advertisement suggestions that can be presented to users as recommended content.
+
+```python
+from ads4gpts_langchain import Ads4gptsSuggestedBannerTool
+```
+
+## Toolkit
+
+The `Ads4gptsToolkit` combines these tools for convenient access in LangChain applications.
+
+```python
+from ads4gpts_langchain import Ads4gptsToolkit
+```
+
diff --git a/docs/docs/integrations/providers/agentql.mdx b/docs/docs/integrations/providers/agentql.mdx
new file mode 100644
index 00000000000..20381e1784e
--- /dev/null
+++ b/docs/docs/integrations/providers/agentql.mdx
@@ -0,0 +1,35 @@
+# AgentQL
+
+[AgentQL](https://www.agentql.com/) provides web interaction and structured data extraction from any web page using an [AgentQL query](https://docs.agentql.com/agentql-query) or a Natural Language prompt. AgentQL can be used across multiple languages and web pages without breaking over time as pages change.
+
+## Installation and Setup
+
+Install the integration package:
+
+```bash
+pip install langchain-agentql
+```
+
+## API Key
+
+Get an API Key from our [Dev Portal](https://dev.agentql.com/) and add it to your environment variables:
+```
+export AGENTQL_API_KEY="your-api-key-here"
+```
+
+## DocumentLoader
+AgentQL's document loader provides structured data extraction from any web page using an AgentQL query.
+
+```python
+from langchain_agentql.document_loaders import AgentQLLoader
+```
+See our [document loader documentation and usage example](/docs/integrations/document_loaders/agentql).
+
+## Tools and Toolkits
+AgentQL tools provide web interaction and structured data extraction from any web page using an AgentQL query or a Natural Language prompt.
+
+```python
+from langchain_agentql.tools import ExtractWebDataTool, ExtractWebDataBrowserTool, GetWebElementBrowserTool
+from langchain_agentql import AgentQLBrowserToolkit
+```
+See our [tools documentation and usage example](/docs/integrations/tools/agentql).
diff --git a/docs/docs/integrations/providers/apify.mdx b/docs/docs/integrations/providers/apify.mdx
index 4e7a8848283..3d30c6d4a98 100644
--- a/docs/docs/integrations/providers/apify.mdx
+++ b/docs/docs/integrations/providers/apify.mdx
@@ -29,7 +29,7 @@ You can use the `ApifyActorsTool` to use Apify Actors with agents.
from langchain_apify import ApifyActorsTool
```
-See [this notebook](/docs/integrations/tools/apify_actors) for example usage.
+See [this notebook](/docs/integrations/tools/apify_actors) for example usage and a full example of a tool-calling agent with LangGraph in the [Apify LangGraph agent Actor template](https://apify.com/templates/python-langgraph).
For more information on how to use this tool, visit [the Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph).
diff --git a/docs/docs/integrations/providers/contextual.ipynb b/docs/docs/integrations/providers/contextual.ipynb
new file mode 100644
index 00000000000..5363a188bc0
--- /dev/null
+++ b/docs/docs/integrations/providers/contextual.ipynb
@@ -0,0 +1,110 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Contextual AI\n",
+ "\n",
+ "Contextual AI is a platform that offers state-of-the-art Retrieval-Augmented Generation (RAG) technology for enterprise applications. Our platform and models help innovative teams build production-ready AI applications that can process millions of pages of documents with exceptional accuracy.\n",
+ "\n",
+ "## Grounded Language Model (GLM)\n",
+ "\n",
+ "The Grounded Language Model (GLM) is specifically engineered to minimize hallucinations in RAG and agentic applications. The GLM achieves:\n",
+ "\n",
+ "- State-of-the-art performance on the FACTS benchmark\n",
+ "- Responses strictly grounded in provided knowledge sources\n",
+ "\n",
+ "## Using Contextual AI with LangChain\n",
+ "\n",
+ "See details [here](/docs/integrations/chat/contextual).\n",
+ "\n",
+ "This integration allows you to easily incorporate Contextual AI's GLM into your LangChain workflows. Whether you're building applications for regulated industries or security-conscious environments, Contextual AI provides the grounded and reliable responses your use cases demand.\n",
+ "\n",
+ "Get started with a free trial today and experience the most grounded language model for enterprise AI applications."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "y8ku6X96sebl"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "According to the information available, there are two types of cats in the world:\n",
+ "\n",
+ "1. Good cats\n",
+ "2. Best cats\n"
+ ]
+ }
+ ],
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "from langchain_contextual import ChatContextual\n",
+ "\n",
+ "# Set credentials\n",
+ "if not os.getenv(\"CONTEXTUAL_AI_API_KEY\"):\n",
+ " os.environ[\"CONTEXTUAL_AI_API_KEY\"] = getpass.getpass(\n",
+ " \"Enter your Contextual API key: \"\n",
+ " )\n",
+ "\n",
+ "# initialize Contextual llm\n",
+ "llm = ChatContextual(\n",
+ " model=\"v1\",\n",
+ " api_key=\"\",\n",
+ ")\n",
+ "# include a system prompt (optional)\n",
+ "system_prompt = \"You are a helpful assistant that uses all of the provided knowledge to answer the user's query to the best of your ability.\"\n",
+ "\n",
+ "# provide your own knowledge from your knowledge-base here in an array of string\n",
+ "knowledge = [\n",
+ " \"There are 2 types of dogs in the world: good dogs and best dogs.\",\n",
+ " \"There are 2 types of cats in the world: good cats and best cats.\",\n",
+ "]\n",
+ "\n",
+ "# create your message\n",
+ "messages = [\n",
+ " (\"human\", \"What type of cats are there in the world and what are the types?\"),\n",
+ "]\n",
+ "\n",
+ "# invoke the GLM by providing the knowledge strings, optional system prompt\n",
+ "# if you want to turn off the GLM's commentary, pass True to the `avoid_commentary` argument\n",
+ "ai_msg = llm.invoke(\n",
+ " messages, knowledge=knowledge, system_prompt=system_prompt, avoid_commentary=True\n",
+ ")\n",
+ "\n",
+ "print(ai_msg.content)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/docs/docs/integrations/providers/opengradient.ipynb b/docs/docs/integrations/providers/opengradient.ipynb
new file mode 100644
index 00000000000..734199d1e65
--- /dev/null
+++ b/docs/docs/integrations/providers/opengradient.ipynb
@@ -0,0 +1,138 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
+ "source": [
+ "---\n",
+ "sidebar_label: OpenGradient\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# OpenGradient\n",
+ "[OpenGradient](https://www.opengradient.ai/) is a decentralized AI computing network enabling globally accessible, permissionless, and verifiable ML model inference.\n",
+ "\n",
+ "The OpenGradient LangChain package currently offers a toolkit that allows developers to build their own custom ML inference tools for models on the OpenGradient network. This was previously a challenge because of the context-window polluting nature of large model parameters -- imagine having to give your agent a 200x200 array of floating-point data!\n",
+ "\n",
+ "The toolkit solves this problem by encapsulating all data processing logic within the tool definition itself. This approach keeps the agent's context window clean while giving developers complete flexibility to implement custom data processing and live-data retrieval for their ML models."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Installation and Setup\n",
+ "Ensure that you have an OpenGradient API key in order to access the OpenGradient network. If you already have an API key, simply set the environment variable:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "shellscript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "!export OPENGRADIENT_PRIVATE_KEY=\"your-api-key\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you need to set up a new API key, download the opengradient SDK and follow the instructions to initialize a new configuration."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "shellscript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "!pip install opengradient\n",
+ "!opengradient config init"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Once you have set up your API key, install the langchain-opengradient package."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "powershell"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "%pip install -U langchain-opengradient"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## OpenGradient Toolkit\n",
+ "The OpenGradientToolkit empowers developers to create specialized tools based on [ML models](https://hub.opengradient.ai/models) and [workflows](https://docs.opengradient.ai/developers/sdk/ml_workflows.html) deployed on the OpenGradient decentralized network. This integration enables LangChain agents to access powerful ML capabilities while maintaining efficient context usage.\n",
+ "\n",
+ "### Key Benefits\n",
+ "* 🔄 Real-time data integration - Process live data feeds within your tools\n",
+ "\n",
+ "* 🎯 Dynamic processing - Custom data pipelines that adapt to specific agent inputs\n",
+ "\n",
+ "* 🧠 Context efficiency - Handle complex ML operations without flooding your context window\n",
+ "\n",
+ "* 🔌 Seamless deployment - Easy integration with models already on the OpenGradient network\n",
+ "\n",
+ "* 🔧 Full customization - Create and deploy your own specific models through the [OpenGradient SDK](https://docs.opengradient.ai/developers/sdk/model_management.html), then build custom tools from them\n",
+ "\n",
+ "* 🔐 Verifiable inference - All inferences run on the decentralized OpenGradient network, allowing users to choose various flavors of security such as ZKML and TEE for trustless, verifiable model execution\n",
+ "\n",
+ "For detailed examples and implementation guides, check out our [comprehensive tutorial](/docs/integrations/tools/opengradient_toolkit)."
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/docs/docs/integrations/providers/tableau.mdx b/docs/docs/integrations/providers/tableau.mdx
new file mode 100644
index 00000000000..fc872a5fa12
--- /dev/null
+++ b/docs/docs/integrations/providers/tableau.mdx
@@ -0,0 +1,15 @@
+# Tableau
+
+[Tableau](https://www.tableau.com/) is an analytics platform that enables anyone to
+see and understand data.
+
+
+## Installation and Setup
+
+```bash
+pip install langchain-tableau
+```
+
+## Tools
+
+See detail on available tools [here](/docs/integrations/tools/tableau).
diff --git a/docs/docs/integrations/providers/valthera.mdx b/docs/docs/integrations/providers/valthera.mdx
new file mode 100644
index 00000000000..7a7f56963a0
--- /dev/null
+++ b/docs/docs/integrations/providers/valthera.mdx
@@ -0,0 +1,63 @@
+# Valthera
+
+> [Valthera](https://github.com/valthera/valthera) is an open-source framework that empowers LLM Agents to drive meaningful, context-aware user engagement. It evaluates user motivation and ability in real time, ensuring that notifications and actions are triggered only when users are most receptive.
+>
+> **langchain-valthera** integrates Valthera with LangChain, enabling developers to build smarter, behavior-driven engagement systems that deliver personalized interactions.
+
+## Installation and Setup
+
+### Install langchain-valthera
+
+Install the LangChain Valthera package via pip:
+
+```bash
+pip install -U langchain-valthera
+```
+
+Import the ValtheraTool:
+
+```python
+from langchain_valthera.tools import ValtheraTool
+```
+
+### Example: Initializing the ValtheraTool for LangChain
+
+This example shows how to initialize the ValtheraTool using a `DataAggregator` and configuration for motivation and ability scoring.
+
+```python
+import os
+from langchain_openai import ChatOpenAI
+from valthera.aggregator import DataAggregator
+from mocks import hubspot, posthog, snowflake # Replace these with your actual connector implementations
+from langchain_valthera.tools import ValtheraTool
+
+# Initialize the DataAggregator with your data connectors
+data_aggregator = DataAggregator(
+ connectors={
+ "hubspot": hubspot(),
+ "posthog": posthog(),
+ "app_db": snowflake()
+ }
+)
+
+# Initialize the ValtheraTool with your scoring configurations
+valthera_tool = ValtheraTool(
+ data_aggregator=data_aggregator,
+ motivation_config=[
+ {"key": "hubspot_lead_score", "weight": 0.30, "transform": lambda x: min(x, 100) / 100.0},
+ {"key": "posthog_events_count_past_30days", "weight": 0.30, "transform": lambda x: min(x, 50) / 50.0},
+ {"key": "hubspot_marketing_emails_opened", "weight": 0.20, "transform": lambda x: min(x / 10.0, 1.0)},
+ {"key": "posthog_session_count", "weight": 0.20, "transform": lambda x: min(x / 5.0, 1.0)}
+ ],
+ ability_config=[
+ {"key": "posthog_onboarding_steps_completed", "weight": 0.30, "transform": lambda x: min(x / 5.0, 1.0)},
+ {"key": "posthog_session_count", "weight": 0.30, "transform": lambda x: min(x / 10.0, 1.0)},
+ {"key": "behavior_complexity", "weight": 0.40, "transform": lambda x: 1 - (min(x, 5) / 5.0)}
+ ]
+)
+
+print("✅ ValtheraTool successfully initialized for LangChain integration!")
+```
+
+
+The langchain-valthera integration allows you to assess user behavior and decide on the best course of action for engagement, ensuring that interactions are both timely and relevant within your LangChain applications.
\ No newline at end of file
diff --git a/docs/docs/integrations/tools/ads4gpts.ipynb b/docs/docs/integrations/tools/ads4gpts.ipynb
new file mode 100644
index 00000000000..b04fdd51957
--- /dev/null
+++ b/docs/docs/integrations/tools/ads4gpts.ipynb
@@ -0,0 +1,365 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# ADS4GPTs\n",
+ "\n",
+ "Integrate AI native advertising into your Agentic application.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Overview"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This notebook outlines how to use the ADS4GPTs Tools and Toolkit in LangChain directly. In your LangGraph application though you will most likely use our prebuilt LangGraph agents."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Install ADS4GPTs Package\n",
+ "Install the ADS4GPTs package using pip."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Install ADS4GPTs Package\n",
+ "# Install the ADS4GPTs package using pip\n",
+ "!pip install ads4gpts-langchain"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Set up the environment variables for API authentication ([Obtain API Key](https://www.ads4gpts.com))."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Setup Environment Variables\n",
+ "# Prompt the user to enter their ADS4GPTs API key securely\n",
+ "if not os.environ.get(\"ADS4GPTS_API_KEY\"):\n",
+ " os.environ[\"ADS4GPTS_API_KEY\"] = getpass(\"Enter your ADS4GPTS API key: \")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Instantiation"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Import the necessary libraries, including ADS4GPTs tools and toolkit.\n",
+ "\n",
+ "Initialize the ADS4GPTs tools such as Ads4gptsInlineSponsoredResponseTool. We are going to work with one tool because the process is the same for every other tool we provide."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import Required Libraries\n",
+ "\n",
+ "import os\n",
+ "from getpass import getpass\n",
+ "\n",
+ "from ads4gpts_langchain import Ads4gptsInlineSponsoredResponseTool, Ads4gptsToolkit"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialize ADS4GPTs Tools\n",
+ "# Initialize the Ads4gptsInlineSponsoredResponseTool\n",
+ "inline_sponsored_response_tool = Ads4gptsInlineSponsoredResponseTool(\n",
+ " ads4gpts_api_key=os.environ[\"ADS4GPTS_API_KEY\"],\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Toolkit Instantiation\n",
+ "Initialize the Ads4gptsToolkit with the required parameters."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Initialized tool: Ads4gptsInlineSponsoredResponseTool\n",
+ "Initialized tool: Ads4gptsSuggestedPromptTool\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Toolkit Initialization\n",
+ "# Initialize the Ads4gptsToolkit with the required parameters\n",
+ "toolkit = Ads4gptsToolkit(\n",
+ " ads4gpts_api_key=os.environ[\"ADS4GPTS_API_KEY\"],\n",
+ ")\n",
+ "\n",
+ "# Retrieve tools from the toolkit\n",
+ "tools = toolkit.get_tools()\n",
+ "\n",
+ "# Print the initialized tools\n",
+ "for tool in tools:\n",
+ " print(f\"Initialized tool: {tool.__class__.__name__}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Invocation"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Run the ADS4GPTs tools with sample inputs and display the results."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Inline Sponsored Response Result: {'ad_text': '<- Promoted Content ->\\n\\nLearn the sartorial ways and get your handmade tailored suit by the masters themselves with Bespoke Tailors. [Subscribe now](https://youtube.com/@bespoketailorsdubai?si=9iH587ujoWKkueFa)\\n\\n<->'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Run ADS4GPTs Tools\n",
+ "# Sample input data for the tools\n",
+ "sample_input = {\n",
+ " \"id\": \"test_id\",\n",
+ " \"user_gender\": \"female\",\n",
+ " \"user_age\": \"25-34\",\n",
+ " \"user_persona\": \"test_persona\",\n",
+ " \"ad_recommendation\": \"test_recommendation\",\n",
+ " \"undesired_ads\": \"test_undesired_ads\",\n",
+ " \"context\": \"test_context\",\n",
+ " \"num_ads\": 1,\n",
+ " \"style\": \"neutral\",\n",
+ "}\n",
+ "\n",
+ "# Run Ads4gptsInlineSponsoredResponseTool\n",
+ "inline_sponsored_response_result = inline_sponsored_response_tool._run(\n",
+ " **sample_input, ad_format=\"INLINE_SPONSORED_RESPONSE\"\n",
+ ")\n",
+ "print(\"Inline Sponsored Response Result:\", inline_sponsored_response_result)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Async Run ADS4GPTs Tools\n",
+ "Run the ADS4GPTs tools asynchronously with sample inputs and display the results."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Async Inline Sponsored Response Result: {'ad_text': '<- Promoted Content ->\\n\\nGet the best tailoring content from Jonathan Farley. Learn to tie 100 knots and more! [Subscribe now](https://www.youtube.com/channel/UCx5hk4LN3p02jcUt3j_cexQ)\\n\\n<->'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import asyncio\n",
+ "\n",
+ "\n",
+ "# Define an async function to run the tools asynchronously\n",
+ "async def run_ads4gpts_tools_async():\n",
+ " # Run Ads4gptsInlineSponsoredResponseTool asynchronously\n",
+ " inline_sponsored_response_result = await inline_sponsored_response_tool._arun(\n",
+ " **sample_input, ad_format=\"INLINE_SPONSORED_RESPONSE\"\n",
+ " )\n",
+ " print(\"Async Inline Sponsored Response Result:\", inline_sponsored_response_result)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Toolkit Invocation\n",
+ "Use the Ads4gptsToolkit to get and run tools."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Result from Ads4gptsInlineSponsoredResponseTool: {'ad_text': '<- Promoted Content ->\\n\\nLearn the sartorial ways and get your handmade tailored suit by the masters themselves with Bespoke Tailors. [Subscribe now](https://youtube.com/@bespoketailorsdubai?si=9iH587ujoWKkueFa)\\n\\n<->'}\n",
+ "Async result from Ads4gptsInlineSponsoredResponseTool: {'ad_text': '<- Promoted Content ->\\n\\nGet the best tailoring content from Jonathan Farley. Learn to tie 100 knots and more! [Subscribe now](https://www.youtube.com/channel/UCx5hk4LN3p02jcUt3j_cexQ)\\n\\n<->'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Sample input data for the tools\n",
+ "sample_input = {\n",
+ " \"id\": \"test_id\",\n",
+ " \"user_gender\": \"female\",\n",
+ " \"user_age\": \"25-34\",\n",
+ " \"user_persona\": \"test_persona\",\n",
+ " \"ad_recommendation\": \"test_recommendation\",\n",
+ " \"undesired_ads\": \"test_undesired_ads\",\n",
+ " \"context\": \"test_context\",\n",
+ " \"num_ads\": 1,\n",
+ " \"style\": \"neutral\",\n",
+ "}\n",
+ "\n",
+ "# Run one tool and print the result\n",
+ "tool = tools[0]\n",
+ "result = tool._run(**sample_input)\n",
+ "print(f\"Result from {tool.__class__.__name__}:\", result)\n",
+ "\n",
+ "\n",
+ "# Define an async function to run the tools asynchronously\n",
+ "async def run_toolkit_tools_async():\n",
+ " result = await tool._arun(**sample_input)\n",
+ " print(f\"Async result from {tool.__class__.__name__}:\", result)\n",
+ "\n",
+ "\n",
+ "# Execute the async function\n",
+ "await run_toolkit_tools_async()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Chaining\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not os.environ.get(\"OPENAI_API_KEY\"):\n",
+ " os.environ[\"OPENAI_API_KEY\"] = getpass(\"Enter your OPENAI_API_KEY API key: \")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Tool call: content='' additional_kwargs={'tool_calls': [{'id': 'call_XLR5UjF8JhylVHvrk9mTjhj8', 'function': {'arguments': '{\"id\":\"unique_user_id_001\",\"user_gender\":\"male\",\"user_age\":\"18-24\",\"ad_recommendation\":\"Stylish and trendy clothing suitable for young men going out with friends.\",\"undesired_ads\":\"formal wear, women\\'s clothing, children\\'s clothing\",\"context\":\"A young man looking for clothing to go out with friends\",\"num_ads\":1,\"style\":\"youthful and trendy\",\"ad_format\":\"INLINE_SPONSORED_RESPONSE\"}', 'name': 'ads4gpts_inline_sponsored_response'}, 'type': 'function'}], 'refusal': None} response_metadata={'token_usage': {'completion_tokens': 106, 'prompt_tokens': 1070, 'total_tokens': 1176, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 1024}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_eb9dce56a8', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-e3e64b4b-4505-4a71-bf02-a8d77bb68eee-0' tool_calls=[{'name': 'ads4gpts_inline_sponsored_response', 'args': {'id': 'unique_user_id_001', 'user_gender': 'male', 'user_age': '18-24', 'ad_recommendation': 'Stylish and trendy clothing suitable for young men going out with friends.', 'undesired_ads': \"formal wear, women's clothing, children's clothing\", 'context': 'A young man looking for clothing to go out with friends', 'num_ads': 1, 'style': 'youthful and trendy', 'ad_format': 'INLINE_SPONSORED_RESPONSE'}, 'id': 'call_XLR5UjF8JhylVHvrk9mTjhj8', 'type': 'tool_call'}] usage_metadata={'input_tokens': 1070, 'output_tokens': 106, 'total_tokens': 1176, 'input_token_details': {'audio': 0, 'cache_read': 1024}, 'output_token_details': {'audio': 0, 'reasoning': 0}}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "openai_model = ChatOpenAI(model=\"gpt-4o\", openai_api_key=os.environ[\"OPENAI_API_KEY\"])\n",
+ "model = openai_model.bind_tools(tools)\n",
+ "model_response = model.invoke(\n",
+ " \"Get me an ad for clothing. I am a young man looking to go out with friends.\"\n",
+ ")\n",
+ "print(\"Tool call:\", model_response)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## API reference"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can learn more about ADS4GPTs and the tools at our [GitHub](https://github.com/ADS4GPTs/ads4gpts/tree/main)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ads4gpts-langraph-agent",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/docs/integrations/tools/agentql.ipynb b/docs/docs/integrations/tools/agentql.ipynb
new file mode 100644
index 00000000000..96c3c3797cc
--- /dev/null
+++ b/docs/docs/integrations/tools/agentql.ipynb
@@ -0,0 +1,1077 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "a6f91f20",
+ "metadata": {
+ "id": "a6f91f20"
+ },
+ "source": [
+ "# AgentQL\n",
+ "\n",
+    "[AgentQL](https://www.agentql.com/) tools provide web interaction and structured data extraction from any web page using an [AgentQL query](https://docs.agentql.com/agentql-query) or a Natural Language prompt. AgentQL can be used across multiple languages and web pages without breaking over time as pages change.\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "AgentQL provides the following three tools:\n",
+ "\n",
+ "- **`ExtractWebDataTool`** extracts structured data as JSON from a web page given a URL using either an [AgentQL query](https://docs.agentql.com/agentql-query/query-intro) or a Natural Language description of the data.\n",
+ "\n",
+    "The following two tools are also bundled as `AgentQLBrowserToolkit` and must be used with a `Playwright` browser or a remote browser instance via Chrome DevTools Protocol (CDP):\n",
+ "\n",
+ "- **`ExtractWebDataBrowserTool`** extracts structured data as JSON from the active web page in a browser using either an [AgentQL query](https://docs.agentql.com/agentql-query/query-intro) or a Natural Language description.\n",
+ "\n",
+ "- **`GetWebElementBrowserTool`** finds a web element on the active web page in a browser using a Natural Language description and returns its CSS selector for further interaction.\n",
+ "\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/langchain_agentql) | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: |\n",
+ "| AgentQL | langchain-agentql | ❌ | ❌ | 1.0.0 |\n",
+ "\n",
+ "### Tool features\n",
+ "\n",
+ "| Tool | Web Data Extraction | Web Element Extraction | Use With Local Browser |\n",
+ "| :--- | :---: | :---: | :---: |\n",
+ "| ExtractWebDataTool | ✅ | ❌ | ❌\n",
+ "| ExtractWebDataBrowserTool | ✅ | ❌ | ✅\n",
+ "| GetWebElementBrowserTool | ❌ | ✅ | ✅"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e0ec39b2",
+ "metadata": {},
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f85b4089",
+ "metadata": {
+ "id": "f85b4089"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install --quiet -U langchain_agentql"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "uQvMedxGWeUV",
+ "metadata": {
+ "id": "uQvMedxGWeUV"
+ },
+ "source": [
+ "To run this notebook, install `Playwright` browser and configure Jupyter Notebook's `asyncio` loop."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "OMaAdTLYWRfL",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "OMaAdTLYWRfL",
+ "outputId": "e1472145-dea3-45b1-b8d1-875772bfdfe1"
+ },
+ "outputs": [],
+ "source": [
+ "!playwright install\n",
+ "\n",
+ "# This import is required only for jupyter notebooks, since they have their own eventloop\n",
+ "import nest_asyncio\n",
+ "\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b15e9266",
+ "metadata": {
+ "id": "b15e9266"
+ },
+ "source": [
+ "### Credentials\n",
+ "\n",
+ "To use the AgentQL tools, you will need to get your own API key from the [AgentQL Dev Portal](https://dev.agentql.com/) and set the AgentQL environment variable."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
+ "metadata": {
+ "id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "os.environ[\"AGENTQL_API_KEY\"] = \"YOUR_AGENTQL_API_KEY\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3nk4k9ED8E9Z",
+ "metadata": {
+ "id": "3nk4k9ED8E9Z"
+ },
+ "source": [
+ "## Instantiation"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
+ "metadata": {
+ "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f"
+ },
+ "source": [
+ "### `ExtractWebDataTool`\n",
+ "You can instantiate `ExtractWebDataTool` with the following params:\n",
+ "- `api_key`: Your AgentQL API key from [dev.agentql.com](https://dev.agentql.com). **`Optional`.**\n",
+ "- `timeout`: The number of seconds to wait for a request before timing out. Increase if data extraction times out. **Defaults to `900`.**\n",
+ "- `is_stealth_mode_enabled`: Whether to enable experimental anti-bot evasion strategies. This feature may not work for all websites at all times. Data extraction may take longer to complete with this mode enabled. **Defaults to `False`.**\n",
+ "- `wait_for`: The number of seconds to wait for the page to load before extracting data. **Defaults to `0`.**\n",
+ "- `is_scroll_to_bottom_enabled`: Whether to scroll to bottom of the page before extracting data. **Defaults to `False`.**\n",
+    "- `mode`: `\"standard\"` uses deep data analysis, while `\"fast\"` trades some depth of analysis for speed and is adequate for most use cases. [Learn more about the modes in this guide.](https://docs.agentql.com/accuracy/standard-mode) **Defaults to `\"fast\"`.**\n",
+ "- `is_screenshot_enabled`: Whether to take a screenshot before extracting data. Returned in 'metadata' as a Base64 string. **Defaults to `False`.**\n",
+ "\n",
+ "`ExtractWebDataTool` is implemented with AgentQL's REST API, you can view more details about the parameters in the [API Reference docs](https://docs.agentql.com/rest-api/api-reference)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
+ "metadata": {
+ "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_agentql.tools import ExtractWebDataTool\n",
+ "\n",
+ "extract_web_data_tool = ExtractWebDataTool()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "sSTERzBmCyi3",
+ "metadata": {
+ "id": "sSTERzBmCyi3"
+ },
+ "source": [
+ "### `ExtractWebDataBrowserTool`\n",
+ "\n",
+ "To instantiate **ExtractWebDataBrowserTool**, you need to connect the tool with a browser instance.\n",
+ "\n",
+ "You can set the following params:\n",
+ "- `timeout`: The number of seconds to wait for a request before timing out. Increase if data extraction times out. **Defaults to `900`.**\n",
+ "- `wait_for_network_idle`: Whether to wait until the network reaches a full idle state before executing. **Defaults to `True`.**\n",
+ "- `include_hidden`: Whether to take into account visually hidden elements on the page. **Defaults to `True`.**\n",
+    "- `mode`: `\"standard\"` uses deep data analysis, while `\"fast\"` trades some depth of analysis for speed and is adequate for most use cases. [Learn more about the modes in this guide.](https://docs.agentql.com/accuracy/standard-mode) **Defaults to `\"fast\"`.**\n",
+ "\n",
+ "`ExtractWebDataBrowserTool` is implemented with AgentQL's SDK. You can find more details about the parameters and the functions in AgentQL's [API References](https://docs.agentql.com/python-sdk/api-references/agentql-page#querydata)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "bnhKlXqHE7Z5",
+ "metadata": {
+ "id": "bnhKlXqHE7Z5"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_agentql.tools import ExtractWebDataBrowserTool\n",
+ "from langchain_agentql.utils import create_async_playwright_browser\n",
+ "\n",
+ "async_browser = await create_async_playwright_browser()\n",
+ "\n",
+ "extract_web_data_browser_tool = ExtractWebDataBrowserTool(async_browser=async_browser)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "DJjSQwSaJ4Ml",
+ "metadata": {
+ "id": "DJjSQwSaJ4Ml"
+ },
+ "source": [
+ "### `GetWebElementBrowserTool`\n",
+ "\n",
+ "To instantiate **GetWebElementBrowserTool**, you need to connect the tool with a browser instance.\n",
+ "\n",
+ "You can set the following params:\n",
+ "- `timeout`: The number of seconds to wait for a request before timing out. Increase if data extraction times out. **Defaults to `900`.**\n",
+ "- `wait_for_network_idle`: Whether to wait until the network reaches a full idle state before executing. **Defaults to `True`.**\n",
+ "- `include_hidden`: Whether to take into account visually hidden elements on the page. **Defaults to `False`.**\n",
+    "- `mode`: `\"standard\"` uses deep data analysis, while `\"fast\"` trades some depth of analysis for speed and is adequate for most use cases. [Learn more about the modes in this guide.](https://docs.agentql.com/accuracy/standard-mode) **Defaults to `\"fast\"`.**\n",
+ "\n",
+    "`GetWebElementBrowserTool` is implemented with AgentQL's SDK. You can find more details about the parameters and the functions in AgentQL's [API References](https://docs.agentql.com/python-sdk/api-references/agentql-page#queryelements)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "503PRMZ1Lay7",
+ "metadata": {
+ "id": "503PRMZ1Lay7"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_agentql.tools import GetWebElementBrowserTool\n",
+ "\n",
+ "extract_web_element_tool = GetWebElementBrowserTool(async_browser=async_browser)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "RVRZn9Dy8Q65",
+ "metadata": {
+ "id": "RVRZn9Dy8Q65"
+ },
+ "source": [
+ "## Invocation"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "74147a1a",
+ "metadata": {
+ "id": "74147a1a"
+ },
+ "source": [
+ "### `ExtractWebDataTool`\n",
+ "\n",
+    "This tool uses AgentQL's REST API under the hood, sending the publicly available web page's URL to AgentQL's endpoint. This will not work with private pages or logged-in sessions. Use `ExtractWebDataBrowserTool` for those use cases.\n",
+ "\n",
+ "- `url`: The URL of the web page you want to extract data from.\n",
+ "- `query`: The AgentQL query to execute. Use AgentQL query if you want to extract precisely structured data. Learn more about [how to write an AgentQL query in the docs](https://docs.agentql.com/agentql-query) or test one out in the [AgentQL Playground](https://dev.agentql.com/playground).\n",
+ "- `prompt`: A Natural Language description of the data to extract from the page. AgentQL will infer the data’s structure from your prompt. Use `prompt` if you want to extract data defined by free-form language without defining a particular structure. \n",
+ "\n",
+ "**Note:** You must define either a `query` or a `prompt` to use AgentQL."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
+ "outputId": "48996c37-b61e-487f-a618-719f75afc4db"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'data': {'posts': [{'title': 'Launch Week Recap—make the web AI-ready',\n",
+ " 'url': 'https://www.agentql.com/blog/2024-launch-week-recap',\n",
+ " 'date': 'Nov 18, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Accurate data extraction from PDFs and images with AgentQL',\n",
+ " 'url': 'https://www.agentql.com/blog/accurate-data-extraction-pdfs-images',\n",
+ " 'date': 'Feb 1, 2025',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Introducing Scheduled Scraping Workflows',\n",
+ " 'url': 'https://www.agentql.com/blog/scheduling',\n",
+ " 'date': 'Dec 2, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Updates to Our Pricing Model',\n",
+ " 'url': 'https://www.agentql.com/blog/2024-pricing-update',\n",
+ " 'date': 'Nov 19, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5',\n",
+ " 'url': 'https://www.agentql.com/blog/data-rest-api',\n",
+ " 'date': 'Nov 15, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'}]},\n",
+ " 'metadata': {'request_id': '0dc1f89c-1b6a-46fe-8089-6cd0f082f094',\n",
+ " 'generated_query': None,\n",
+ " 'screenshot': None}}"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# You can invoke the tool with either a query or a prompt\n",
+ "\n",
+ "# extract_web_data_tool.invoke(\n",
+ "# {\n",
+ "# \"url\": \"https://www.agentql.com/blog\",\n",
+ "# \"prompt\": \"the blog posts with title, url, date of post and author\",\n",
+ "# }\n",
+ "# )\n",
+ "\n",
+ "extract_web_data_tool.invoke(\n",
+ " {\n",
+ " \"url\": \"https://www.agentql.com/blog\",\n",
+ " \"query\": \"{ posts[] { title url date author } }\",\n",
+ " },\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "71zmio-iEEDU",
+ "metadata": {
+ "id": "71zmio-iEEDU"
+ },
+ "source": [
+ "### `ExtractWebDataBrowserTool`\n",
+ "- `query`: The AgentQL query to execute. Use AgentQL query if you want to extract precisely structured data. Learn more about [how to write an AgentQL query in the docs](https://docs.agentql.com/agentql-query) or test one out in the [AgentQL Playground](https://dev.agentql.com/playground).\n",
+ "- `prompt`: A Natural Language description of the data to extract from the page. AgentQL will infer the data’s structure from your prompt. Use `prompt` if you want to extract data defined by free-form language without defining a particular structure. \n",
+ "\n",
+ "**Note:** You must define either a `query` or a `prompt` to use AgentQL.\n",
+ "\n",
+ "To extract data, first you must navigate to a web page using LangChain's [Playwright](https://python.langchain.com/docs/integrations/tools/playwright/) tool."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "Cp9LxO8MaPN1",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "Cp9LxO8MaPN1",
+ "outputId": "08b98158-e451-428f-f2c0-4c7eec9924cd"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "'Navigating to https://www.agentql.com/blog returned status code 200'"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_community.tools.playwright import NavigateTool\n",
+ "\n",
+ "navigate_tool = NavigateTool(async_browser=async_browser)\n",
+ "await navigate_tool.ainvoke({\"url\": \"https://www.agentql.com/blog\"})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "ia5qf2RFEQR9",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ia5qf2RFEQR9",
+ "outputId": "ee9c1396-82c2-4e94-a79e-730319610033"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.11/dist-packages/agentql/_core/_utils.py:167: UserWarning: \u001b[31m🚨 The function get_data_by_prompt_experimental is experimental and may not work as expected 🚨\u001b[0m\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "{'blog_posts': [{'title': 'Launch Week Recap—make the web AI-ready',\n",
+ " 'url': 'https://www.agentql.com/blog/2024-launch-week-recap',\n",
+ " 'date': 'Nov 18, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Accurate data extraction from PDFs and images with AgentQL',\n",
+ " 'url': 'https://www.agentql.com/blog/accurate-data-extraction-pdfs-images',\n",
+ " 'date': 'Feb 1, 2025',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Introducing Scheduled Scraping Workflows',\n",
+ " 'url': 'https://www.agentql.com/blog/scheduling',\n",
+ " 'date': 'Dec 2, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Updates to Our Pricing Model',\n",
+ " 'url': 'https://www.agentql.com/blog/2024-pricing-update',\n",
+ " 'date': 'Nov 19, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'},\n",
+ " {'title': 'Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5',\n",
+ " 'url': 'https://www.agentql.com/blog/data-rest-api',\n",
+ " 'date': 'Nov 15, 2024',\n",
+ " 'author': 'Rachel-Lee Nabors'}]}"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# You can invoke the tool with either a query or a prompt\n",
+ "\n",
+ "# await extract_web_data_browser_tool.ainvoke(\n",
+ "# {'query': '{ blogs[] { title url date author } }'}\n",
+ "# )\n",
+ "\n",
+ "await extract_web_data_browser_tool.ainvoke(\n",
+ " {\"prompt\": \"the blog posts with title, url, date of post and author\"}\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "xg8rKW6jEXEI",
+ "metadata": {
+ "id": "xg8rKW6jEXEI"
+ },
+ "source": [
+ "### `GetWebElementBrowserTool`\n",
+ "- `prompt`: A Natural Language description of the web element to find on the page."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "XgktygByEnas",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "XgktygByEnas",
+ "outputId": "23091977-4b37-415a-97f9-8ed1154de495"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "\"[tf623_id='194']\""
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "selector = await extract_web_element_tool.ainvoke({\"prompt\": \"Next page button\"})\n",
+ "selector"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "O8QJ_NyFfcdh",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "O8QJ_NyFfcdh",
+ "outputId": "ff0033ca-b230-4fc1-b25f-383f7b5e2ca0"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "\"Clicked element '[tf623_id='194']'\""
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_community.tools.playwright import ClickTool\n",
+ "\n",
+ "# Disabling 'visible_only' will allow us to click on elements that are not visible on the page\n",
+ "await ClickTool(async_browser=async_browser, visible_only=False).ainvoke(\n",
+ " {\"selector\": selector}\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "dgsTLAcifoJO",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "dgsTLAcifoJO",
+ "outputId": "376d0bce-3595-4643-c616-764e4f6bb0ed"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "'https://www.agentql.com/blog/page/2'"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_community.tools.playwright import CurrentWebPageTool\n",
+ "\n",
+ "await CurrentWebPageTool(async_browser=async_browser).ainvoke({})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ed9fda2c",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "You can use AgentQL tools in a chain by first binding one to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7fd5dc6f",
+ "metadata": {},
+ "source": [
+ "### Instantiate LLM"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ef351fb1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ac535776",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chat_models import init_chat_model\n",
+ "\n",
+ "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4727f685",
+ "metadata": {},
+ "source": [
+ "### Execute Tool Chain"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "434b4678",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'messages': [ToolMessage(content='{\"data\": {\"posts\": [{\"title\": \"Launch Week Recap—make the web AI-ready\", \"url\": \"https://www.agentql.com/blog/2024-launch-week-recap\", \"date\": \"Nov 18, 2024\", \"author\": \"Rachel-Lee Nabors\"}, {\"title\": \"Accurate data extraction from PDFs and images with AgentQL\", \"url\": \"https://www.agentql.com/blog/accurate-data-extraction-pdfs-images\", \"date\": \"Feb 1, 2025\", \"author\": \"Rachel-Lee Nabors\"}, {\"title\": \"Introducing Scheduled Scraping Workflows\", \"url\": \"https://www.agentql.com/blog/scheduling\", \"date\": \"Dec 2, 2024\", \"author\": \"Rachel-Lee Nabors\"}, {\"title\": \"Updates to Our Pricing Model\", \"url\": \"https://www.agentql.com/blog/2024-pricing-update\", \"date\": \"Nov 19, 2024\", \"author\": \"Rachel-Lee Nabors\"}, {\"title\": \"Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5\", \"url\": \"https://www.agentql.com/blog/data-rest-api\", \"date\": \"Nov 15, 2024\", \"author\": \"Rachel-Lee Nabors\"}]}, \"metadata\": {\"request_id\": \"1a84ed12-d02a-497d-b09d-21fe49342fa3\", \"generated_query\": null, \"screenshot\": null}}', name='extract_web_data_with_rest_api', tool_call_id='call_z4Rl1MpjJZNcbLlq1OCneoMF')]}"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_core.runnables import RunnableConfig, chain\n",
+ "\n",
+ "prompt = ChatPromptTemplate(\n",
+ " [\n",
+ " (\"system\", \"You are a helpful assistant in extracting data from website.\"),\n",
+ " (\"human\", \"{user_input}\"),\n",
+ " (\"placeholder\", \"{messages}\"),\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "# specifying tool_choice will force the model to call this tool.\n",
+ "llm_with_tools = llm.bind_tools(\n",
+ " [extract_web_data_tool], tool_choice=\"extract_web_data_with_rest_api\"\n",
+ ")\n",
+ "\n",
+ "llm_chain = prompt | llm_with_tools\n",
+ "\n",
+ "\n",
+ "@chain\n",
+ "def tool_chain(user_input: str, config: RunnableConfig):\n",
+ " input_ = {\"user_input\": user_input}\n",
+ " ai_msg = llm_chain.invoke(input_, config=config)\n",
+ " tool_msgs = extract_web_data_tool.batch(ai_msg.tool_calls, config=config)\n",
+ " return {\"messages\": tool_msgs}\n",
+ "\n",
+ "\n",
+ "tool_chain.invoke(\n",
+ " \"Extract data from https://www.agentql.com/blog using the following agentql query: { posts[] { title url date author } }\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "-SknEjzZXQWg",
+ "metadata": {
+ "id": "-SknEjzZXQWg"
+ },
+ "source": [
+ "## Use within an agent\n",
+ "\n",
+    "You can use AgentQL tools with an AI Agent using the `AgentQLBrowserToolkit`. This toolkit includes `ExtractDataBrowserTool` and `GetWebElementBrowserTool`. Here's an example of agentic browser actions that combine AgentQL's toolkit with the Playwright tools."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "VLzyKpfAmvv7",
+ "metadata": {
+ "id": "VLzyKpfAmvv7"
+ },
+ "source": [
+ "### Instantiate Toolkit\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "UGsFDL1atP3y",
+ "metadata": {
+ "id": "UGsFDL1atP3y"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_agentql.utils import create_async_playwright_browser\n",
+ "\n",
+ "async_agent_browser = await create_async_playwright_browser()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "nCWN9X118rtF",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "nCWN9X118rtF",
+ "outputId": "0ef8160e-f2a3-4ad5-f53d-8cb0f0d71367"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[ExtractWebDataBrowserTool(async_browser= version=133.0.6943.16>),\n",
+ " GetWebElementBrowserTool(async_browser= version=133.0.6943.16>)]"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_agentql import AgentQLBrowserToolkit\n",
+ "\n",
+ "agentql_toolkit = AgentQLBrowserToolkit(async_browser=async_agent_browser)\n",
+ "agentql_toolkit.get_tools()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "rBYb-I6Tp56C",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "rBYb-I6Tp56C",
+ "outputId": "b1ccaa79-425b-4137-cd4d-bffbc32fc395"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[NavigateTool(async_browser= version=133.0.6943.16>),\n",
+ " ClickTool(async_browser= version=133.0.6943.16>, visible_only=False)]"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_community.tools.playwright import ClickTool, NavigateTool\n",
+ "\n",
+ "# we hand pick the following tools to allow more precise agentic browser actions\n",
+ "playwright_toolkit = [\n",
+ " NavigateTool(async_browser=async_agent_browser),\n",
+ " ClickTool(async_browser=async_agent_browser, visible_only=False),\n",
+ "]\n",
+ "playwright_toolkit"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5c_KiBCHqTjv",
+ "metadata": {
+ "id": "5c_KiBCHqTjv"
+ },
+ "source": [
+ "### Use with a ReAct Agent\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "HaAPyYr7quau",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "HaAPyYr7quau",
+ "outputId": "2de2496c-06eb-47db-91d3-f2171caf0640"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install --quiet -U langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "ShcjQ352qRS_",
+ "metadata": {
+ "id": "ShcjQ352qRS_"
+ },
+ "outputs": [],
+ "source": [
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "# You need to set up an llm, please refer to the chaining section\n",
+ "agent_executor = create_react_agent(\n",
+ " llm, agentql_toolkit.get_tools() + playwright_toolkit\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "id": "YEdeprRCq_7E",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "YEdeprRCq_7E",
+ "outputId": "f834b9f3-802d-49d6-c5c3-06c86d6d82e8"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "\n",
+ "Navigate to https://news.ycombinator.com/,\n",
+ "extract the news titles on the current page,\n",
+ "show the current page url,\n",
+ "find the button on the webpage that direct to the next page,\n",
+ "click on the button,\n",
+ "show the current page url,\n",
+ "extract the news title on the current page\n",
+ "extract the news titles that mention \"AI\" from the two pages.\n",
+ "\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " navigate_browser (call_3eY5a0BRwyYj7kaNpAxkquTD)\n",
+ " Call ID: call_3eY5a0BRwyYj7kaNpAxkquTD\n",
+ " Args:\n",
+ " url: https://news.ycombinator.com/\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: navigate_browser\n",
+ "\n",
+ "Navigating to https://news.ycombinator.com/ returned status code 200\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " extract_web_data_from_browser (call_WvRrZKGGo8mq3JewRlaIS5xx)\n",
+ " Call ID: call_WvRrZKGGo8mq3JewRlaIS5xx\n",
+ " Args:\n",
+ " prompt: Extract all the news titles from this page.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.11/dist-packages/agentql/_core/_utils.py:167: UserWarning: \u001b[31m🚨 The function get_data_by_prompt_experimental is experimental and may not work as expected 🚨\u001b[0m\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: extract_web_data_from_browser\n",
+ "\n",
+ "{\"news_item\": [{\"title\": \"I Went to SQL Injection Court\"}, {\"title\": \"Framework's first desktop is a strange–but unique–mini ITX gaming PC\"}, {\"title\": \"Hyperspace\"}, {\"title\": \"The XB-70 (2019)\"}, {\"title\": \"How core Git developers configure Git\"}, {\"title\": \"Emergent Misalignment: Narrow finetuning can produce broadly misaligned LLMs [pdf]\"}, {\"title\": \"Hard problems that reduce to document ranking\"}, {\"title\": \"Ggwave: Tiny Data-over-Sound Library\"}, {\"title\": \"Bald eagles are thriving again after near extinction\"}, {\"title\": \"Forum with 2.6M posts being deleted due to UK Online Safety Act\"}, {\"title\": \"Launch HN: Browser Use (YC W25) – open-source web agents\"}, {\"title\": \"Part two of Grant Sanderson's video with Terry Tao on the cosmic distance ladder\"}, {\"title\": \"New maps of the chaotic space-time inside black holes\"}, {\"title\": \"Knitting Your Parachute\"}, {\"title\": \"Chicory: A JVM native WebAssembly runtime\"}, {\"title\": \"Low Overhead Allocation Sampling with VMProf in PyPy's GC\"}, {\"title\": \"Sigma BF Camera\"}, {\"title\": \"DeepSearcher: A local open-source Deep Research\"}, {\"title\": \"Xonsh – A Python-powered shell\"}, {\"title\": \"A possible future of Python in the browser\"}, {\"title\": \"Show HN: GoatDB – A lightweight, offline-first, realtime NoDB for Deno and React\"}, {\"title\": \"Embedding Python in Elixir, it's fine\"}, {\"title\": \"The Deep Research problem\"}, {\"title\": \"Why are QR Codes with capital letters smaller than QR codes with lower case?\"}, {\"title\": \"Show HN: My new wiki for Silicon Graphics stuff\"}, {\"title\": \"AI is blurring the line between PMs and engineers?\"}, {\"title\": \"I recreated Shazam's algorithm with Go [video]\"}, {\"title\": \"Dogs may have domesticated themselves because they liked snacks, model suggests\"}, {\"title\": \"Show HN: Txtl – Fast static website of text utilities\"}, {\"title\": \"Have we been wrong about why Mars is 
red?\"}]}\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " get_web_element_from_browser (call_B6jn5ItasceNW7eeb640UhQQ)\n",
+ " Call ID: call_B6jn5ItasceNW7eeb640UhQQ\n",
+ " Args:\n",
+ " prompt: button or link to go to the next page\n",
+ " extract_web_data_from_browser (call_Wyh2VH76bzrlDozp7gpkVBl7)\n",
+ " Call ID: call_Wyh2VH76bzrlDozp7gpkVBl7\n",
+ " Args:\n",
+ " prompt: Extract the current page URL\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.11/dist-packages/agentql/_core/_utils.py:167: UserWarning: \u001b[31m🚨 The function get_data_by_prompt_experimental is experimental and may not work as expected 🚨\u001b[0m\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: extract_web_data_from_browser\n",
+ "\n",
+ "{\"current_page_url\": \"https://news.ycombinator.com/news\"}\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " click_element (call_NLGIW1lLutkZ6k0vqkfGbOD7)\n",
+ " Call ID: call_NLGIW1lLutkZ6k0vqkfGbOD7\n",
+ " Args:\n",
+ " selector: [tf623_id='944']\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: click_element\n",
+ "\n",
+ "Clicked element '[tf623_id='944']'\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " extract_web_data_from_browser (call_QPt8R2hqiSgytUvLcWUUORKF)\n",
+ " Call ID: call_QPt8R2hqiSgytUvLcWUUORKF\n",
+ " Args:\n",
+ " prompt: Extract the current page URL\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.11/dist-packages/agentql/_core/_utils.py:167: UserWarning: \u001b[31m🚨 The function get_data_by_prompt_experimental is experimental and may not work as expected 🚨\u001b[0m\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: extract_web_data_from_browser\n",
+ "\n",
+ "{\"current_page_url\": \"https://news.ycombinator.com/news?p=2\"}\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " extract_web_data_from_browser (call_ZZOPrIfVaVQ1A26j8EGE913W)\n",
+ " Call ID: call_ZZOPrIfVaVQ1A26j8EGE913W\n",
+ " Args:\n",
+ " prompt: Extract all the news titles from this page.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.11/dist-packages/agentql/_core/_utils.py:167: UserWarning: \u001b[31m🚨 The function get_data_by_prompt_experimental is experimental and may not work as expected 🚨\u001b[0m\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: extract_web_data_from_browser\n",
+ "\n",
+ "{\"news_item\": [{\"title\": \"'Hey Number 17 '\"}, {\"title\": \"Building and operating a pretty big storage system called S3 (2023)\"}, {\"title\": \"Ghost House – software for automatic inbetweens\"}, {\"title\": \"Ask HN: Former devs who can't get a job, what did you end up doing for work?\"}, {\"title\": \"DeepSeek open source DeepEP – library for MoE training and Inference\"}, {\"title\": \"SETI's hard steps and how to resolve them\"}, {\"title\": \"A Defense of Weird Research\"}, {\"title\": \"DigiCert: Threat of legal action to stifle Bugzilla discourse\"}, {\"title\": \"Show HN: Tach – Visualize and untangle your Python codebase\"}, {\"title\": \"Ask HN: A retrofitted C dialect?\"}, {\"title\": \"“The closer to the train station, the worse the kebab” – a “study”\"}, {\"title\": \"Brewing Clean Water: The metal-remediating benefits of tea preparation\"}, {\"title\": \"Invoker Commands (Explainer)\"}, {\"title\": \"Freelancing: How I found clients, part 1\"}, {\"title\": \"Claude 3.7 Sonnet and Claude Code\"}, {\"title\": \"Clean Code vs. 
A Philosophy Of Software Design\"}, {\"title\": \"Show HN: While the world builds AI Agents, I'm just building calculators\"}, {\"title\": \"History of CAD\"}, {\"title\": \"Fans are better than tech at organizing information online (2019)\"}, {\"title\": \"Some Programming Language Ideas\"}, {\"title\": \"The independent researcher (2018)\"}, {\"title\": \"The best way to use text embeddings portably is with Parquet and Polars\"}, {\"title\": \"Show HN: Prioritize Anything with Stacks\"}, {\"title\": \"Ashby (YC W19) Is Hiring Principal Product Engineers\"}, {\"title\": \"GibberLink [AI-AI Communication]\"}, {\"title\": \"Show HN: I made a site to tell the time in corporate\"}, {\"title\": \"It’s still worth blogging in the age of AI\"}, {\"title\": \"What would happen if we didn't use TCP or UDP?\"}, {\"title\": \"Closing the “green gap”: energy savings from the math of the landscape function\"}, {\"title\": \"Larry Ellison's half-billion-dollar quest to change farming\"}]}\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "\n",
+ "Here's a summary of the actions and results:\n",
+ "\n",
+ "### Page 1\n",
+ "- **URL:** [https://news.ycombinator.com/news](https://news.ycombinator.com/news)\n",
+ "- **News Titles:**\n",
+ " 1. I Went to SQL Injection Court\n",
+ " 2. Framework's first desktop is a strange–but unique–mini ITX gaming PC\n",
+ " 3. Hyperspace\n",
+ " 4. The XB-70 (2019)\n",
+ " 5. How core Git developers configure Git\n",
+ " 6. Emergent Misalignment: Narrow finetuning can produce broadly misaligned LLMs [pdf]\n",
+ " 7. Hard problems that reduce to document ranking\n",
+ " 8. Ggwave: Tiny Data-over-Sound Library\n",
+ " 9. Bald eagles are thriving again after near extinction\n",
+ " 10. Forum with 2.6M posts being deleted due to UK Online Safety Act\n",
+ " 11. Launch HN: Browser Use (YC W25) – open-source web agents\n",
+ " 12. Part two of Grant Sanderson's video with Terry Tao on the cosmic distance ladder\n",
+ " 13. New maps of the chaotic space-time inside black holes\n",
+ " 14. Knitting Your Parachute\n",
+ " 15. Chicory: A JVM native WebAssembly runtime\n",
+ " 16. Low Overhead Allocation Sampling with VMProf in PyPy's GC\n",
+ " 17. Sigma BF Camera\n",
+ " 18. DeepSearcher: A local open-source Deep Research\n",
+ " 19. Xonsh – A Python-powered shell\n",
+ " 20. A possible future of Python in the browser\n",
+ " 21. Show HN: GoatDB – A lightweight, offline-first, realtime NoDB for Deno and React\n",
+ " 22. Embedding Python in Elixir, it's fine\n",
+ " 23. The Deep Research problem\n",
+ " 24. Why are QR Codes with capital letters smaller than QR codes with lower case?\n",
+ " 25. Show HN: My new wiki for Silicon Graphics stuff\n",
+ " 26. **AI is blurring the line between PMs and engineers?**\n",
+ " 27. I recreated Shazam's algorithm with Go [video]\n",
+ " 28. Dogs may have domesticated themselves because they liked snacks, model suggests\n",
+ " 29. Show HN: Txtl – Fast static website of text utilities\n",
+ " 30. Have we been wrong about why Mars is red?\n",
+ "\n",
+ "### Page 2\n",
+ "- **URL:** [https://news.ycombinator.com/news?p=2](https://news.ycombinator.com/news?p=2)\n",
+ "- **News Titles:**\n",
+ " 1. 'Hey Number 17'\n",
+ " 2. Building and operating a pretty big storage system called S3 (2023)\n",
+ " 3. Ghost House – software for automatic inbetweens\n",
+ " 4. Ask HN: Former devs who can't get a job, what did you end up doing for work?\n",
+ " 5. DeepSeek open source DeepEP – library for MoE training and Inference\n",
+ " 6. SETI's hard steps and how to resolve them\n",
+ " 7. A Defense of Weird Research\n",
+ " 8. DigiCert: Threat of legal action to stifle Bugzilla discourse\n",
+ " 9. Show HN: Tach – Visualize and untangle your Python codebase\n",
+ " 10. Ask HN: A retrofitted C dialect?\n",
+ " 11. “The closer to the train station, the worse the kebab” – a “study”\n",
+ " 12. Brewing Clean Water: The metal-remediating benefits of tea preparation\n",
+ " 13. Invoker Commands (Explainer)\n",
+ " 14. Freelancing: How I found clients, part 1\n",
+ " 15. Claude 3.7 Sonnet and Claude Code\n",
+ " 16. Clean Code vs. A Philosophy Of Software Design\n",
+ " 17. **Show HN: While the world builds AI Agents, I'm just building calculators**\n",
+ " 18. History of CAD\n",
+ " 19. Fans are better than tech at organizing information online (2019)\n",
+ " 20. Some Programming Language Ideas\n",
+ " 21. The independent researcher (2018)\n",
+ " 22. The best way to use text embeddings portably is with Parquet and Polars\n",
+ " 23. Show HN: Prioritize Anything with Stacks\n",
+ " 24. Ashby (YC W19) Is Hiring Principal Product Engineers\n",
+ " 25. **GibberLink [AI-AI Communication]**\n",
+ " 26. Show HN: I made a site to tell the time in corporate\n",
+ " 27. **It’s still worth blogging in the age of AI**\n",
+ " 28. What would happen if we didn't use TCP or UDP?\n",
+ " 29. Closing the “green gap”: energy savings from the math of the landscape function\n",
+ " 30. Larry Ellison's half-billion-dollar quest to change farming\n",
+ "\n",
+ "### News Titles Mentioning \"AI\":\n",
+ "1. Page 1: **AI is blurring the line between PMs and engineers?**\n",
+ "2. Page 2:\n",
+ " - **Show HN: While the world builds AI Agents, I'm just building calculators**\n",
+ " - **GibberLink [AI-AI Communication]**\n",
+ " - **It’s still worth blogging in the age of AI**\n"
+ ]
+ }
+ ],
+ "source": [
+ "prompt = \"\"\"\n",
+ "Navigate to https://news.ycombinator.com/,\n",
+ "extract the news titles on the current page,\n",
+ "show the current page url,\n",
+ "find the button on the webpage that direct to the next page,\n",
+ "click on the button,\n",
+ "show the current page url,\n",
+ "extract the news title on the current page\n",
+ "extract the news titles that mention \"AI\" from the two pages.\n",
+ "\"\"\"\n",
+ "\n",
+ "events = agent_executor.astream(\n",
+ " {\"messages\": [(\"user\", prompt)]},\n",
+ " stream_mode=\"values\",\n",
+ ")\n",
+ "async for event in events:\n",
+ " event[\"messages\"][-1].pretty_print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9b3660a4",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For more information on how to use this integration, please refer to the [git repo](https://github.com/tinyfish-io/agentql-integrations/tree/main/langchain) or the [langchain integration documentation](https://docs.agentql.com/integrations/langchain)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/tools/apify_actors.ipynb b/docs/docs/integrations/tools/apify_actors.ipynb
index 33b564a6ec2..00905bc6837 100644
--- a/docs/docs/integrations/tools/apify_actors.ipynb
+++ b/docs/docs/integrations/tools/apify_actors.ipynb
@@ -1,256 +1,258 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "_9MNj58sIkGN"
- },
- "source": [
- "# Apify Actor\n",
- "\n",
- "## Overview\n",
- "\n",
- ">[Apify Actors](https://docs.apify.com/platform/actors) are cloud programs designed for a wide range of web scraping, crawling, and data extraction tasks. These actors facilitate automated data gathering from the web, enabling users to extract, process, and store information efficiently. Actors can be used to perform tasks like scraping e-commerce sites for product details, monitoring price changes, or gathering search engine results. They integrate seamlessly with [Apify Datasets](https://docs.apify.com/platform/storage/dataset), allowing the structured data collected by actors to be stored, managed, and exported in formats like JSON, CSV, or Excel for further analysis or use.\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "OHLF9t9v9HCb"
- },
- "source": [
- "## Setup\n",
- "\n",
- "This integration lives in the [langchain-apify](https://pypi.org/project/langchain-apify/) package. The package can be installed using pip.\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "4DdGmBn5IbXz"
- },
- "outputs": [],
- "source": [
- "%pip install langchain-apify"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "rEAwonXqwggR"
- },
- "source": [
- "### Prerequisites\n",
- "\n",
- "- **Apify account**: Register your free Apify account [here](https://console.apify.com/sign-up).\n",
- "- **Apify API token**: Learn how to get your API token in the [Apify documentation](https://docs.apify.com/platform/integrations/api)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "9nJOl4MBMkcR"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "os.environ[\"APIFY_API_TOKEN\"] = \"your-apify-api-token\"\n",
- "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "UfoQxAlCxR9q"
- },
- "source": [
- "## Instantiation"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "qG9KtXtLM8i7"
- },
- "source": [
- "Here we instantiate the `ApifyActorsTool` to be able to call [RAG Web Browser](https://apify.com/apify/rag-web-browser) Apify Actor. This Actor provides web browsing functionality for AI and LLM applications, similar to the web browsing feature in ChatGPT. Any Actor from the [Apify Store](https://apify.com/store) can be used in this way."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 43,
- "metadata": {
- "id": "cyxeTlPnM4Ya"
- },
- "outputs": [],
- "source": [
- "from langchain_apify import ApifyActorsTool\n",
- "\n",
- "tool = ApifyActorsTool(\"apify/rag-web-browser\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "fGDLvDCqyKWO"
- },
- "source": [
- "## Invocation\n",
- "\n",
- "The `ApifyActorsTool` takes a single argument, which is `run_input` - a dictionary that is passed as a run input to the Actor. Run input schema documentation can be found in the input section of the Actor details page. See [RAG Web Browser input schema](https://apify.com/apify/rag-web-browser/input-schema).\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "nTWy6Hx1yk04"
- },
- "outputs": [],
- "source": [
- "tool.invoke({\"run_input\": {\"query\": \"what is apify?\", \"maxResults\": 2}})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "kQsa27hoO58S"
- },
- "source": [
- "## Chaining\n",
- "\n",
- "We can provide the created tool to an [agent](https://python.langchain.com/docs/tutorials/agents/). When asked to search for information, the agent will call the Apify Actor, which will search the web, and then retrieve the search results.\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "YySvLskW72Y8"
- },
- "outputs": [],
- "source": [
- "%pip install langgraph langchain-openai"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 44,
- "metadata": {
- "id": "QEDz07btO5Gi"
- },
- "outputs": [],
- "source": [
- "from langchain_core.messages import ToolMessage\n",
- "from langchain_openai import ChatOpenAI\n",
- "from langgraph.prebuilt import create_react_agent\n",
- "\n",
- "model = ChatOpenAI(model=\"gpt-4o\")\n",
- "tools = [tool]\n",
- "graph = create_react_agent(model, tools=tools)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 45,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "XS1GEyNkQxGu",
- "outputId": "195273d7-034c-425b-f3f9-95c0a9fb0c9e"
- },
- "outputs": [
+ "cells": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "================================\u001b[1m Human Message \u001b[0m=================================\n",
- "\n",
- "search for what is Apify\n",
- "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
- "Tool Calls:\n",
- " apify_actor_apify_rag-web-browser (call_27mjHLzDzwa5ZaHWCMH510lm)\n",
- " Call ID: call_27mjHLzDzwa5ZaHWCMH510lm\n",
- " Args:\n",
- " run_input: {\"run_input\":{\"query\":\"Apify\",\"maxResults\":3,\"outputFormats\":[\"markdown\"]}}\n",
- "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
- "\n",
- "Apify is a comprehensive platform for web scraping, browser automation, and data extraction. It offers a wide array of tools and services that cater to developers and businesses looking to extract data from websites efficiently and effectively. Here's an overview of Apify:\n",
- "\n",
- "1. **Ecosystem and Tools**:\n",
- " - Apify provides an ecosystem where developers can build, deploy, and publish data extraction and web automation tools called Actors.\n",
- " - The platform supports various use cases such as extracting data from social media platforms, conducting automated browser-based tasks, and more.\n",
- "\n",
- "2. **Offerings**:\n",
- " - Apify offers over 3,000 ready-made scraping tools and code templates.\n",
- " - Users can also build custom solutions or hire Apify's professional services for more tailored data extraction needs.\n",
- "\n",
- "3. **Technology and Integration**:\n",
- " - The platform supports integration with popular tools and services like Zapier, GitHub, Google Sheets, Pinecone, and more.\n",
- " - Apify supports open-source tools and technologies such as JavaScript, Python, Puppeteer, Playwright, Selenium, and its own Crawlee library for web crawling and browser automation.\n",
- "\n",
- "4. **Community and Learning**:\n",
- " - Apify hosts a community on Discord where developers can get help and share expertise.\n",
- " - It offers educational resources through the Web Scraping Academy to help users become proficient in data scraping and automation.\n",
- "\n",
- "5. **Enterprise Solutions**:\n",
- " - Apify provides enterprise-grade web data extraction solutions with high reliability, 99.95% uptime, and compliance with SOC2, GDPR, and CCPA standards.\n",
- "\n",
- "For more information, you can visit [Apify's official website](https://apify.com/) or their [GitHub page](https://github.com/apify) which contains their code repositories and further details about their projects.\n"
- ]
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "_9MNj58sIkGN"
+ },
+ "source": [
+ "# Apify Actor\n",
+ "\n",
+ ">[Apify Actors](https://docs.apify.com/platform/actors) are cloud programs designed for a wide range of web scraping, crawling, and data extraction tasks. These actors facilitate automated data gathering from the web, enabling users to extract, process, and store information efficiently. Actors can be used to perform tasks like scraping e-commerce sites for product details, monitoring price changes, or gathering search engine results. They integrate seamlessly with [Apify Datasets](https://docs.apify.com/platform/storage/dataset), allowing the structured data collected by actors to be stored, managed, and exported in formats like JSON, CSV, or Excel for further analysis or use.\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "This notebook walks you through using [Apify Actors](https://docs.apify.com/platform/actors) with LangChain to automate web scraping and data extraction. The `langchain-apify` package integrates Apify's cloud-based tools with LangChain agents, enabling efficient data collection and processing for AI applications.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "OHLF9t9v9HCb"
+ },
+ "source": [
+ "## Setup\n",
+ "\n",
+ "This integration lives in the [langchain-apify](https://pypi.org/project/langchain-apify/) package. The package can be installed using pip.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "4DdGmBn5IbXz"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install langchain-apify"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rEAwonXqwggR"
+ },
+ "source": [
+ "### Prerequisites\n",
+ "\n",
+ "- **Apify account**: Register your free Apify account [here](https://console.apify.com/sign-up).\n",
+ "- **Apify API token**: Learn how to get your API token in the [Apify documentation](https://docs.apify.com/platform/integrations/api)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "9nJOl4MBMkcR"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "os.environ[\"APIFY_API_TOKEN\"] = \"your-apify-api-token\"\n",
+ "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "UfoQxAlCxR9q"
+ },
+ "source": [
+ "## Instantiation"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "qG9KtXtLM8i7"
+ },
+ "source": [
+ "Here we instantiate the `ApifyActorsTool` to be able to call [RAG Web Browser](https://apify.com/apify/rag-web-browser) Apify Actor. This Actor provides web browsing functionality for AI and LLM applications, similar to the web browsing feature in ChatGPT. Any Actor from the [Apify Store](https://apify.com/store) can be used in this way."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "cyxeTlPnM4Ya"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_apify import ApifyActorsTool\n",
+ "\n",
+ "tool = ApifyActorsTool(\"apify/rag-web-browser\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "fGDLvDCqyKWO"
+ },
+ "source": [
+ "## Invocation\n",
+ "\n",
+ "The `ApifyActorsTool` takes a single argument, which is `run_input` - a dictionary that is passed as a run input to the Actor. Run input schema documentation can be found in the input section of the Actor details page. See [RAG Web Browser input schema](https://apify.com/apify/rag-web-browser/input-schema).\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "nTWy6Hx1yk04"
+ },
+ "outputs": [],
+ "source": [
+ "tool.invoke({\"run_input\": {\"query\": \"what is apify?\", \"maxResults\": 2}})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kQsa27hoO58S"
+ },
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "We can provide the created tool to an [agent](https://python.langchain.com/docs/tutorials/agents/). When asked to search for information, the agent will call the Apify Actor, which will search the web, and then retrieve the search results.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "YySvLskW72Y8"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install langgraph langchain-openai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "QEDz07btO5Gi"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_core.messages import ToolMessage\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "model = ChatOpenAI(model=\"gpt-4o\")\n",
+ "tools = [tool]\n",
+ "graph = create_react_agent(model, tools=tools)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "XS1GEyNkQxGu",
+ "outputId": "195273d7-034c-425b-f3f9-95c0a9fb0c9e"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "search for what is Apify\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " apify_actor_apify_rag-web-browser (call_27mjHLzDzwa5ZaHWCMH510lm)\n",
+ " Call ID: call_27mjHLzDzwa5ZaHWCMH510lm\n",
+ " Args:\n",
+ " run_input: {\"run_input\":{\"query\":\"Apify\",\"maxResults\":3,\"outputFormats\":[\"markdown\"]}}\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "\n",
+ "Apify is a comprehensive platform for web scraping, browser automation, and data extraction. It offers a wide array of tools and services that cater to developers and businesses looking to extract data from websites efficiently and effectively. Here's an overview of Apify:\n",
+ "\n",
+ "1. **Ecosystem and Tools**:\n",
+ " - Apify provides an ecosystem where developers can build, deploy, and publish data extraction and web automation tools called Actors.\n",
+ " - The platform supports various use cases such as extracting data from social media platforms, conducting automated browser-based tasks, and more.\n",
+ "\n",
+ "2. **Offerings**:\n",
+ " - Apify offers over 3,000 ready-made scraping tools and code templates.\n",
+ " - Users can also build custom solutions or hire Apify's professional services for more tailored data extraction needs.\n",
+ "\n",
+ "3. **Technology and Integration**:\n",
+ " - The platform supports integration with popular tools and services like Zapier, GitHub, Google Sheets, Pinecone, and more.\n",
+ " - Apify supports open-source tools and technologies such as JavaScript, Python, Puppeteer, Playwright, Selenium, and its own Crawlee library for web crawling and browser automation.\n",
+ "\n",
+ "4. **Community and Learning**:\n",
+ " - Apify hosts a community on Discord where developers can get help and share expertise.\n",
+ " - It offers educational resources through the Web Scraping Academy to help users become proficient in data scraping and automation.\n",
+ "\n",
+ "5. **Enterprise Solutions**:\n",
+ " - Apify provides enterprise-grade web data extraction solutions with high reliability, 99.95% uptime, and compliance with SOC2, GDPR, and CCPA standards.\n",
+ "\n",
+ "For more information, you can visit [Apify's official website](https://apify.com/) or their [GitHub page](https://github.com/apify) which contains their code repositories and further details about their projects.\n"
+ ]
+ }
+ ],
+ "source": [
+ "inputs = {\"messages\": [(\"user\", \"search for what is Apify\")]}\n",
+ "for s in graph.stream(inputs, stream_mode=\"values\"):\n",
+ " message = s[\"messages\"][-1]\n",
+ " # skip tool messages\n",
+ " if isinstance(message, ToolMessage):\n",
+ " continue\n",
+ " message.pretty_print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WYXuQIQx8AvG"
+ },
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For more information on how to use this integration, see the [git repository](https://github.com/apify/langchain-apify) or the [Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "f1NnMik78oib"
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
}
- ],
- "source": [
- "inputs = {\"messages\": [(\"user\", \"search for what is Apify\")]}\n",
- "for s in graph.stream(inputs, stream_mode=\"values\"):\n",
- " message = s[\"messages\"][-1]\n",
- " # skip tool messages\n",
- " if isinstance(message, ToolMessage):\n",
- " continue\n",
- " message.pretty_print()"
- ]
},
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "WYXuQIQx8AvG"
- },
- "source": [
- "## API reference\n",
- "\n",
- "For more information on how to use this integration, see the [git repository](https://github.com/apify/langchain-apify) or the [Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "f1NnMik78oib"
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "colab": {
- "provenance": [],
- "toc_visible": true
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/docs/docs/integrations/tools/opengradient_toolkit.ipynb b/docs/docs/integrations/tools/opengradient_toolkit.ipynb
new file mode 100644
index 00000000000..bfc81404f26
--- /dev/null
+++ b/docs/docs/integrations/tools/opengradient_toolkit.ipynb
@@ -0,0 +1,345 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "afaf8039",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_label: OpenGradient\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e49f1e0d",
+ "metadata": {},
+ "source": [
+ "# OpenGradientToolkit\n",
+ "\n",
+ "This notebook shows how to build tools using the OpenGradient toolkit. This toolkit gives users the ability to create custom tools based on models and workflows on the [OpenGradient network](https://www.opengradient.ai/).\n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "Ensure that you have an OpenGradient API key in order to access the OpenGradient network. If you already have an API key, simply set the environment variable:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8f7303e1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!export OPENGRADIENT_PRIVATE_KEY=\"your-api-key\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0a7af45e",
+ "metadata": {},
+ "source": [
+ "If you need to set up a new API key, download the opengradient SDK and follow the instructions to initialize a new configuration."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a7777f1e",
+ "metadata": {
+ "vscode": {
+ "languageId": "shellscript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "!pip install opengradient\n",
+ "!opengradient config init"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
+ "metadata": {},
+ "source": [
+ "### Installation\n",
+ "\n",
+ "This toolkit lives in the `langchain-opengradient` package:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-opengradient"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a38cde65-254d-4219-a441-068766c0d4b5",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our toolkit with the API key from before."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_opengradient import OpenGradientToolkit\n",
+ "\n",
+ "toolkit = OpenGradientToolkit(\n",
+ " # Not required if you have already set the environment variable OPENGRADIENT_PRIVATE_KEY\n",
+ " private_key=\"your-api-key\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ad986625",
+ "metadata": {},
+ "source": [
+ "## Build your own tools\n",
+ "The OpenGradientToolkit offers two main methods for creating custom tools:\n",
+ "\n",
+ "### 1. Create a tool to run ML models\n",
+ "You can create tools that leverage ML models deployed on the [OpenGradient model hub](https://hub.opengradient.ai/). User-created models can be uploaded, inferenced, and shared to the model hub through the [OpenGradient SDK](https://docs.opengradient.ai/developers/sdk/model_management.html).\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f7a03746",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import opengradient as og\n",
+ "from pydantic import BaseModel, Field\n",
+ "\n",
+ "\n",
+ "# Example 1: Simple tool with no input schema\n",
+ "def price_data_provider():\n",
+ " \"\"\"Function that provides input data to the model.\"\"\"\n",
+ " return {\n",
+ " \"open_high_low_close\": [\n",
+ " [2535.79, 2535.79, 2505.37, 2515.36],\n",
+ " [2515.37, 2516.37, 2497.27, 2506.94],\n",
+ " [2506.94, 2515, 2506.35, 2508.77],\n",
+ " [2508.77, 2519, 2507.55, 2518.79],\n",
+ " [2518.79, 2522.1, 2513.79, 2517.92],\n",
+ " [2517.92, 2521.4, 2514.65, 2518.13],\n",
+ " [2518.13, 2525.4, 2517.2, 2522.6],\n",
+ " [2522.59, 2528.81, 2519.49, 2526.12],\n",
+ " [2526.12, 2530, 2524.11, 2529.99],\n",
+ " [2529.99, 2530.66, 2525.29, 2526],\n",
+ " ]\n",
+ " }\n",
+ "\n",
+ "\n",
+ "def format_volatility(inference_result):\n",
+ " \"\"\"Function that formats the model output.\"\"\"\n",
+ " return format(float(inference_result.model_output[\"Y\"].item()), \".3%\")\n",
+ "\n",
+ "\n",
+ "# Create the tool\n",
+ "volatility_tool = toolkit.create_run_model_tool(\n",
+ " model_cid=\"QmRhcpDXfYCKsimTmJYrAVM4Bbvck59Zb2onj3MHv9Kw5N\",\n",
+ " tool_name=\"eth_volatility\",\n",
+ " model_input_provider=price_data_provider,\n",
+ " model_output_formatter=format_volatility,\n",
+ " tool_description=\"Generates volatility measurement for ETH/USDT trading pair\",\n",
+ " inference_mode=og.InferenceMode.VANILLA,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Example 2: Tool with input schema from the agent\n",
+ "class TokenInputSchema(BaseModel):\n",
+ " token: str = Field(description=\"Token name (ethereum or bitcoin)\")\n",
+ "\n",
+ "\n",
+ "def token_data_provider(**inputs):\n",
+ " \"\"\"Dynamic function that changes behavior based on agent input.\"\"\"\n",
+ " token = inputs.get(\"token\")\n",
+ " if token == \"bitcoin\":\n",
+ " return {\"price_series\": [100001.1, 100013.2, 100149.2, 99998.1]}\n",
+ " else: # ethereum\n",
+ " return {\"price_series\": [2010.1, 2012.3, 2020.1, 2019.2]}\n",
+ "\n",
+ "\n",
+ "# Create the tool with schema\n",
+ "token_tool = toolkit.create_run_model_tool(\n",
+ " model_cid=\"QmZdSfHWGJyzBiB2K98egzu3MypPcv4R1ASypUxwZ1MFUG\",\n",
+ " tool_name=\"token_volatility\",\n",
+ " model_input_provider=token_data_provider,\n",
+ " model_output_formatter=lambda x: format(float(x.model_output[\"std\"].item()), \".3%\"),\n",
+ " tool_input_schema=TokenInputSchema,\n",
+ " tool_description=\"Measures return volatility for a specified token\",\n",
+ ")\n",
+ "\n",
+ "# Add tools to the toolkit\n",
+ "toolkit.add_tool(volatility_tool)\n",
+ "toolkit.add_tool(token_tool)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "45627b99",
+ "metadata": {},
+ "source": [
+ "### 2. Create a tool to read workflow results\n",
+ "\n",
+ "Read workflows are scheduled inferences that regularly run models stored on smart-contracts with live oracle data. More information on these can be [found here](https://docs.opengradient.ai/developers/sdk/ml_workflows.html).\n",
+ "\n",
+ "You can create tools that read results from workflow smart contracts:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "197cccbd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create a tool to read from a workflow\n",
+ "forecast_tool = toolkit.create_read_workflow_tool(\n",
+ " workflow_contract_address=\"0x58826c6dc9A608238d9d57a65bDd50EcaE27FE99\",\n",
+ " tool_name=\"ETH_Price_Forecast\",\n",
+ " tool_description=\"Reads latest forecast for ETH price from deployed workflow\",\n",
+ " output_formatter=lambda x: f\"Price change forecast: {format(float(x.numbers['regression_output'].item()), '.2%')}\",\n",
+ ")\n",
+ "\n",
+ "# Add the tool to the toolkit\n",
+ "toolkit.add_tool(forecast_tool)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5c5f2839-4020-424e-9fc9-07777eede442",
+ "metadata": {},
+ "source": [
+ "## Tools\n",
+ "\n",
+ "Use the built in `get_tools()` method to view a list of the available tools within the OpenGradient toolkit."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tools = toolkit.get_tools()\n",
+ "\n",
+ "# View tools\n",
+ "for tool in tools:\n",
+ " print(tool)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e",
+ "metadata": {},
+ "source": [
+ "## Use within an agent\n",
+ "Here's how to use your OpenGradient tools with a LangChain agent:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_openai import ChatOpenAI\n",
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "# Initialize LLM\n",
+ "llm = ChatOpenAI(model=\"gpt-4o\")\n",
+ "\n",
+ "# Create tools from the toolkit\n",
+ "tools = toolkit.get_tools()\n",
+ "\n",
+ "# Create agent\n",
+ "agent_executor = create_react_agent(llm, tools)\n",
+ "\n",
+ "# Example query for the agent\n",
+ "example_query = \"What's the current volatility of ETH?\"\n",
+ "\n",
+ "# Execute the agent\n",
+ "events = agent_executor.stream(\n",
+ " {\"messages\": [(\"user\", example_query)]},\n",
+ " stream_mode=\"values\",\n",
+ ")\n",
+ "for event in events:\n",
+ " event[\"messages\"][-1].pretty_print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2bb2b716",
+ "metadata": {},
+ "source": [
+ "Here's a sample output of everything put together:\n",
+ "\n",
+ "```\n",
+ "================================ Human Message =================================\n",
+ "\n",
+ "What's the current volatility of ETH?\n",
+ "================================== Ai Message ==================================\n",
+ "Tool Calls:\n",
+ " eth_volatility (chatcmpl-tool-d66ab9ee8f2c40e5a2634d90c7aeb17d)\n",
+ " Call ID: chatcmpl-tool-d66ab9ee8f2c40e5a2634d90c7aeb17d\n",
+ " Args:\n",
+ "================================= Tool Message =================================\n",
+ "Name: eth_volatility\n",
+ "\n",
+ "0.038%\n",
+ "================================== Ai Message ==================================\n",
+ "\n",
+ "The current volatility of the ETH/USDT trading pair is 0.038%.\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "268bc64a",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "See the [Github page](https://github.com/OpenGradient/og-langchain) for more detail."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/tools/tableau.ipynb b/docs/docs/integrations/tools/tableau.ipynb
new file mode 100644
index 00000000000..e82cc80b01d
--- /dev/null
+++ b/docs/docs/integrations/tools/tableau.ipynb
@@ -0,0 +1,315 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "1f302499-eb05-4296-8716-950babc0f10e",
+ "metadata": {},
+ "source": [
+ "# Tableau\n",
+ "\n",
+ "This notebook provides a quick overview for getting started with [Tableau](https://help.tableau.com/current/api/vizql-data-service/en-us/index.html). "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4d57b913-819e-4676-9f6e-3afe0a80030e",
+ "metadata": {},
+ "source": [
+ "### Overview\n",
+ "\n",
+ "Tableau's VizQL Data Service (aka VDS) provides developers with programmatic access to their Tableau Published Data Sources, allowing them to extend their business semantics for any custom workload or application, including AI Agents. The simple_datasource_qa tool adds VDS to the Langchain framework. This notebook shows you how you can use it to build agents that answer analytical questions grounded on your enterprise semantic models. \n",
+ "\n",
+ "Follow the [tableau-langchain](https://github.com/Tab-SE/tableau_langchain) project for more tools coming soon!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "311bce64",
+ "metadata": {},
+ "source": [
+ "#### Setup\n",
+ "Make sure you are running and have access to:\n",
+ "1. python version 3.12.2 or higher\n",
+ "2. A Tableau Cloud or Server environment with at least 1 published data source\n",
+ "\n",
+ "Get started by installing and/or importing the required packages"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9b178e95-ffae-4f04-ad77-1fdc2ab05edf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# %pip install langchain-openai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8605e87a-2253-4c89-992a-ecdbec955ef6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# %pip install langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c13dca76",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Requirement already satisfied: regex>=2022.1.18 in /Users/joe.constantino/.pyenv/versions/3.12.2/lib/python3.12/site-packages (from tiktoken<1,>=0.7->langchain-openai->langchain-tableau) (2024.11.6)\r\n",
+ "Requirement already satisfied: httpcore==1.* in /Users/joe.constantino/.pyenv/versions/3.12.2/lib/python3.12/site-packages (from httpx>=0.25.2->langgraph-sdk<0.2.0,>=0.1.42->langgraph->langchain-tableau) (1.0.7)\r\n",
+ "Requirement already satisfied: h11<0.15,>=0.13 in /Users/joe.constantino/.pyenv/versions/3.12.2/lib/python3.12/site-packages (from httpcore==1.*->httpx>=0.25.2->langgraph-sdk<0.2.0,>=0.1.42->langgraph->langchain-tableau) (0.14.0)\r\n"
+ ]
+ }
+ ],
+ "source": [
+ "# %pip install langchain-tableau --upgrade"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbaa05f4",
+ "metadata": {},
+ "source": [
+ "Note you may need to restart your kernel to use updated packages"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "80473fcc",
+ "metadata": {},
+ "source": [
+ "### Credentials\n",
+ "\n",
+ "You can declare your environment variables explicitly, as shown in several cases in this doc. However, if these parameters are not provided, the simple_datasource_qa tool will attempt to automatically read them from environment variables.\n",
+ "\n",
+ "For the Data Source that you choose to query, make sure you've updated the VizqlDataApiAccess permission in Tableau to allow the VDS API to access that Data Source via REST. More info [here](https://help.tableau.com/current/server/en-us/permissions_capabilities.htm#data-sources\n",
+ "). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "310d21b3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# langchain package imports\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "# langchain_tableau and langgraph imports\n",
+ "from langchain_tableau.tools.simple_datasource_qa import initialize_simple_datasource_qa\n",
+ "from langgraph.prebuilt import create_react_agent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "596d6718-f2e1-44bb-b614-65447862661c",
+ "metadata": {},
+ "source": [
+ "## Authentication Variables\n",
+ "You can declare your environment variables explicitly, as shown in several cases in this cookbook. However, if these parameters are not provided, the simple_datasource_qa tool will attempt to automatically read them from environment variables.\n",
+ "\n",
+ "For the Data Source that you choose, make sure you've updated the VizqlDataApiAccess permission in Tableau to allow the VDS API to access that Data Source via REST. More info [here](https://help.tableau.com/current/server/en-us/permissions_capabilities.htm#data-sources\n",
+ "). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "ccfb4159-34ac-4816-a8f0-795c5442c0b2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "load_dotenv()\n",
+ "\n",
+ "tableau_server = \"https://stage-dataplane2.tableau.sfdc-shbmgi.svc.sfdcfc.net/\" # replace with your Tableau server name\n",
+ "tableau_site = \"vizqldataservicestage02\" # replace with your Tableau site\n",
+ "tableau_jwt_client_id = os.getenv(\n",
+ " \"TABLEAU_JWT_CLIENT_ID\"\n",
+ ") # a JWT client ID (obtained through Tableau's admin UI)\n",
+ "tableau_jwt_secret_id = os.getenv(\n",
+ " \"TABLEAU_JWT_SECRET_ID\"\n",
+ ") # a JWT secret ID (obtained through Tableau's admin UI)\n",
+ "tableau_jwt_secret = os.getenv(\n",
+ " \"TABLEAU_JWT_SECRET\"\n",
+ ") # a JWT secret (obtained through Tableau's admin UI)\n",
+ "tableau_api_version = \"3.21\" # the current Tableau REST API Version\n",
+ "tableau_user = \"joe.constantino@salesforce.com\" # replace with the username querying the target Tableau Data Source\n",
+ "\n",
+ "# For this cookbook we are connecting to the Superstore dataset that comes by default with every Tableau server\n",
+ "datasource_luid = (\n",
+ " \"0965e61b-a072-43cf-994c-8c6cf526940d\" # the target data source for this Tool\n",
+ ")\n",
+ "\n",
+ "# Add variables to control LLM models for the Agent and Tools\n",
+ "os.environ[\"OPENAI_API_KEY\"] # set your model API key as an environment variable\n",
+ "tooling_llm_model = \"gpt-4o\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "64d08107",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "The initialize_simple_datasource_qa initializes the Langgraph tool called [simple_datasource_qa](https://github.com/Tab-SE/tableau_langchain/blob/3ff9047414479cd55d797c18a78f834d57860761/pip_package/langchain_tableau/tools/simple_datasource_qa.py#L101), which can be used for analytical questions and answers on a Tableau Data Source.\n",
+ "\n",
+ "This initializer function:\n",
+ "1. Authenticates to Tableau using Tableau's connected-app framework for JWT-based authentication. All the required variables must be defined at runtime or as environment variables.\n",
+ "2. Asynchronously queries for the field metadata of the target datasource specified in the datasource_luid variable.\n",
+ "3. Grounds on the metadata of the target datasource to transform natural language questions into the json-formatted query payload required to make VDS query-datasource requests.\n",
+ "4. Executes a POST request to VDS.\n",
+ "5. Formats and returns the results in a structured response."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "72ee3eca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialize simple_datasource_qa for querying Tableau Datasources through VDS\n",
+ "analyze_datasource = initialize_simple_datasource_qa(\n",
+ " domain=tableau_server,\n",
+ " site=tableau_site,\n",
+ " jwt_client_id=tableau_jwt_client_id,\n",
+ " jwt_secret_id=tableau_jwt_secret_id,\n",
+ " jwt_secret=tableau_jwt_secret,\n",
+ " tableau_api_version=tableau_api_version,\n",
+ " tableau_user=tableau_user,\n",
+ " datasource_luid=datasource_luid,\n",
+ " tooling_llm_model=tooling_llm_model,\n",
+ ")\n",
+ "\n",
+ "# load the List of Tools to be used by the Agent. In this case we will just load our data source Q&A tool.\n",
+ "tools = [analyze_datasource]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0ac5daa0-4336-48d0-9c26-20bf2c252bad",
+ "metadata": {},
+ "source": [
+ "## Invocation - Langgraph Example\n",
+ "First, we'll initialize the LLM of our choice. Then, we define an agent using a langgraph agent constructor class and invoke it with a query related to the target data source. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "06a1d3f7-79a8-452e-b37e-9070d15445b0",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/markdown": [
+ "Here are the results for the states with the highest sales and profits based on the data queried:\n",
+ "\n",
+ "### States with the Most Sales\n",
+ "1. **California**: $457,687.63\n",
+ "2. **New York**: $310,876.27\n",
+ "3. **Texas**: $170,188.05\n",
+ "4. **Washington**: $138,641.27\n",
+ "5. **Pennsylvania**: $116,511.91\n",
+ "\n",
+ "### States with the Most Profit\n",
+ "1. **California**: $76,381.39\n",
+ "2. **New York**: $74,038.55\n",
+ "3. **Washington**: $33,402.65\n",
+ "4. **Michigan**: $24,463.19\n",
+ "5. **Virginia**: $18,597.95\n",
+ "\n",
+ "### Comparison\n",
+ "- **California** and **New York** are the only states that appear in both lists, indicating they are the top sellers and also generate the most profit.\n",
+ "- **Texas**, while having the third highest sales, does not rank in the top five for profit, showing a potential issue with profitability despite high sales.\n",
+ "\n",
+ "This analysis suggests that high sales do not always correlate with high profits, as seen with Texas."
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from IPython.display import Markdown, display\n",
+ "\n",
+ "model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
+ "\n",
+ "tableauAgent = create_react_agent(model, tools)\n",
+ "\n",
+ "# Run the agent\n",
+ "messages = tableauAgent.invoke(\n",
+ " {\n",
+ " \"messages\": [\n",
+ " (\n",
+ " \"human\",\n",
+ " \"which states sell the most? Are those the same states with the most profits?\",\n",
+ " )\n",
+ " ]\n",
+ " }\n",
+ ")\n",
+ "messages\n",
+ "# display(Markdown(messages['messages'][4].content)) #display a nicely formatted answer for successful generations"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e6b20093",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "TODO."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "12ab3d7b",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "TODO."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python (package_test_env)",
+ "language": "python",
+ "name": "package_test_env"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/tools/valthera.ipynb b/docs/docs/integrations/tools/valthera.ipynb
new file mode 100644
index 00000000000..eb15fe1fd08
--- /dev/null
+++ b/docs/docs/integrations/tools/valthera.ipynb
@@ -0,0 +1,439 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_label: Valthera\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Valthera\n",
+ "\n",
+ "Enable AI agents to engage users when they're most likely to respond.\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "Valthera is an open-source framework that enables LLM Agents to engage users in a more meaningful way. It is built on BJ Fogg's Behavior Model (B=MAT) and leverages data from multiple sources (such as HubSpot, PostHog, and Snowflake) to assess a user's **motivation** and **ability** before triggering an action.\n",
+ "\n",
+ "In this guide, you'll learn:\n",
+ "\n",
+ "- **Core Concepts:** Overview of the components (Data Aggregator, Scorer, Reasoning Engine, and Trigger Generator).\n",
+ "- **System Architecture:** How data flows through the system and how decisions are made.\n",
+ "- **Customization:** How to extend connectors, scoring metrics, and decision rules to fit your needs.\n",
+ "\n",
+ "Let's dive in!\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Setup\n",
+ "\n",
+ "This section covers installation of dependencies and setting up custom data connectors for Valthera."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "shellscript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "pip install openai langchain langchain_openai valthera langchain_valthera langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from typing import Any, Dict, List\n",
+ "\n",
+ "from valthera.connectors.base import BaseConnector\n",
+ "\n",
+ "\n",
+ "class MockHubSpotConnector(BaseConnector):\n",
+ " \"\"\"\n",
+ " Simulates data retrieval from HubSpot. Provides information such as lead score,\n",
+ " lifecycle stage, and marketing metrics.\n",
+ " \"\"\"\n",
+ "\n",
+ " def get_user_data(self, user_id: str) -> Dict[str, Any]:\n",
+ " \"\"\"\n",
+ " Retrieve mock HubSpot data for a given user.\n",
+ "\n",
+ " Args:\n",
+ " user_id: The unique identifier for the user\n",
+ "\n",
+ " Returns:\n",
+ " A dictionary containing HubSpot user data\n",
+ " \"\"\"\n",
+ " return {\n",
+ " \"hubspot_contact_id\": \"999-ZZZ\",\n",
+ " \"lifecycle_stage\": \"opportunity\",\n",
+ " \"lead_status\": \"engaged\",\n",
+ " \"hubspot_lead_score\": 100,\n",
+ " \"company_name\": \"MaxMotivation Corp.\",\n",
+ " \"last_contacted_date\": \"2023-09-20\",\n",
+ " \"hubspot_marketing_emails_opened\": 20,\n",
+ " \"marketing_emails_clicked\": 10,\n",
+ " }\n",
+ "\n",
+ "\n",
+ "class MockPostHogConnector(BaseConnector):\n",
+ " \"\"\"\n",
+ " Simulates data retrieval from PostHog. Provides session data and engagement events.\n",
+ " \"\"\"\n",
+ "\n",
+ " def get_user_data(self, user_id: str) -> Dict[str, Any]:\n",
+ " \"\"\"\n",
+ " Retrieve mock PostHog data for a given user.\n",
+ "\n",
+ " Args:\n",
+ " user_id: The unique identifier for the user\n",
+ "\n",
+ " Returns:\n",
+ " A dictionary containing PostHog user data\n",
+ " \"\"\"\n",
+ " return {\n",
+ " \"distinct_ids\": [user_id, f\"email_{user_id}\"],\n",
+ " \"last_event_timestamp\": \"2023-09-20T12:34:56Z\",\n",
+ " \"feature_flags\": [\"beta_dashboard\", \"early_access\"],\n",
+ " \"posthog_session_count\": 30,\n",
+ " \"avg_session_duration_sec\": 400,\n",
+ " \"recent_event_types\": [\"pageview\", \"button_click\", \"premium_feature_used\"],\n",
+ " \"posthog_events_count_past_30days\": 80,\n",
+ " \"posthog_onboarding_steps_completed\": 5,\n",
+ " }\n",
+ "\n",
+ "\n",
+ "class MockSnowflakeConnector(BaseConnector):\n",
+ " \"\"\"\n",
+ " Simulates retrieval of additional user profile data from Snowflake.\n",
+ " \"\"\"\n",
+ "\n",
+ " def get_user_data(self, user_id: str) -> Dict[str, Any]:\n",
+ " \"\"\"\n",
+ " Retrieve mock Snowflake data for a given user.\n",
+ "\n",
+ " Args:\n",
+ " user_id: The unique identifier for the user\n",
+ "\n",
+ " Returns:\n",
+ " A dictionary containing Snowflake user data\n",
+ " \"\"\"\n",
+ " return {\n",
+ " \"user_id\": user_id,\n",
+ " \"email\": f\"{user_id}@example.com\",\n",
+ " \"subscription_status\": \"paid\",\n",
+ " \"plan_tier\": \"premium\",\n",
+ " \"account_creation_date\": \"2023-01-01\",\n",
+ " \"preferred_language\": \"en\",\n",
+ " \"last_login_datetime\": \"2023-09-20T12:00:00Z\",\n",
+ " \"behavior_complexity\": 3,\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "In this section, we instantiate the core components. First, we create a Data Aggregator to combine data from the custom connectors. Then, we configure the scoring metrics for motivation and ability."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from valthera.aggregator import DataAggregator\n",
+ "\n",
+ "# Constants for configuration\n",
+ "LEAD_SCORE_MAX = 100\n",
+ "EVENTS_COUNT_MAX = 50\n",
+ "EMAILS_OPENED_FACTOR = 10.0\n",
+ "SESSION_COUNT_FACTOR_1 = 5.0\n",
+ "ONBOARDING_STEPS_FACTOR = 5.0\n",
+ "SESSION_COUNT_FACTOR_2 = 10.0\n",
+ "BEHAVIOR_COMPLEXITY_MAX = 5.0\n",
+ "\n",
+ "# Initialize data aggregator\n",
+ "data_aggregator = DataAggregator(\n",
+ " connectors={\n",
+ " \"hubspot\": MockHubSpotConnector(),\n",
+ " \"posthog\": MockPostHogConnector(),\n",
+ " \"snowflake\": MockSnowflakeConnector(),\n",
+ " }\n",
+ ")\n",
+ "\n",
+ "# You can now fetch unified user data by calling data_aggregator.get_user_context(user_id)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from typing import Callable, Union\n",
+ "\n",
+ "from valthera.scorer import ValtheraScorer\n",
+ "\n",
+ "\n",
+ "# Define transform functions with proper type annotations\n",
+ "def transform_lead_score(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform lead score to a value between 0 and 1.\"\"\"\n",
+ " return min(x, LEAD_SCORE_MAX) / LEAD_SCORE_MAX\n",
+ "\n",
+ "\n",
+ "def transform_events_count(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform events count to a value between 0 and 1.\"\"\"\n",
+ " return min(x, EVENTS_COUNT_MAX) / EVENTS_COUNT_MAX\n",
+ "\n",
+ "\n",
+ "def transform_emails_opened(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform emails opened to a value between 0 and 1.\"\"\"\n",
+ " return min(x / EMAILS_OPENED_FACTOR, 1.0)\n",
+ "\n",
+ "\n",
+ "def transform_session_count_1(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform session count for motivation to a value between 0 and 1.\"\"\"\n",
+ " return min(x / SESSION_COUNT_FACTOR_1, 1.0)\n",
+ "\n",
+ "\n",
+ "def transform_onboarding_steps(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform onboarding steps to a value between 0 and 1.\"\"\"\n",
+ " return min(x / ONBOARDING_STEPS_FACTOR, 1.0)\n",
+ "\n",
+ "\n",
+ "def transform_session_count_2(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform session count for ability to a value between 0 and 1.\"\"\"\n",
+ " return min(x / SESSION_COUNT_FACTOR_2, 1.0)\n",
+ "\n",
+ "\n",
+ "def transform_behavior_complexity(x: Union[int, float]) -> float:\n",
+ " \"\"\"Transform behavior complexity to a value between 0 and 1.\"\"\"\n",
+ " return 1 - (min(x, BEHAVIOR_COMPLEXITY_MAX) / BEHAVIOR_COMPLEXITY_MAX)\n",
+ "\n",
+ "\n",
+ "# Scoring configuration for user motivation\n",
+ "motivation_config = [\n",
+ " {\"key\": \"hubspot_lead_score\", \"weight\": 0.30, \"transform\": transform_lead_score},\n",
+ " {\n",
+ " \"key\": \"posthog_events_count_past_30days\",\n",
+ " \"weight\": 0.30,\n",
+ " \"transform\": transform_events_count,\n",
+ " },\n",
+ " {\n",
+ " \"key\": \"hubspot_marketing_emails_opened\",\n",
+ " \"weight\": 0.20,\n",
+ " \"transform\": transform_emails_opened,\n",
+ " },\n",
+ " {\n",
+ " \"key\": \"posthog_session_count\",\n",
+ " \"weight\": 0.20,\n",
+ " \"transform\": transform_session_count_1,\n",
+ " },\n",
+ "]\n",
+ "\n",
+ "# Scoring configuration for user ability\n",
+ "ability_config = [\n",
+ " {\n",
+ " \"key\": \"posthog_onboarding_steps_completed\",\n",
+ " \"weight\": 0.30,\n",
+ " \"transform\": transform_onboarding_steps,\n",
+ " },\n",
+ " {\n",
+ " \"key\": \"posthog_session_count\",\n",
+ " \"weight\": 0.30,\n",
+ " \"transform\": transform_session_count_2,\n",
+ " },\n",
+ " {\n",
+ " \"key\": \"behavior_complexity\",\n",
+ " \"weight\": 0.40,\n",
+ " \"transform\": transform_behavior_complexity,\n",
+ " },\n",
+ "]\n",
+ "\n",
+ "# Instantiate the scorer\n",
+ "scorer = ValtheraScorer(motivation_config, ability_config)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Invocation\n",
+ "\n",
+ "Next, we set up the Reasoning Engine and Trigger Generator, then bring all components together by instantiating the Valthera Tool. Finally, we execute the agent workflow to process an input message."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from valthera.reasoning_engine import ReasoningEngine\n",
+ "\n",
+ "# Define threshold as constant\n",
+ "SCORE_THRESHOLD = 0.75\n",
+ "\n",
+ "\n",
+ "# Function to safely get API key\n",
+ "def get_openai_api_key() -> str:\n",
+ " \"\"\"Get OpenAI API key with error handling.\"\"\"\n",
+ " api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
+ " if not api_key:\n",
+ " raise ValueError(\"OPENAI_API_KEY not found in environment variables\")\n",
+ " return api_key\n",
+ "\n",
+ "\n",
+ "# Decision rules using constant\n",
+ "decision_rules = [\n",
+ " {\n",
+ " \"condition\": f\"motivation >= {SCORE_THRESHOLD} and ability >= {SCORE_THRESHOLD}\",\n",
+ " \"action\": \"trigger\",\n",
+ " \"description\": \"Both scores are high enough.\",\n",
+ " },\n",
+ " {\n",
+ " \"condition\": f\"motivation < {SCORE_THRESHOLD}\",\n",
+ " \"action\": \"improve_motivation\",\n",
+ " \"description\": \"User motivation is low.\",\n",
+ " },\n",
+ " {\n",
+ " \"condition\": f\"ability < {SCORE_THRESHOLD}\",\n",
+ " \"action\": \"improve_ability\",\n",
+ " \"description\": \"User ability is low.\",\n",
+ " },\n",
+ " {\n",
+ " \"condition\": \"otherwise\",\n",
+ " \"action\": \"defer\",\n",
+ " \"description\": \"No action needed at this time.\",\n",
+ " },\n",
+ "]\n",
+ "\n",
+ "try:\n",
+ " api_key = get_openai_api_key()\n",
+ " reasoning_engine = ReasoningEngine(\n",
+ " llm=ChatOpenAI(\n",
+ " model_name=\"gpt-4-turbo\", temperature=0.0, openai_api_key=api_key\n",
+ " ),\n",
+ " decision_rules=decision_rules,\n",
+ " )\n",
+ "except ValueError as e:\n",
+ " print(f\"Error initializing reasoning engine: {e}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from valthera.trigger_generator import TriggerGenerator\n",
+ "\n",
+ "try:\n",
+ " api_key = get_openai_api_key() # Reuse the function for consistency\n",
+ " trigger_generator = TriggerGenerator(\n",
+ " llm=ChatOpenAI(\n",
+ " model_name=\"gpt-4-turbo\", temperature=0.7, openai_api_key=api_key\n",
+ " )\n",
+ " )\n",
+ "except ValueError as e:\n",
+ " print(f\"Error initializing trigger generator: {e}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_valthera.tools import ValtheraTool\n",
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "try:\n",
+ " api_key = get_openai_api_key()\n",
+ "\n",
+ " # Initialize Valthera tool\n",
+ " valthera_tool = ValtheraTool(\n",
+ " data_aggregator=data_aggregator,\n",
+ " motivation_config=motivation_config,\n",
+ " ability_config=ability_config,\n",
+ " reasoning_engine=reasoning_engine,\n",
+ " trigger_generator=trigger_generator,\n",
+ " )\n",
+ "\n",
+ " # Create agent with LLM\n",
+ " llm = ChatOpenAI(model_name=\"gpt-4-turbo\", temperature=0.0, openai_api_key=api_key)\n",
+ " tools = [valthera_tool]\n",
+ " graph = create_react_agent(llm, tools=tools)\n",
+ "\n",
+ " # Define input message for testing\n",
+ " inputs = {\n",
+ " \"messages\": [(\"user\", \"Evaluate behavior for user_12345: Finish Onboarding\")]\n",
+ " }\n",
+ "\n",
+ " # Process the input and display responses\n",
+ " print(\"Running Valthera agent workflow...\")\n",
+ " for response in graph.stream(inputs, stream_mode=\"values\"):\n",
+ " print(response)\n",
+ "\n",
+ "except Exception as e:\n",
+ " print(f\"Error running Valthera workflow: {e}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "This integration does not currently support chaining operations. Future releases may include chaining support."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "Below is an overview of the key APIs provided by the Valthera integration:\n",
+ "\n",
+ "- **Data Aggregator:** Use `data_aggregator.get_user_context(user_id)` to fetch aggregated user data.\n",
+ "- **Scorer:** The `ValtheraScorer` computes motivation and ability scores based on the provided configurations.\n",
+ "- **Reasoning Engine:** The `ReasoningEngine` evaluates decision rules to determine the appropriate action (trigger, improve motivation, improve ability, or defer).\n",
+ "- **Trigger Generator:** Generates personalized trigger messages using the LLM.\n",
+ "- **Valthera Tool:** Integrates all the components to process inputs and execute the agent workflow.\n",
+ "\n",
+ "For detailed usage, refer to the inline documentation in the source code."
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ },
+ "title": "Valthera Developer Guide"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/scripts/check_templates.py b/docs/scripts/check_templates.py
index 4a029009552..40955f13474 100644
--- a/docs/scripts/check_templates.py
+++ b/docs/scripts/check_templates.py
@@ -59,7 +59,10 @@ def check_header_order(path: Path) -> None:
if doc_dir not in INFO_BY_DIR:
# Skip if not a directory we care about
return
- headers = _get_headers(doc_dir)
+ if "toolkit" in path.name:
+ headers = _get_headers("toolkits")
+ else:
+ headers = _get_headers(doc_dir)
issue_number = INFO_BY_DIR[doc_dir].get("issue_number", "nonexistent")
print(f"Checking {doc_dir} page {path}")
diff --git a/docs/scripts/tool_feat_table.py b/docs/scripts/tool_feat_table.py
index c92d2344a93..5e5a3498413 100644
--- a/docs/scripts/tool_feat_table.py
+++ b/docs/scripts/tool_feat_table.py
@@ -147,6 +147,11 @@ WEBBROWSING_TOOL_FEAT_TABLE = {
"interactions": True,
"pricing": "40 free requests/day",
},
+ "AgentQL Toolkit": {
+ "link": "/docs/integrations/tools/agentql",
+ "interactions": True,
+ "pricing": "Free trial, with pay-as-you-go and flat rate plans after",
+ },
}
DATABASE_TOOL_FEAT_TABLE = {
diff --git a/docs/src/theme/FeatureTables.js b/docs/src/theme/FeatureTables.js
index 73e9bfaa090..2703760561c 100644
--- a/docs/src/theme/FeatureTables.js
+++ b/docs/src/theme/FeatureTables.js
@@ -819,6 +819,13 @@ const FEATURE_TABLES = {
source: "Platform for running and scaling headless browsers, can be used to scrape/crawl any site",
api: "API",
apiLink: "https://python.langchain.com/docs/integrations/document_loaders/hyperbrowser/"
+ },
+ {
+ name: "AgentQL",
+ link: "agentql",
+ source: "Web interaction and structured data extraction from any web page using an AgentQL query or a Natural Language prompt",
+ api: "API",
+ apiLink: "https://python.langchain.com/docs/integrations/document_loaders/agentql/"
}
]
},
diff --git a/docs/static/img/logo-dark.svg b/docs/static/img/logo-dark.svg
new file mode 100644
index 00000000000..81fad4638e6
--- /dev/null
+++ b/docs/static/img/logo-dark.svg
@@ -0,0 +1,25 @@
+
+
\ No newline at end of file
diff --git a/docs/static/img/logo-light.svg b/docs/static/img/logo-light.svg
new file mode 100644
index 00000000000..6ee90746271
--- /dev/null
+++ b/docs/static/img/logo-light.svg
@@ -0,0 +1,25 @@
+
+
\ No newline at end of file
diff --git a/libs/cli/Makefile b/libs/cli/Makefile
index a50ea28e6c8..b62dc1ab703 100644
--- a/libs/cli/Makefile
+++ b/libs/cli/Makefile
@@ -48,4 +48,5 @@ _e2e_test:
poetry run pip install -e ../../../standard-tests && \
make format lint tests && \
poetry install --with test_integration && \
+ poetry run pip install -e ../../../core && \
make integration_test
diff --git a/libs/cli/langchain_cli/integration_template/docs/tools.ipynb b/libs/cli/langchain_cli/integration_template/docs/tools.ipynb
index 7ca2ac5378d..f62e83fc6d8 100644
--- a/libs/cli/langchain_cli/integration_template/docs/tools.ipynb
+++ b/libs/cli/langchain_cli/integration_template/docs/tools.ipynb
@@ -181,11 +181,11 @@
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
"metadata": {},
"source": [
- "## Chaining\n",
+ "## Use within an agent\n",
"\n",
"- TODO: Add user question and run cells\n",
"\n",
- "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n",
+ "We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
@@ -208,6 +208,19 @@
"llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bea35fa1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "tools = [tool]\n",
+ "agent = create_react_agent(llm, tools)"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -215,32 +228,14 @@
"metadata": {},
"outputs": [],
"source": [
- "from langchain_core.prompts import ChatPromptTemplate\n",
- "from langchain_core.runnables import RunnableConfig, chain\n",
+ "example_query = \"...\"\n",
"\n",
- "prompt = ChatPromptTemplate(\n",
- " [\n",
- " (\"system\", \"You are a helpful assistant.\"),\n",
- " (\"human\", \"{user_input}\"),\n",
- " (\"placeholder\", \"{messages}\"),\n",
- " ]\n",
+ "events = agent.stream(\n",
+ " {\"messages\": [(\"user\", example_query)]},\n",
+ " stream_mode=\"values\",\n",
")\n",
- "\n",
- "# specifying tool_choice will force the model to call this tool.\n",
- "llm_with_tools = llm.bind_tools([tool], tool_choice=tool.name)\n",
- "\n",
- "llm_chain = prompt | llm_with_tools\n",
- "\n",
- "\n",
- "@chain\n",
- "def tool_chain(user_input: str, config: RunnableConfig):\n",
- " input_ = {\"user_input\": user_input}\n",
- " ai_msg = llm_chain.invoke(input_, config=config)\n",
- " tool_msgs = tool.batch(ai_msg.tool_calls, config=config)\n",
- " return llm_chain.invoke({**input_, \"messages\": [ai_msg, *tool_msgs]}, config=config)\n",
- "\n",
- "\n",
- "tool_chain.invoke(\"...\")"
+ "for event in events:\n",
+ " event[\"messages\"][-1].pretty_print()"
]
},
{
diff --git a/libs/cli/pyproject.toml b/libs/cli/pyproject.toml
index 5daf92cc91d..4014e7dc5b2 100644
--- a/libs/cli/pyproject.toml
+++ b/libs/cli/pyproject.toml
@@ -15,7 +15,7 @@ dependencies = [
"gritql<1.0.0,>=0.2.0",
]
name = "langchain-cli"
-version = "0.0.35"
+version = "0.0.36"
description = "CLI for interacting with LangChain"
readme = "README.md"
@@ -31,11 +31,12 @@ langchain-cli = "langchain_cli.cli:app"
[dependency-groups]
dev = ["pytest<8.0.0,>=7.4.2", "pytest-watch<5.0.0,>=4.2.0"]
lint = ["ruff<1.0,>=0.5", "mypy<2.0.0,>=1.13.0"]
-test = ["langchain"]
+test = ["langchain-core", "langchain"]
typing = ["langchain"]
test_integration = []
[tool.uv.sources]
+langchain-core = { path = "../core", editable = true }
langchain = { path = "../langchain", editable = true }
[tool.ruff.lint]
diff --git a/libs/cli/uv.lock b/libs/cli/uv.lock
index 356175a2406..580aec41ef0 100644
--- a/libs/cli/uv.lock
+++ b/libs/cli/uv.lock
@@ -6,120 +6,6 @@ resolution-markers = [
"python_full_version < '3.12'",
]
-[[package]]
-name = "aiohappyeyeballs"
-version = "2.4.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7f/55/e4373e888fdacb15563ef6fa9fa8c8252476ea071e96fb46defac9f18bf2/aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745", size = 21977 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b9/74/fbb6559de3607b3300b9be3cc64e97548d55678e44623db17820dbd20002/aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8", size = 14756 },
-]
-
-[[package]]
-name = "aiohttp"
-version = "3.11.11"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "aiohappyeyeballs" },
- { name = "aiosignal" },
- { name = "async-timeout", marker = "python_full_version < '3.11'" },
- { name = "attrs" },
- { name = "frozenlist" },
- { name = "multidict" },
- { name = "propcache" },
- { name = "yarl" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/fe/ed/f26db39d29cd3cb2f5a3374304c713fe5ab5a0e4c8ee25a0c45cc6adf844/aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e", size = 7669618 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/75/7d/ff2e314b8f9e0b1df833e2d4778eaf23eae6b8cc8f922495d110ddcbf9e1/aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8", size = 708550 },
- { url = "https://files.pythonhosted.org/packages/09/b8/aeb4975d5bba233d6f246941f5957a5ad4e3def8b0855a72742e391925f2/aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5", size = 468430 },
- { url = "https://files.pythonhosted.org/packages/9c/5b/5b620279b3df46e597008b09fa1e10027a39467387c2332657288e25811a/aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2", size = 455593 },
- { url = "https://files.pythonhosted.org/packages/d8/75/0cdf014b816867d86c0bc26f3d3e3f194198dbf33037890beed629cd4f8f/aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43", size = 1584635 },
- { url = "https://files.pythonhosted.org/packages/df/2f/95b8f4e4dfeb57c1d9ad9fa911ede35a0249d75aa339edd2c2270dc539da/aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f", size = 1632363 },
- { url = "https://files.pythonhosted.org/packages/39/cb/70cf69ea7c50f5b0021a84f4c59c3622b2b3b81695f48a2f0e42ef7eba6e/aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d", size = 1668315 },
- { url = "https://files.pythonhosted.org/packages/2f/cc/3a3fc7a290eabc59839a7e15289cd48f33dd9337d06e301064e1e7fb26c5/aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef", size = 1589546 },
- { url = "https://files.pythonhosted.org/packages/15/b4/0f7b0ed41ac6000e283e7332f0f608d734b675a8509763ca78e93714cfb0/aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438", size = 1544581 },
- { url = "https://files.pythonhosted.org/packages/58/b9/4d06470fd85c687b6b0e31935ef73dde6e31767c9576d617309a2206556f/aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3", size = 1529256 },
- { url = "https://files.pythonhosted.org/packages/61/a2/6958b1b880fc017fd35f5dfb2c26a9a50c755b75fd9ae001dc2236a4fb79/aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55", size = 1536592 },
- { url = "https://files.pythonhosted.org/packages/0f/dd/b974012a9551fd654f5bb95a6dd3f03d6e6472a17e1a8216dd42e9638d6c/aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e", size = 1607446 },
- { url = "https://files.pythonhosted.org/packages/e0/d3/6c98fd87e638e51f074a3f2061e81fcb92123bcaf1439ac1b4a896446e40/aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33", size = 1628809 },
- { url = "https://files.pythonhosted.org/packages/a8/2e/86e6f85cbca02be042c268c3d93e7f35977a0e127de56e319bdd1569eaa8/aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c", size = 1564291 },
- { url = "https://files.pythonhosted.org/packages/0b/8d/1f4ef3503b767717f65e1f5178b0173ab03cba1a19997ebf7b052161189f/aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745", size = 416601 },
- { url = "https://files.pythonhosted.org/packages/ad/86/81cb83691b5ace3d9aa148dc42bacc3450d749fc88c5ec1973573c1c1779/aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9", size = 442007 },
- { url = "https://files.pythonhosted.org/packages/34/ae/e8806a9f054e15f1d18b04db75c23ec38ec954a10c0a68d3bd275d7e8be3/aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76", size = 708624 },
- { url = "https://files.pythonhosted.org/packages/c7/e0/313ef1a333fb4d58d0c55a6acb3cd772f5d7756604b455181049e222c020/aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538", size = 468507 },
- { url = "https://files.pythonhosted.org/packages/a9/60/03455476bf1f467e5b4a32a465c450548b2ce724eec39d69f737191f936a/aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204", size = 455571 },
- { url = "https://files.pythonhosted.org/packages/be/f9/469588603bd75bf02c8ffb8c8a0d4b217eed446b49d4a767684685aa33fd/aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9", size = 1685694 },
- { url = "https://files.pythonhosted.org/packages/88/b9/1b7fa43faf6c8616fa94c568dc1309ffee2b6b68b04ac268e5d64b738688/aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03", size = 1743660 },
- { url = "https://files.pythonhosted.org/packages/2a/8b/0248d19dbb16b67222e75f6aecedd014656225733157e5afaf6a6a07e2e8/aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287", size = 1785421 },
- { url = "https://files.pythonhosted.org/packages/c4/11/f478e071815a46ca0a5ae974651ff0c7a35898c55063305a896e58aa1247/aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e", size = 1675145 },
- { url = "https://files.pythonhosted.org/packages/26/5d/284d182fecbb5075ae10153ff7374f57314c93a8681666600e3a9e09c505/aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665", size = 1619804 },
- { url = "https://files.pythonhosted.org/packages/1b/78/980064c2ad685c64ce0e8aeeb7ef1e53f43c5b005edcd7d32e60809c4992/aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b", size = 1654007 },
- { url = "https://files.pythonhosted.org/packages/21/8d/9e658d63b1438ad42b96f94da227f2e2c1d5c6001c9e8ffcc0bfb22e9105/aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34", size = 1650022 },
- { url = "https://files.pythonhosted.org/packages/85/fd/a032bf7f2755c2df4f87f9effa34ccc1ef5cea465377dbaeef93bb56bbd6/aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d", size = 1732899 },
- { url = "https://files.pythonhosted.org/packages/c5/0c/c2b85fde167dd440c7ba50af2aac20b5a5666392b174df54c00f888c5a75/aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2", size = 1755142 },
- { url = "https://files.pythonhosted.org/packages/bc/78/91ae1a3b3b3bed8b893c5d69c07023e151b1c95d79544ad04cf68f596c2f/aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773", size = 1692736 },
- { url = "https://files.pythonhosted.org/packages/77/89/a7ef9c4b4cdb546fcc650ca7f7395aaffbd267f0e1f648a436bec33c9b95/aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62", size = 416418 },
- { url = "https://files.pythonhosted.org/packages/fc/db/2192489a8a51b52e06627506f8ac8df69ee221de88ab9bdea77aa793aa6a/aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac", size = 442509 },
- { url = "https://files.pythonhosted.org/packages/69/cf/4bda538c502f9738d6b95ada11603c05ec260807246e15e869fc3ec5de97/aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886", size = 704666 },
- { url = "https://files.pythonhosted.org/packages/46/7b/87fcef2cad2fad420ca77bef981e815df6904047d0a1bd6aeded1b0d1d66/aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2", size = 464057 },
- { url = "https://files.pythonhosted.org/packages/5a/a6/789e1f17a1b6f4a38939fbc39d29e1d960d5f89f73d0629a939410171bc0/aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c", size = 455996 },
- { url = "https://files.pythonhosted.org/packages/b7/dd/485061fbfef33165ce7320db36e530cd7116ee1098e9c3774d15a732b3fd/aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a", size = 1682367 },
- { url = "https://files.pythonhosted.org/packages/e9/d7/9ec5b3ea9ae215c311d88b2093e8da17e67b8856673e4166c994e117ee3e/aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231", size = 1736989 },
- { url = "https://files.pythonhosted.org/packages/d6/fb/ea94927f7bfe1d86178c9d3e0a8c54f651a0a655214cce930b3c679b8f64/aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e", size = 1793265 },
- { url = "https://files.pythonhosted.org/packages/40/7f/6de218084f9b653026bd7063cd8045123a7ba90c25176465f266976d8c82/aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8", size = 1691841 },
- { url = "https://files.pythonhosted.org/packages/77/e2/992f43d87831cbddb6b09c57ab55499332f60ad6fdbf438ff4419c2925fc/aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8", size = 1619317 },
- { url = "https://files.pythonhosted.org/packages/96/74/879b23cdd816db4133325a201287c95bef4ce669acde37f8f1b8669e1755/aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c", size = 1641416 },
- { url = "https://files.pythonhosted.org/packages/30/98/b123f6b15d87c54e58fd7ae3558ff594f898d7f30a90899718f3215ad328/aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab", size = 1646514 },
- { url = "https://files.pythonhosted.org/packages/d7/38/257fda3dc99d6978ab943141d5165ec74fd4b4164baa15e9c66fa21da86b/aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da", size = 1702095 },
- { url = "https://files.pythonhosted.org/packages/0c/f4/ddab089053f9fb96654df5505c0a69bde093214b3c3454f6bfdb1845f558/aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853", size = 1734611 },
- { url = "https://files.pythonhosted.org/packages/c3/d6/f30b2bc520c38c8aa4657ed953186e535ae84abe55c08d0f70acd72ff577/aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e", size = 1694576 },
- { url = "https://files.pythonhosted.org/packages/bc/97/b0a88c3f4c6d0020b34045ee6d954058abc870814f6e310c4c9b74254116/aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600", size = 411363 },
- { url = "https://files.pythonhosted.org/packages/7f/23/cc36d9c398980acaeeb443100f0216f50a7cfe20c67a9fd0a2f1a5a846de/aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = "sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d", size = 437666 },
- { url = "https://files.pythonhosted.org/packages/49/d1/d8af164f400bad432b63e1ac857d74a09311a8334b0481f2f64b158b50eb/aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9", size = 697982 },
- { url = "https://files.pythonhosted.org/packages/92/d1/faad3bf9fa4bfd26b95c69fc2e98937d52b1ff44f7e28131855a98d23a17/aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194", size = 460662 },
- { url = "https://files.pythonhosted.org/packages/db/61/0d71cc66d63909dabc4590f74eba71f91873a77ea52424401c2498d47536/aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f", size = 452950 },
- { url = "https://files.pythonhosted.org/packages/07/db/6d04bc7fd92784900704e16b745484ef45b77bd04e25f58f6febaadf7983/aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104", size = 1665178 },
- { url = "https://files.pythonhosted.org/packages/54/5c/e95ade9ae29f375411884d9fd98e50535bf9fe316c9feb0f30cd2ac8f508/aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff", size = 1717939 },
- { url = "https://files.pythonhosted.org/packages/6f/1c/1e7d5c5daea9e409ed70f7986001b8c9e3a49a50b28404498d30860edab6/aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3", size = 1775125 },
- { url = "https://files.pythonhosted.org/packages/5d/66/890987e44f7d2f33a130e37e01a164168e6aff06fce15217b6eaf14df4f6/aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1", size = 1677176 },
- { url = "https://files.pythonhosted.org/packages/8f/dc/e2ba57d7a52df6cdf1072fd5fa9c6301a68e1cd67415f189805d3eeb031d/aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4", size = 1603192 },
- { url = "https://files.pythonhosted.org/packages/6c/9e/8d08a57de79ca3a358da449405555e668f2c8871a7777ecd2f0e3912c272/aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d", size = 1618296 },
- { url = "https://files.pythonhosted.org/packages/56/51/89822e3ec72db352c32e7fc1c690370e24e231837d9abd056490f3a49886/aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87", size = 1616524 },
- { url = "https://files.pythonhosted.org/packages/2c/fa/e2e6d9398f462ffaa095e84717c1732916a57f1814502929ed67dd7568ef/aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2", size = 1685471 },
- { url = "https://files.pythonhosted.org/packages/ae/5f/6bb976e619ca28a052e2c0ca7b0251ccd893f93d7c24a96abea38e332bf6/aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12", size = 1715312 },
- { url = "https://files.pythonhosted.org/packages/79/c1/756a7e65aa087c7fac724d6c4c038f2faaa2a42fe56dbc1dd62a33ca7213/aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5", size = 1672783 },
- { url = "https://files.pythonhosted.org/packages/73/ba/a6190ebb02176c7f75e6308da31f5d49f6477b651a3dcfaaaca865a298e2/aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d", size = 410229 },
- { url = "https://files.pythonhosted.org/packages/b8/62/c9fa5bafe03186a0e4699150a7fed9b1e73240996d0d2f0e5f70f3fdf471/aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99", size = 436081 },
- { url = "https://files.pythonhosted.org/packages/9f/37/326ee86b7640be6ca4493c8121cb9a4386e07cf1e5757ce6b7fa854d0a5f/aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e", size = 709424 },
- { url = "https://files.pythonhosted.org/packages/9c/c5/a88ec2160b06c22e57e483a1f78f99f005fcd4e7d6855a2d3d6510881b65/aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add", size = 468907 },
- { url = "https://files.pythonhosted.org/packages/b2/f0/02f03f818e91996161cce200241b631bb2b4a87e61acddb5b974e254a288/aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a", size = 455981 },
- { url = "https://files.pythonhosted.org/packages/0e/17/c8be12436ec19915f67b1ab8240d4105aba0f7e0894a1f0d8939c3e79c70/aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350", size = 1587395 },
- { url = "https://files.pythonhosted.org/packages/43/c0/f4db1ac30ebe855b2fefd6fa98767862d88ac54ab08a6ad07d619146270c/aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6", size = 1636243 },
- { url = "https://files.pythonhosted.org/packages/ea/a7/9acf20e9a09b0d38b5b55691410500d051a9f4194692cac22b0d0fc92ad9/aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1", size = 1672323 },
- { url = "https://files.pythonhosted.org/packages/f7/5b/a27e8fe1a3b0e245ca80863eefd83fc00136752d27d2cf1afa0130a76f34/aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e", size = 1589521 },
- { url = "https://files.pythonhosted.org/packages/25/50/8bccd08004e15906791b46f0a908a8e7f5e0c5882b17da96d1933bd34ac0/aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd", size = 1544059 },
- { url = "https://files.pythonhosted.org/packages/84/5a/42250b37b06ee0cb7a03dd1630243b1d739ca3edb5abd8b18f479a539900/aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1", size = 1530217 },
- { url = "https://files.pythonhosted.org/packages/18/08/eb334da86cd2cdbd0621bb7039255b19ca74ce8b05e8fb61850e2589938c/aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c", size = 1536081 },
- { url = "https://files.pythonhosted.org/packages/1a/a9/9d59958084d5bad7e77a44841013bd59768cda94f9f744769461b66038fc/aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e", size = 1606918 },
- { url = "https://files.pythonhosted.org/packages/4f/e7/27feb1cff17dcddb7a5b703199106196718d622a3aa70f80a386d15361d7/aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28", size = 1629101 },
- { url = "https://files.pythonhosted.org/packages/e8/29/49debcd858b997c655fca274c5247fcfe29bf31a4ddb1ce3f088539b14e4/aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226", size = 1567338 },
- { url = "https://files.pythonhosted.org/packages/3b/34/33af1e97aba1862e1812e2e2b96a1e050c5a6e9cecd5a5370591122fb07b/aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3", size = 416914 },
- { url = "https://files.pythonhosted.org/packages/2d/47/28b3fbd97026963af2774423c64341e0d4ec180ea3b79a2762a3c18d5d94/aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1", size = 442225 },
-]
-
-[[package]]
-name = "aiosignal"
-version = "1.3.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "frozenlist" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597 },
-]
-
[[package]]
name = "annotated-types"
version = "0.7.0"
@@ -153,15 +39,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028", size = 5721 },
]
-[[package]]
-name = "attrs"
-version = "25.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/49/7c/fdf464bcc51d23881d110abd74b512a42b3d5d376a55a831b44c603ae17f/attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e", size = 810562 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/fc/30/d4986a882011f9df997a55e6becd864812ccfcd821d64aac8570ee39f719/attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a", size = 63152 },
-]
-
[[package]]
name = "certifi"
version = "2025.1.31"
@@ -364,90 +241,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/8f/7d/2d6ce181d7a5f51dedb8c06206cbf0ec026a99bf145edd309f9e17c3282f/fastapi-0.115.8-py3-none-any.whl", hash = "sha256:753a96dd7e036b34eeef8babdfcfe3f28ff79648f86551eb36bfc1b0bf4a8cbf", size = 94814 },
]
-[[package]]
-name = "frozenlist"
-version = "1.5.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/8f/ed/0f4cec13a93c02c47ec32d81d11c0c1efbadf4a471e3f3ce7cad366cbbd3/frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817", size = 39930 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/54/79/29d44c4af36b2b240725dce566b20f63f9b36ef267aaaa64ee7466f4f2f8/frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a", size = 94451 },
- { url = "https://files.pythonhosted.org/packages/47/47/0c999aeace6ead8a44441b4f4173e2261b18219e4ad1fe9a479871ca02fc/frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb", size = 54301 },
- { url = "https://files.pythonhosted.org/packages/8d/60/107a38c1e54176d12e06e9d4b5d755b677d71d1219217cee063911b1384f/frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec", size = 52213 },
- { url = "https://files.pythonhosted.org/packages/17/62/594a6829ac5679c25755362a9dc93486a8a45241394564309641425d3ff6/frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5", size = 240946 },
- { url = "https://files.pythonhosted.org/packages/7e/75/6c8419d8f92c80dd0ee3f63bdde2702ce6398b0ac8410ff459f9b6f2f9cb/frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76", size = 264608 },
- { url = "https://files.pythonhosted.org/packages/88/3e/82a6f0b84bc6fb7e0be240e52863c6d4ab6098cd62e4f5b972cd31e002e8/frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17", size = 261361 },
- { url = "https://files.pythonhosted.org/packages/fd/85/14e5f9ccac1b64ff2f10c927b3ffdf88772aea875882406f9ba0cec8ad84/frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba", size = 231649 },
- { url = "https://files.pythonhosted.org/packages/ee/59/928322800306f6529d1852323014ee9008551e9bb027cc38d276cbc0b0e7/frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d", size = 241853 },
- { url = "https://files.pythonhosted.org/packages/7d/bd/e01fa4f146a6f6c18c5d34cab8abdc4013774a26c4ff851128cd1bd3008e/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2", size = 243652 },
- { url = "https://files.pythonhosted.org/packages/a5/bd/e4771fd18a8ec6757033f0fa903e447aecc3fbba54e3630397b61596acf0/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f", size = 241734 },
- { url = "https://files.pythonhosted.org/packages/21/13/c83821fa5544af4f60c5d3a65d054af3213c26b14d3f5f48e43e5fb48556/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c", size = 260959 },
- { url = "https://files.pythonhosted.org/packages/71/f3/1f91c9a9bf7ed0e8edcf52698d23f3c211d8d00291a53c9f115ceb977ab1/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab", size = 262706 },
- { url = "https://files.pythonhosted.org/packages/4c/22/4a256fdf5d9bcb3ae32622c796ee5ff9451b3a13a68cfe3f68e2c95588ce/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5", size = 250401 },
- { url = "https://files.pythonhosted.org/packages/af/89/c48ebe1f7991bd2be6d5f4ed202d94960c01b3017a03d6954dd5fa9ea1e8/frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb", size = 45498 },
- { url = "https://files.pythonhosted.org/packages/28/2f/cc27d5f43e023d21fe5c19538e08894db3d7e081cbf582ad5ed366c24446/frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4", size = 51622 },
- { url = "https://files.pythonhosted.org/packages/79/43/0bed28bf5eb1c9e4301003b74453b8e7aa85fb293b31dde352aac528dafc/frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30", size = 94987 },
- { url = "https://files.pythonhosted.org/packages/bb/bf/b74e38f09a246e8abbe1e90eb65787ed745ccab6eaa58b9c9308e052323d/frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5", size = 54584 },
- { url = "https://files.pythonhosted.org/packages/2c/31/ab01375682f14f7613a1ade30149f684c84f9b8823a4391ed950c8285656/frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778", size = 52499 },
- { url = "https://files.pythonhosted.org/packages/98/a8/d0ac0b9276e1404f58fec3ab6e90a4f76b778a49373ccaf6a563f100dfbc/frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a", size = 276357 },
- { url = "https://files.pythonhosted.org/packages/ad/c9/c7761084fa822f07dac38ac29f841d4587570dd211e2262544aa0b791d21/frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869", size = 287516 },
- { url = "https://files.pythonhosted.org/packages/a1/ff/cd7479e703c39df7bdab431798cef89dc75010d8aa0ca2514c5b9321db27/frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d", size = 283131 },
- { url = "https://files.pythonhosted.org/packages/59/a0/370941beb47d237eca4fbf27e4e91389fd68699e6f4b0ebcc95da463835b/frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45", size = 261320 },
- { url = "https://files.pythonhosted.org/packages/b8/5f/c10123e8d64867bc9b4f2f510a32042a306ff5fcd7e2e09e5ae5100ee333/frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d", size = 274877 },
- { url = "https://files.pythonhosted.org/packages/fa/79/38c505601ae29d4348f21706c5d89755ceded02a745016ba2f58bd5f1ea6/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3", size = 269592 },
- { url = "https://files.pythonhosted.org/packages/19/e2/39f3a53191b8204ba9f0bb574b926b73dd2efba2a2b9d2d730517e8f7622/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a", size = 265934 },
- { url = "https://files.pythonhosted.org/packages/d5/c9/3075eb7f7f3a91f1a6b00284af4de0a65a9ae47084930916f5528144c9dd/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9", size = 283859 },
- { url = "https://files.pythonhosted.org/packages/05/f5/549f44d314c29408b962fa2b0e69a1a67c59379fb143b92a0a065ffd1f0f/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2", size = 287560 },
- { url = "https://files.pythonhosted.org/packages/9d/f8/cb09b3c24a3eac02c4c07a9558e11e9e244fb02bf62c85ac2106d1eb0c0b/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf", size = 277150 },
- { url = "https://files.pythonhosted.org/packages/37/48/38c2db3f54d1501e692d6fe058f45b6ad1b358d82cd19436efab80cfc965/frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942", size = 45244 },
- { url = "https://files.pythonhosted.org/packages/ca/8c/2ddffeb8b60a4bce3b196c32fcc30d8830d4615e7b492ec2071da801b8ad/frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d", size = 51634 },
- { url = "https://files.pythonhosted.org/packages/79/73/fa6d1a96ab7fd6e6d1c3500700963eab46813847f01ef0ccbaa726181dd5/frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21", size = 94026 },
- { url = "https://files.pythonhosted.org/packages/ab/04/ea8bf62c8868b8eada363f20ff1b647cf2e93377a7b284d36062d21d81d1/frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d", size = 54150 },
- { url = "https://files.pythonhosted.org/packages/d0/9a/8e479b482a6f2070b26bda572c5e6889bb3ba48977e81beea35b5ae13ece/frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e", size = 51927 },
- { url = "https://files.pythonhosted.org/packages/e3/12/2aad87deb08a4e7ccfb33600871bbe8f0e08cb6d8224371387f3303654d7/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a", size = 282647 },
- { url = "https://files.pythonhosted.org/packages/77/f2/07f06b05d8a427ea0060a9cef6e63405ea9e0d761846b95ef3fb3be57111/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a", size = 289052 },
- { url = "https://files.pythonhosted.org/packages/bd/9f/8bf45a2f1cd4aa401acd271b077989c9267ae8463e7c8b1eb0d3f561b65e/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee", size = 291719 },
- { url = "https://files.pythonhosted.org/packages/41/d1/1f20fd05a6c42d3868709b7604c9f15538a29e4f734c694c6bcfc3d3b935/frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6", size = 267433 },
- { url = "https://files.pythonhosted.org/packages/af/f2/64b73a9bb86f5a89fb55450e97cd5c1f84a862d4ff90d9fd1a73ab0f64a5/frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e", size = 283591 },
- { url = "https://files.pythonhosted.org/packages/29/e2/ffbb1fae55a791fd6c2938dd9ea779509c977435ba3940b9f2e8dc9d5316/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9", size = 273249 },
- { url = "https://files.pythonhosted.org/packages/2e/6e/008136a30798bb63618a114b9321b5971172a5abddff44a100c7edc5ad4f/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039", size = 271075 },
- { url = "https://files.pythonhosted.org/packages/ae/f0/4e71e54a026b06724cec9b6c54f0b13a4e9e298cc8db0f82ec70e151f5ce/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784", size = 285398 },
- { url = "https://files.pythonhosted.org/packages/4d/36/70ec246851478b1c0b59f11ef8ade9c482ff447c1363c2bd5fad45098b12/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631", size = 294445 },
- { url = "https://files.pythonhosted.org/packages/37/e0/47f87544055b3349b633a03c4d94b405956cf2437f4ab46d0928b74b7526/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", size = 280569 },
- { url = "https://files.pythonhosted.org/packages/f9/7c/490133c160fb6b84ed374c266f42800e33b50c3bbab1652764e6e1fc498a/frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", size = 44721 },
- { url = "https://files.pythonhosted.org/packages/b1/56/4e45136ffc6bdbfa68c29ca56ef53783ef4c2fd395f7cbf99a2624aa9aaa/frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", size = 51329 },
- { url = "https://files.pythonhosted.org/packages/da/3b/915f0bca8a7ea04483622e84a9bd90033bab54bdf485479556c74fd5eaf5/frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953", size = 91538 },
- { url = "https://files.pythonhosted.org/packages/c7/d1/a7c98aad7e44afe5306a2b068434a5830f1470675f0e715abb86eb15f15b/frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0", size = 52849 },
- { url = "https://files.pythonhosted.org/packages/3a/c8/76f23bf9ab15d5f760eb48701909645f686f9c64fbb8982674c241fbef14/frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2", size = 50583 },
- { url = "https://files.pythonhosted.org/packages/1f/22/462a3dd093d11df623179d7754a3b3269de3b42de2808cddef50ee0f4f48/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f", size = 265636 },
- { url = "https://files.pythonhosted.org/packages/80/cf/e075e407fc2ae7328155a1cd7e22f932773c8073c1fc78016607d19cc3e5/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608", size = 270214 },
- { url = "https://files.pythonhosted.org/packages/a1/58/0642d061d5de779f39c50cbb00df49682832923f3d2ebfb0fedf02d05f7f/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b", size = 273905 },
- { url = "https://files.pythonhosted.org/packages/ab/66/3fe0f5f8f2add5b4ab7aa4e199f767fd3b55da26e3ca4ce2cc36698e50c4/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840", size = 250542 },
- { url = "https://files.pythonhosted.org/packages/f6/b8/260791bde9198c87a465224e0e2bb62c4e716f5d198fc3a1dacc4895dbd1/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439", size = 267026 },
- { url = "https://files.pythonhosted.org/packages/2e/a4/3d24f88c527f08f8d44ade24eaee83b2627793fa62fa07cbb7ff7a2f7d42/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de", size = 257690 },
- { url = "https://files.pythonhosted.org/packages/de/9a/d311d660420b2beeff3459b6626f2ab4fb236d07afbdac034a4371fe696e/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641", size = 253893 },
- { url = "https://files.pythonhosted.org/packages/c6/23/e491aadc25b56eabd0f18c53bb19f3cdc6de30b2129ee0bc39cd387cd560/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e", size = 267006 },
- { url = "https://files.pythonhosted.org/packages/08/c4/ab918ce636a35fb974d13d666dcbe03969592aeca6c3ab3835acff01f79c/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9", size = 276157 },
- { url = "https://files.pythonhosted.org/packages/c0/29/3b7a0bbbbe5a34833ba26f686aabfe982924adbdcafdc294a7a129c31688/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03", size = 264642 },
- { url = "https://files.pythonhosted.org/packages/ab/42/0595b3dbffc2e82d7fe658c12d5a5bafcd7516c6bf2d1d1feb5387caa9c1/frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c", size = 44914 },
- { url = "https://files.pythonhosted.org/packages/17/c4/b7db1206a3fea44bf3b838ca61deb6f74424a8a5db1dd53ecb21da669be6/frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28", size = 51167 },
- { url = "https://files.pythonhosted.org/packages/da/4d/d94ff0fb0f5313902c132817c62d19cdc5bdcd0c195d392006ef4b779fc6/frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972", size = 95319 },
- { url = "https://files.pythonhosted.org/packages/8c/1b/d90e554ca2b483d31cb2296e393f72c25bdc38d64526579e95576bfda587/frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336", size = 54749 },
- { url = "https://files.pythonhosted.org/packages/f8/66/7fdecc9ef49f8db2aa4d9da916e4ecf357d867d87aea292efc11e1b2e932/frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f", size = 52718 },
- { url = "https://files.pythonhosted.org/packages/08/04/e2fddc92135276e07addbc1cf413acffa0c2d848b3e54cacf684e146df49/frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f", size = 241756 },
- { url = "https://files.pythonhosted.org/packages/c6/52/be5ff200815d8a341aee5b16b6b707355e0ca3652953852238eb92b120c2/frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6", size = 267718 },
- { url = "https://files.pythonhosted.org/packages/88/be/4bd93a58be57a3722fc544c36debdf9dcc6758f761092e894d78f18b8f20/frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411", size = 263494 },
- { url = "https://files.pythonhosted.org/packages/32/ba/58348b90193caa096ce9e9befea6ae67f38dabfd3aacb47e46137a6250a8/frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08", size = 232838 },
- { url = "https://files.pythonhosted.org/packages/f6/33/9f152105227630246135188901373c4f322cc026565ca6215b063f4c82f4/frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2", size = 242912 },
- { url = "https://files.pythonhosted.org/packages/a0/10/3db38fb3ccbafadd80a1b0d6800c987b0e3fe3ef2d117c6ced0246eea17a/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d", size = 244763 },
- { url = "https://files.pythonhosted.org/packages/e2/cd/1df468fdce2f66a4608dffe44c40cdc35eeaa67ef7fd1d813f99a9a37842/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b", size = 242841 },
- { url = "https://files.pythonhosted.org/packages/ee/5f/16097a5ca0bb6b6779c02cc9379c72fe98d56115d4c54d059fb233168fb6/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b", size = 263407 },
- { url = "https://files.pythonhosted.org/packages/0f/f7/58cd220ee1c2248ee65a32f5b4b93689e3fe1764d85537eee9fc392543bc/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0", size = 265083 },
- { url = "https://files.pythonhosted.org/packages/62/b8/49768980caabf81ac4a2d156008f7cbd0107e6b36d08a313bb31035d9201/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c", size = 251564 },
- { url = "https://files.pythonhosted.org/packages/cb/83/619327da3b86ef957ee7a0cbf3c166a09ed1e87a3f7f1ff487d7d0284683/frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3", size = 45691 },
- { url = "https://files.pythonhosted.org/packages/8b/28/407bc34a745151ed2322c690b6e7d83d7101472e81ed76e1ebdac0b70a78/frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0", size = 51767 },
- { url = "https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 },
-]
-
[[package]]
name = "gitdb"
version = "4.0.12"
@@ -620,26 +413,21 @@ wheels = [
[[package]]
name = "langchain"
-version = "0.3.18"
+version = "0.3.20"
source = { editable = "../langchain" }
dependencies = [
- { name = "aiohttp" },
{ name = "async-timeout", marker = "python_full_version < '3.11'" },
{ name = "langchain-core" },
{ name = "langchain-text-splitters" },
{ name = "langsmith" },
- { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" },
- { name = "numpy", version = "2.2.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" },
{ name = "pydantic" },
{ name = "pyyaml" },
{ name = "requests" },
{ name = "sqlalchemy" },
- { name = "tenacity" },
]
[package.metadata]
requires-dist = [
- { name = "aiohttp", specifier = ">=3.8.3,<4.0.0" },
{ name = "async-timeout", marker = "python_full_version < '3.11'", specifier = ">=4.0.0,<5.0.0" },
{ name = "langchain-anthropic", marker = "extra == 'anthropic'" },
{ name = "langchain-aws", marker = "extra == 'aws'" },
@@ -657,14 +445,12 @@ requires-dist = [
{ name = "langchain-openai", marker = "extra == 'openai'", editable = "../partners/openai" },
{ name = "langchain-text-splitters", editable = "../text-splitters" },
{ name = "langchain-together", marker = "extra == 'together'" },
+ { name = "langchain-xai", marker = "extra == 'xai'" },
{ name = "langsmith", specifier = ">=0.1.17,<0.4" },
- { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.26.4,<2" },
- { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
{ name = "pydantic", specifier = ">=2.7.4,<3.0.0" },
{ name = "pyyaml", specifier = ">=5.3" },
{ name = "requests", specifier = ">=2,<3" },
{ name = "sqlalchemy", specifier = ">=1.4,<3" },
- { name = "tenacity", specifier = ">=8.1.0,!=8.4.0,<10" },
]
[package.metadata.requires-dev]
@@ -682,7 +468,7 @@ lint = [
{ name = "ruff", specifier = ">=0.9.2,<1.0.0" },
]
test = [
- { name = "blockbuster", specifier = ">=1.5.14,<1.6" },
+ { name = "blockbuster", specifier = ">=1.5.18,<1.6" },
{ name = "cffi", marker = "python_full_version < '3.10'", specifier = "<1.17.1" },
{ name = "cffi", marker = "python_full_version >= '3.10'" },
{ name = "duckdb-engine", specifier = ">=0.9.2,<1.0.0" },
@@ -692,6 +478,7 @@ test = [
{ name = "langchain-tests", editable = "../standard-tests" },
{ name = "langchain-text-splitters", editable = "../text-splitters" },
{ name = "lark", specifier = ">=1.1.5,<2.0.0" },
+ { name = "numpy", specifier = ">=1.26.4,<3" },
{ name = "packaging", specifier = ">=24.2" },
{ name = "pandas", specifier = ">=2.0.0,<3.0.0" },
{ name = "pytest", specifier = ">=8,<9" },
@@ -722,6 +509,7 @@ typing = [
{ name = "langchain-text-splitters", editable = "../text-splitters" },
{ name = "mypy", specifier = ">=1.10,<2.0" },
{ name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
+ { name = "numpy", specifier = ">=1.26.4,<3" },
{ name = "types-chardet", specifier = ">=5.0.4.6,<6.0.0.0" },
{ name = "types-pytz", specifier = ">=2023.3.0.0,<2024.0.0.0" },
{ name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
@@ -754,6 +542,7 @@ lint = [
]
test = [
{ name = "langchain" },
+ { name = "langchain-core" },
]
typing = [
{ name = "langchain" },
@@ -778,13 +567,16 @@ lint = [
{ name = "mypy", specifier = ">=1.13.0,<2.0.0" },
{ name = "ruff", specifier = ">=0.5,<1.0" },
]
-test = [{ name = "langchain", editable = "../langchain" }]
+test = [
+ { name = "langchain", editable = "../langchain" },
+ { name = "langchain-core", editable = "../core" },
+]
test-integration = []
typing = [{ name = "langchain", editable = "../langchain" }]
[[package]]
name = "langchain-core"
-version = "0.3.35"
+version = "0.3.41"
source = { editable = "../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -816,7 +608,7 @@ dev = [
]
lint = [{ name = "ruff", specifier = ">=0.9.2,<1.0.0" }]
test = [
- { name = "blockbuster", specifier = "~=1.5.11" },
+ { name = "blockbuster", specifier = "~=1.5.18" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
{ name = "grandalf", specifier = ">=0.8,<1.0" },
{ name = "langchain-tests", directory = "../standard-tests" },
@@ -943,93 +735,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
]
-[[package]]
-name = "multidict"
-version = "6.1.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions", marker = "python_full_version < '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d6/be/504b89a5e9ca731cd47487e91c469064f8ae5af93b7259758dcfc2b9c848/multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a", size = 64002 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/29/68/259dee7fd14cf56a17c554125e534f6274c2860159692a414d0b402b9a6d/multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60", size = 48628 },
- { url = "https://files.pythonhosted.org/packages/50/79/53ba256069fe5386a4a9e80d4e12857ced9de295baf3e20c68cdda746e04/multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1", size = 29327 },
- { url = "https://files.pythonhosted.org/packages/ff/10/71f1379b05b196dae749b5ac062e87273e3f11634f447ebac12a571d90ae/multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53", size = 29689 },
- { url = "https://files.pythonhosted.org/packages/71/45/70bac4f87438ded36ad4793793c0095de6572d433d98575a5752629ef549/multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5", size = 126639 },
- { url = "https://files.pythonhosted.org/packages/80/cf/17f35b3b9509b4959303c05379c4bfb0d7dd05c3306039fc79cf035bbac0/multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581", size = 134315 },
- { url = "https://files.pythonhosted.org/packages/ef/1f/652d70ab5effb33c031510a3503d4d6efc5ec93153562f1ee0acdc895a57/multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56", size = 129471 },
- { url = "https://files.pythonhosted.org/packages/a6/64/2dd6c4c681688c0165dea3975a6a4eab4944ea30f35000f8b8af1df3148c/multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429", size = 124585 },
- { url = "https://files.pythonhosted.org/packages/87/56/e6ee5459894c7e554b57ba88f7257dc3c3d2d379cb15baaa1e265b8c6165/multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748", size = 116957 },
- { url = "https://files.pythonhosted.org/packages/36/9e/616ce5e8d375c24b84f14fc263c7ef1d8d5e8ef529dbc0f1df8ce71bb5b8/multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db", size = 128609 },
- { url = "https://files.pythonhosted.org/packages/8c/4f/4783e48a38495d000f2124020dc96bacc806a4340345211b1ab6175a6cb4/multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056", size = 123016 },
- { url = "https://files.pythonhosted.org/packages/3e/b3/4950551ab8fc39862ba5e9907dc821f896aa829b4524b4deefd3e12945ab/multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76", size = 133542 },
- { url = "https://files.pythonhosted.org/packages/96/4d/f0ce6ac9914168a2a71df117935bb1f1781916acdecbb43285e225b484b8/multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160", size = 130163 },
- { url = "https://files.pythonhosted.org/packages/be/72/17c9f67e7542a49dd252c5ae50248607dfb780bcc03035907dafefb067e3/multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7", size = 126832 },
- { url = "https://files.pythonhosted.org/packages/71/9f/72d719e248cbd755c8736c6d14780533a1606ffb3fbb0fbd77da9f0372da/multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0", size = 26402 },
- { url = "https://files.pythonhosted.org/packages/04/5a/d88cd5d00a184e1ddffc82aa2e6e915164a6d2641ed3606e766b5d2f275a/multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d", size = 28800 },
- { url = "https://files.pythonhosted.org/packages/93/13/df3505a46d0cd08428e4c8169a196131d1b0c4b515c3649829258843dde6/multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6", size = 48570 },
- { url = "https://files.pythonhosted.org/packages/f0/e1/a215908bfae1343cdb72f805366592bdd60487b4232d039c437fe8f5013d/multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156", size = 29316 },
- { url = "https://files.pythonhosted.org/packages/70/0f/6dc70ddf5d442702ed74f298d69977f904960b82368532c88e854b79f72b/multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb", size = 29640 },
- { url = "https://files.pythonhosted.org/packages/d8/6d/9c87b73a13d1cdea30b321ef4b3824449866bd7f7127eceed066ccb9b9ff/multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b", size = 131067 },
- { url = "https://files.pythonhosted.org/packages/cc/1e/1b34154fef373371fd6c65125b3d42ff5f56c7ccc6bfff91b9b3c60ae9e0/multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72", size = 138507 },
- { url = "https://files.pythonhosted.org/packages/fb/e0/0bc6b2bac6e461822b5f575eae85da6aae76d0e2a79b6665d6206b8e2e48/multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304", size = 133905 },
- { url = "https://files.pythonhosted.org/packages/ba/af/73d13b918071ff9b2205fcf773d316e0f8fefb4ec65354bbcf0b10908cc6/multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351", size = 129004 },
- { url = "https://files.pythonhosted.org/packages/74/21/23960627b00ed39643302d81bcda44c9444ebcdc04ee5bedd0757513f259/multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb", size = 121308 },
- { url = "https://files.pythonhosted.org/packages/8b/5c/cf282263ffce4a596ed0bb2aa1a1dddfe1996d6a62d08842a8d4b33dca13/multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3", size = 132608 },
- { url = "https://files.pythonhosted.org/packages/d7/3e/97e778c041c72063f42b290888daff008d3ab1427f5b09b714f5a8eff294/multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399", size = 127029 },
- { url = "https://files.pythonhosted.org/packages/47/ac/3efb7bfe2f3aefcf8d103e9a7162572f01936155ab2f7ebcc7c255a23212/multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423", size = 137594 },
- { url = "https://files.pythonhosted.org/packages/42/9b/6c6e9e8dc4f915fc90a9b7798c44a30773dea2995fdcb619870e705afe2b/multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3", size = 134556 },
- { url = "https://files.pythonhosted.org/packages/1d/10/8e881743b26aaf718379a14ac58572a240e8293a1c9d68e1418fb11c0f90/multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753", size = 130993 },
- { url = "https://files.pythonhosted.org/packages/45/84/3eb91b4b557442802d058a7579e864b329968c8d0ea57d907e7023c677f2/multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80", size = 26405 },
- { url = "https://files.pythonhosted.org/packages/9f/0b/ad879847ecbf6d27e90a6eabb7eff6b62c129eefe617ea45eae7c1f0aead/multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926", size = 28795 },
- { url = "https://files.pythonhosted.org/packages/fd/16/92057c74ba3b96d5e211b553895cd6dc7cc4d1e43d9ab8fafc727681ef71/multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa", size = 48713 },
- { url = "https://files.pythonhosted.org/packages/94/3d/37d1b8893ae79716179540b89fc6a0ee56b4a65fcc0d63535c6f5d96f217/multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436", size = 29516 },
- { url = "https://files.pythonhosted.org/packages/a2/12/adb6b3200c363062f805275b4c1e656be2b3681aada66c80129932ff0bae/multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761", size = 29557 },
- { url = "https://files.pythonhosted.org/packages/47/e9/604bb05e6e5bce1e6a5cf80a474e0f072e80d8ac105f1b994a53e0b28c42/multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e", size = 130170 },
- { url = "https://files.pythonhosted.org/packages/7e/13/9efa50801785eccbf7086b3c83b71a4fb501a4d43549c2f2f80b8787d69f/multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef", size = 134836 },
- { url = "https://files.pythonhosted.org/packages/bf/0f/93808b765192780d117814a6dfcc2e75de6dcc610009ad408b8814dca3ba/multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95", size = 133475 },
- { url = "https://files.pythonhosted.org/packages/d3/c8/529101d7176fe7dfe1d99604e48d69c5dfdcadb4f06561f465c8ef12b4df/multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925", size = 131049 },
- { url = "https://files.pythonhosted.org/packages/ca/0c/fc85b439014d5a58063e19c3a158a889deec399d47b5269a0f3b6a2e28bc/multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966", size = 120370 },
- { url = "https://files.pythonhosted.org/packages/db/46/d4416eb20176492d2258fbd47b4abe729ff3b6e9c829ea4236f93c865089/multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305", size = 125178 },
- { url = "https://files.pythonhosted.org/packages/5b/46/73697ad7ec521df7de5531a32780bbfd908ded0643cbe457f981a701457c/multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2", size = 119567 },
- { url = "https://files.pythonhosted.org/packages/cd/ed/51f060e2cb0e7635329fa6ff930aa5cffa17f4c7f5c6c3ddc3500708e2f2/multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2", size = 129822 },
- { url = "https://files.pythonhosted.org/packages/df/9e/ee7d1954b1331da3eddea0c4e08d9142da5f14b1321c7301f5014f49d492/multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6", size = 128656 },
- { url = "https://files.pythonhosted.org/packages/77/00/8538f11e3356b5d95fa4b024aa566cde7a38aa7a5f08f4912b32a037c5dc/multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", size = 125360 },
- { url = "https://files.pythonhosted.org/packages/be/05/5d334c1f2462d43fec2363cd00b1c44c93a78c3925d952e9a71caf662e96/multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", size = 26382 },
- { url = "https://files.pythonhosted.org/packages/a3/bf/f332a13486b1ed0496d624bcc7e8357bb8053823e8cd4b9a18edc1d97e73/multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", size = 28529 },
- { url = "https://files.pythonhosted.org/packages/22/67/1c7c0f39fe069aa4e5d794f323be24bf4d33d62d2a348acdb7991f8f30db/multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008", size = 48771 },
- { url = "https://files.pythonhosted.org/packages/3c/25/c186ee7b212bdf0df2519eacfb1981a017bda34392c67542c274651daf23/multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f", size = 29533 },
- { url = "https://files.pythonhosted.org/packages/67/5e/04575fd837e0958e324ca035b339cea174554f6f641d3fb2b4f2e7ff44a2/multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28", size = 29595 },
- { url = "https://files.pythonhosted.org/packages/d3/b2/e56388f86663810c07cfe4a3c3d87227f3811eeb2d08450b9e5d19d78876/multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b", size = 130094 },
- { url = "https://files.pythonhosted.org/packages/6c/ee/30ae9b4186a644d284543d55d491fbd4239b015d36b23fea43b4c94f7052/multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c", size = 134876 },
- { url = "https://files.pythonhosted.org/packages/84/c7/70461c13ba8ce3c779503c70ec9d0345ae84de04521c1f45a04d5f48943d/multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3", size = 133500 },
- { url = "https://files.pythonhosted.org/packages/4a/9f/002af221253f10f99959561123fae676148dd730e2daa2cd053846a58507/multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44", size = 131099 },
- { url = "https://files.pythonhosted.org/packages/82/42/d1c7a7301d52af79d88548a97e297f9d99c961ad76bbe6f67442bb77f097/multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2", size = 120403 },
- { url = "https://files.pythonhosted.org/packages/68/f3/471985c2c7ac707547553e8f37cff5158030d36bdec4414cb825fbaa5327/multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3", size = 125348 },
- { url = "https://files.pythonhosted.org/packages/67/2c/e6df05c77e0e433c214ec1d21ddd203d9a4770a1f2866a8ca40a545869a0/multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa", size = 119673 },
- { url = "https://files.pythonhosted.org/packages/c5/cd/bc8608fff06239c9fb333f9db7743a1b2eafe98c2666c9a196e867a3a0a4/multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa", size = 129927 },
- { url = "https://files.pythonhosted.org/packages/44/8e/281b69b7bc84fc963a44dc6e0bbcc7150e517b91df368a27834299a526ac/multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4", size = 128711 },
- { url = "https://files.pythonhosted.org/packages/12/a4/63e7cd38ed29dd9f1881d5119f272c898ca92536cdb53ffe0843197f6c85/multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6", size = 125519 },
- { url = "https://files.pythonhosted.org/packages/38/e0/4f5855037a72cd8a7a2f60a3952d9aa45feedb37ae7831642102604e8a37/multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81", size = 26426 },
- { url = "https://files.pythonhosted.org/packages/7e/a5/17ee3a4db1e310b7405f5d25834460073a8ccd86198ce044dfaf69eac073/multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774", size = 28531 },
- { url = "https://files.pythonhosted.org/packages/e7/c9/9e153a6572b38ac5ff4434113af38acf8d5e9957897cdb1f513b3d6614ed/multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c", size = 48550 },
- { url = "https://files.pythonhosted.org/packages/76/f5/79565ddb629eba6c7f704f09a09df085c8dc04643b12506f10f718cee37a/multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1", size = 29298 },
- { url = "https://files.pythonhosted.org/packages/60/1b/9851878b704bc98e641a3e0bce49382ae9e05743dac6d97748feb5b7baba/multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c", size = 29641 },
- { url = "https://files.pythonhosted.org/packages/89/87/d451d45aab9e422cb0fb2f7720c31a4c1d3012c740483c37f642eba568fb/multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c", size = 126202 },
- { url = "https://files.pythonhosted.org/packages/fa/b4/27cbe9f3e2e469359887653f2e45470272eef7295139916cc21107c6b48c/multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f", size = 133925 },
- { url = "https://files.pythonhosted.org/packages/4d/a3/afc841899face8adfd004235ce759a37619f6ec99eafd959650c5ce4df57/multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875", size = 129039 },
- { url = "https://files.pythonhosted.org/packages/5e/41/0d0fb18c1ad574f807196f5f3d99164edf9de3e169a58c6dc2d6ed5742b9/multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255", size = 124072 },
- { url = "https://files.pythonhosted.org/packages/00/22/defd7a2e71a44e6e5b9a5428f972e5b572e7fe28e404dfa6519bbf057c93/multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30", size = 116532 },
- { url = "https://files.pythonhosted.org/packages/91/25/f7545102def0b1d456ab6449388eed2dfd822debba1d65af60194904a23a/multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057", size = 128173 },
- { url = "https://files.pythonhosted.org/packages/45/79/3dbe8d35fc99f5ea610813a72ab55f426cb9cf482f860fa8496e5409be11/multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657", size = 122654 },
- { url = "https://files.pythonhosted.org/packages/97/cb/209e735eeab96e1b160825b5d0b36c56d3862abff828fc43999bb957dcad/multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28", size = 133197 },
- { url = "https://files.pythonhosted.org/packages/e4/3a/a13808a7ada62808afccea67837a79d00ad6581440015ef00f726d064c2d/multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972", size = 129754 },
- { url = "https://files.pythonhosted.org/packages/77/dd/8540e139eafb240079242da8f8ffdf9d3f4b4ad1aac5a786cd4050923783/multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43", size = 126402 },
- { url = "https://files.pythonhosted.org/packages/86/99/e82e1a275d8b1ea16d3a251474262258dbbe41c05cce0c01bceda1fc8ea5/multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada", size = 26421 },
- { url = "https://files.pythonhosted.org/packages/86/1c/9fa630272355af7e4446a2c7550c259f11ee422ab2d30ff90a0a71cf3d9e/multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a", size = 28791 },
- { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 },
-]
-
[[package]]
name = "mypy"
version = "1.14.1"
@@ -1083,118 +788,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 },
]
-[[package]]
-name = "numpy"
-version = "1.26.4"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version < '3.12'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/65/6e/09db70a523a96d25e115e71cc56a6f9031e7b8cd166c1ac8438307c14058/numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", size = 15786129 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a7/94/ace0fdea5241a27d13543ee117cbc65868e82213fb31a8eb7fe9ff23f313/numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0", size = 20631468 },
- { url = "https://files.pythonhosted.org/packages/20/f7/b24208eba89f9d1b58c1668bc6c8c4fd472b20c45573cb767f59d49fb0f6/numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a", size = 13966411 },
- { url = "https://files.pythonhosted.org/packages/fc/a5/4beee6488160798683eed5bdb7eead455892c3b4e1f78d79d8d3f3b084ac/numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4", size = 14219016 },
- { url = "https://files.pythonhosted.org/packages/4b/d7/ecf66c1cd12dc28b4040b15ab4d17b773b87fa9d29ca16125de01adb36cd/numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f", size = 18240889 },
- { url = "https://files.pythonhosted.org/packages/24/03/6f229fe3187546435c4f6f89f6d26c129d4f5bed40552899fcf1f0bf9e50/numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a", size = 13876746 },
- { url = "https://files.pythonhosted.org/packages/39/fe/39ada9b094f01f5a35486577c848fe274e374bbf8d8f472e1423a0bbd26d/numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2", size = 18078620 },
- { url = "https://files.pythonhosted.org/packages/d5/ef/6ad11d51197aad206a9ad2286dc1aac6a378059e06e8cf22cd08ed4f20dc/numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07", size = 5972659 },
- { url = "https://files.pythonhosted.org/packages/19/77/538f202862b9183f54108557bfda67e17603fc560c384559e769321c9d92/numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5", size = 15808905 },
- { url = "https://files.pythonhosted.org/packages/11/57/baae43d14fe163fa0e4c47f307b6b2511ab8d7d30177c491960504252053/numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71", size = 20630554 },
- { url = "https://files.pythonhosted.org/packages/1a/2e/151484f49fd03944c4a3ad9c418ed193cfd02724e138ac8a9505d056c582/numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef", size = 13997127 },
- { url = "https://files.pythonhosted.org/packages/79/ae/7e5b85136806f9dadf4878bf73cf223fe5c2636818ba3ab1c585d0403164/numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e", size = 14222994 },
- { url = "https://files.pythonhosted.org/packages/3a/d0/edc009c27b406c4f9cbc79274d6e46d634d139075492ad055e3d68445925/numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5", size = 18252005 },
- { url = "https://files.pythonhosted.org/packages/09/bf/2b1aaf8f525f2923ff6cfcf134ae5e750e279ac65ebf386c75a0cf6da06a/numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a", size = 13885297 },
- { url = "https://files.pythonhosted.org/packages/df/a0/4e0f14d847cfc2a633a1c8621d00724f3206cfeddeb66d35698c4e2cf3d2/numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a", size = 18093567 },
- { url = "https://files.pythonhosted.org/packages/d2/b7/a734c733286e10a7f1a8ad1ae8c90f2d33bf604a96548e0a4a3a6739b468/numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20", size = 5968812 },
- { url = "https://files.pythonhosted.org/packages/3f/6b/5610004206cf7f8e7ad91c5a85a8c71b2f2f8051a0c0c4d5916b76d6cbb2/numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2", size = 15811913 },
- { url = "https://files.pythonhosted.org/packages/95/12/8f2020a8e8b8383ac0177dc9570aad031a3beb12e38847f7129bacd96228/numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", size = 20335901 },
- { url = "https://files.pythonhosted.org/packages/75/5b/ca6c8bd14007e5ca171c7c03102d17b4f4e0ceb53957e8c44343a9546dcc/numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", size = 13685868 },
- { url = "https://files.pythonhosted.org/packages/79/f8/97f10e6755e2a7d027ca783f63044d5b1bc1ae7acb12afe6a9b4286eac17/numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", size = 13925109 },
- { url = "https://files.pythonhosted.org/packages/0f/50/de23fde84e45f5c4fda2488c759b69990fd4512387a8632860f3ac9cd225/numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", size = 17950613 },
- { url = "https://files.pythonhosted.org/packages/4c/0c/9c603826b6465e82591e05ca230dfc13376da512b25ccd0894709b054ed0/numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", size = 13572172 },
- { url = "https://files.pythonhosted.org/packages/76/8c/2ba3902e1a0fc1c74962ea9bb33a534bb05984ad7ff9515bf8d07527cadd/numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", size = 17786643 },
- { url = "https://files.pythonhosted.org/packages/28/4a/46d9e65106879492374999e76eb85f87b15328e06bd1550668f79f7b18c6/numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", size = 5677803 },
- { url = "https://files.pythonhosted.org/packages/16/2e/86f24451c2d530c88daf997cb8d6ac622c1d40d19f5a031ed68a4b73a374/numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", size = 15517754 },
- { url = "https://files.pythonhosted.org/packages/7d/24/ce71dc08f06534269f66e73c04f5709ee024a1afe92a7b6e1d73f158e1f8/numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c", size = 20636301 },
- { url = "https://files.pythonhosted.org/packages/ae/8c/ab03a7c25741f9ebc92684a20125fbc9fc1b8e1e700beb9197d750fdff88/numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be", size = 13971216 },
- { url = "https://files.pythonhosted.org/packages/6d/64/c3bcdf822269421d85fe0d64ba972003f9bb4aa9a419da64b86856c9961f/numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764", size = 14226281 },
- { url = "https://files.pythonhosted.org/packages/54/30/c2a907b9443cf42b90c17ad10c1e8fa801975f01cb9764f3f8eb8aea638b/numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3", size = 18249516 },
- { url = "https://files.pythonhosted.org/packages/43/12/01a563fc44c07095996d0129b8899daf89e4742146f7044cdbdb3101c57f/numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd", size = 13882132 },
- { url = "https://files.pythonhosted.org/packages/16/ee/9df80b06680aaa23fc6c31211387e0db349e0e36d6a63ba3bd78c5acdf11/numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c", size = 18084181 },
- { url = "https://files.pythonhosted.org/packages/28/7d/4b92e2fe20b214ffca36107f1a3e75ef4c488430e64de2d9af5db3a4637d/numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6", size = 5976360 },
- { url = "https://files.pythonhosted.org/packages/b5/42/054082bd8220bbf6f297f982f0a8f5479fcbc55c8b511d928df07b965869/numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea", size = 15814633 },
- { url = "https://files.pythonhosted.org/packages/3f/72/3df6c1c06fc83d9cfe381cccb4be2532bbd38bf93fbc9fad087b6687f1c0/numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30", size = 20455961 },
- { url = "https://files.pythonhosted.org/packages/8e/02/570545bac308b58ffb21adda0f4e220ba716fb658a63c151daecc3293350/numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c", size = 18061071 },
- { url = "https://files.pythonhosted.org/packages/f4/5f/fafd8c51235f60d49f7a88e2275e13971e90555b67da52dd6416caec32fe/numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0", size = 15709730 },
-]
-
-[[package]]
-name = "numpy"
-version = "2.2.2"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version >= '3.12.4'",
- "python_full_version >= '3.12' and python_full_version < '3.12.4'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ec/d0/c12ddfd3a02274be06ffc71f3efc6d0e457b0409c4481596881e748cb264/numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f", size = 20233295 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/70/2a/69033dc22d981ad21325314f8357438078f5c28310a6d89fb3833030ec8a/numpy-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7079129b64cb78bdc8d611d1fd7e8002c0a2565da6a47c4df8062349fee90e3e", size = 21215825 },
- { url = "https://files.pythonhosted.org/packages/31/2c/39f91e00bbd3d5639b027ac48c55dc5f2992bd2b305412d26be4c830862a/numpy-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ec6c689c61df613b783aeb21f945c4cbe6c51c28cb70aae8430577ab39f163e", size = 14354996 },
- { url = "https://files.pythonhosted.org/packages/0a/2c/d468ebd253851af10de5b3e8f3418ebabfaab5f0337a75299fbeb8b8c17a/numpy-2.2.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:40c7ff5da22cd391944a28c6a9c638a5eef77fcf71d6e3a79e1d9d9e82752715", size = 5393621 },
- { url = "https://files.pythonhosted.org/packages/7f/f4/3d8a5a0da297034106c5de92be881aca7079cde6058934215a1de91334f6/numpy-2.2.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:995f9e8181723852ca458e22de5d9b7d3ba4da3f11cc1cb113f093b271d7965a", size = 6928931 },
- { url = "https://files.pythonhosted.org/packages/47/a7/029354ab56edd43dd3f5efbfad292b8844f98b93174f322f82353fa46efa/numpy-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78ea78450fd96a498f50ee096f69c75379af5138f7881a51355ab0e11286c97", size = 14333157 },
- { url = "https://files.pythonhosted.org/packages/e3/d7/11fc594838d35c43519763310c316d4fd56f8600d3fc80a8e13e325b5c5c/numpy-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fbe72d347fbc59f94124125e73fc4976a06927ebc503ec5afbfb35f193cd957", size = 16381794 },
- { url = "https://files.pythonhosted.org/packages/af/d4/dd9b19cd4aff9c79d3f54d17f8be815407520d3116004bc574948336981b/numpy-2.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8e6da5cffbbe571f93588f562ed130ea63ee206d12851b60819512dd3e1ba50d", size = 15543990 },
- { url = "https://files.pythonhosted.org/packages/30/97/ab96b7650f27f684a9b1e46757a7294ecc50cab27701d05f146e9f779627/numpy-2.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09d6a2032faf25e8d0cadde7fd6145118ac55d2740132c1d845f98721b5ebcfd", size = 18170896 },
- { url = "https://files.pythonhosted.org/packages/81/9b/bae9618cab20db67a2ca9d711795cad29b2ca4b73034dd3b5d05b962070a/numpy-2.2.2-cp310-cp310-win32.whl", hash = "sha256:159ff6ee4c4a36a23fe01b7c3d07bd8c14cc433d9720f977fcd52c13c0098160", size = 6573458 },
- { url = "https://files.pythonhosted.org/packages/92/9b/95678092febd14070cfb7906ea7932e71e9dd5a6ab3ee948f9ed975e905d/numpy-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:64bd6e1762cd7f0986a740fee4dff927b9ec2c5e4d9a28d056eb17d332158014", size = 12915812 },
- { url = "https://files.pythonhosted.org/packages/21/67/32c68756eed84df181c06528ff57e09138f893c4653448c4967311e0f992/numpy-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:642199e98af1bd2b6aeb8ecf726972d238c9877b0f6e8221ee5ab945ec8a2189", size = 21220002 },
- { url = "https://files.pythonhosted.org/packages/3b/89/f43bcad18f2b2e5814457b1c7f7b0e671d0db12c8c0e43397ab8cb1831ed/numpy-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6d9fc9d812c81e6168b6d405bf00b8d6739a7f72ef22a9214c4241e0dc70b323", size = 14391215 },
- { url = "https://files.pythonhosted.org/packages/9c/e6/efb8cd6122bf25e86e3dd89d9dbfec9e6861c50e8810eed77d4be59b51c6/numpy-2.2.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c7d1fd447e33ee20c1f33f2c8e6634211124a9aabde3c617687d8b739aa69eac", size = 5391918 },
- { url = "https://files.pythonhosted.org/packages/47/e2/fccf89d64d9b47ffb242823d4e851fc9d36fa751908c9aac2807924d9b4e/numpy-2.2.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:451e854cfae0febe723077bd0cf0a4302a5d84ff25f0bfece8f29206c7bed02e", size = 6933133 },
- { url = "https://files.pythonhosted.org/packages/34/22/5ece749c0e5420a9380eef6fbf83d16a50010bd18fef77b9193d80a6760e/numpy-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd249bc894af67cbd8bad2c22e7cbcd46cf87ddfca1f1289d1e7e54868cc785c", size = 14338187 },
- { url = "https://files.pythonhosted.org/packages/5b/86/caec78829311f62afa6fa334c8dfcd79cffb4d24bcf96ee02ae4840d462b/numpy-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02935e2c3c0c6cbe9c7955a8efa8908dd4221d7755644c59d1bba28b94fd334f", size = 16393429 },
- { url = "https://files.pythonhosted.org/packages/c8/4e/0c25f74c88239a37924577d6ad780f3212a50f4b4b5f54f5e8c918d726bd/numpy-2.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a972cec723e0563aa0823ee2ab1df0cb196ed0778f173b381c871a03719d4826", size = 15559103 },
- { url = "https://files.pythonhosted.org/packages/d4/bd/d557f10fa50dc4d5871fb9606af563249b66af2fc6f99041a10e8757c6f1/numpy-2.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d6d6a0910c3b4368d89dde073e630882cdb266755565155bc33520283b2d9df8", size = 18182967 },
- { url = "https://files.pythonhosted.org/packages/30/e9/66cc0f66386d78ed89e45a56e2a1d051e177b6e04477c4a41cd590ef4017/numpy-2.2.2-cp311-cp311-win32.whl", hash = "sha256:860fd59990c37c3ef913c3ae390b3929d005243acca1a86facb0773e2d8d9e50", size = 6571499 },
- { url = "https://files.pythonhosted.org/packages/66/a3/4139296b481ae7304a43581046b8f0a20da6a0dfe0ee47a044cade796603/numpy-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:da1eeb460ecce8d5b8608826595c777728cdf28ce7b5a5a8c8ac8d949beadcf2", size = 12919805 },
- { url = "https://files.pythonhosted.org/packages/0c/e6/847d15770ab7a01e807bdfcd4ead5bdae57c0092b7dc83878171b6af97bb/numpy-2.2.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ac9bea18d6d58a995fac1b2cb4488e17eceeac413af014b1dd26170b766d8467", size = 20912636 },
- { url = "https://files.pythonhosted.org/packages/d1/af/f83580891577b13bd7e261416120e036d0d8fb508c8a43a73e38928b794b/numpy-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23ae9f0c2d889b7b2d88a3791f6c09e2ef827c2446f1c4a3e3e76328ee4afd9a", size = 14098403 },
- { url = "https://files.pythonhosted.org/packages/2b/86/d019fb60a9d0f1d4cf04b014fe88a9135090adfadcc31c1fadbb071d7fa7/numpy-2.2.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3074634ea4d6df66be04f6728ee1d173cfded75d002c75fac79503a880bf3825", size = 5128938 },
- { url = "https://files.pythonhosted.org/packages/7a/1b/50985edb6f1ec495a1c36452e860476f5b7ecdc3fc59ea89ccad3c4926c5/numpy-2.2.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ec0636d3f7d68520afc6ac2dc4b8341ddb725039de042faf0e311599f54eb37", size = 6661937 },
- { url = "https://files.pythonhosted.org/packages/f4/1b/17efd94cad1b9d605c3f8907fb06bcffc4ce4d1d14d46b95316cccccf2b9/numpy-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ffbb1acd69fdf8e89dd60ef6182ca90a743620957afb7066385a7bbe88dc748", size = 14049518 },
- { url = "https://files.pythonhosted.org/packages/5b/73/65d2f0b698df1731e851e3295eb29a5ab8aa06f763f7e4188647a809578d/numpy-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0349b025e15ea9d05c3d63f9657707a4e1d471128a3b1d876c095f328f8ff7f0", size = 16099146 },
- { url = "https://files.pythonhosted.org/packages/d5/69/308f55c0e19d4b5057b5df286c5433822e3c8039ede06d4051d96f1c2c4e/numpy-2.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:463247edcee4a5537841d5350bc87fe8e92d7dd0e8c71c995d2c6eecb8208278", size = 15246336 },
- { url = "https://files.pythonhosted.org/packages/f0/d8/d8d333ad0d8518d077a21aeea7b7c826eff766a2b1ce1194dea95ca0bacf/numpy-2.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9dd47ff0cb2a656ad69c38da850df3454da88ee9a6fde0ba79acceee0e79daba", size = 17863507 },
- { url = "https://files.pythonhosted.org/packages/82/6e/0b84ad3103ffc16d6673e63b5acbe7901b2af96c2837174c6318c98e27ab/numpy-2.2.2-cp312-cp312-win32.whl", hash = "sha256:4525b88c11906d5ab1b0ec1f290996c0020dd318af8b49acaa46f198b1ffc283", size = 6276491 },
- { url = "https://files.pythonhosted.org/packages/fc/84/7f801a42a67b9772a883223a0a1e12069a14626c81a732bd70aac57aebc1/numpy-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:5acea83b801e98541619af398cc0109ff48016955cc0818f478ee9ef1c5c3dcb", size = 12616372 },
- { url = "https://files.pythonhosted.org/packages/e1/fe/df5624001f4f5c3e0b78e9017bfab7fdc18a8d3b3d3161da3d64924dd659/numpy-2.2.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b208cfd4f5fe34e1535c08983a1a6803fdbc7a1e86cf13dd0c61de0b51a0aadc", size = 20899188 },
- { url = "https://files.pythonhosted.org/packages/a9/80/d349c3b5ed66bd3cb0214be60c27e32b90a506946857b866838adbe84040/numpy-2.2.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0bbe7dd86dca64854f4b6ce2ea5c60b51e36dfd597300057cf473d3615f2369", size = 14113972 },
- { url = "https://files.pythonhosted.org/packages/9d/50/949ec9cbb28c4b751edfa64503f0913cbfa8d795b4a251e7980f13a8a655/numpy-2.2.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:22ea3bb552ade325530e72a0c557cdf2dea8914d3a5e1fecf58fa5dbcc6f43cd", size = 5114294 },
- { url = "https://files.pythonhosted.org/packages/8d/f3/399c15629d5a0c68ef2aa7621d430b2be22034f01dd7f3c65a9c9666c445/numpy-2.2.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:128c41c085cab8a85dc29e66ed88c05613dccf6bc28b3866cd16050a2f5448be", size = 6648426 },
- { url = "https://files.pythonhosted.org/packages/2c/03/c72474c13772e30e1bc2e558cdffd9123c7872b731263d5648b5c49dd459/numpy-2.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:250c16b277e3b809ac20d1f590716597481061b514223c7badb7a0f9993c7f84", size = 14045990 },
- { url = "https://files.pythonhosted.org/packages/83/9c/96a9ab62274ffafb023f8ee08c88d3d31ee74ca58869f859db6845494fa6/numpy-2.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c8854b09bc4de7b041148d8550d3bd712b5c21ff6a8ed308085f190235d7ff", size = 16096614 },
- { url = "https://files.pythonhosted.org/packages/d5/34/cd0a735534c29bec7093544b3a509febc9b0df77718a9b41ffb0809c9f46/numpy-2.2.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b6fb9c32a91ec32a689ec6410def76443e3c750e7cfc3fb2206b985ffb2b85f0", size = 15242123 },
- { url = "https://files.pythonhosted.org/packages/5e/6d/541717a554a8f56fa75e91886d9b79ade2e595918690eb5d0d3dbd3accb9/numpy-2.2.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:57b4012e04cc12b78590a334907e01b3a85efb2107df2b8733ff1ed05fce71de", size = 17859160 },
- { url = "https://files.pythonhosted.org/packages/b9/a5/fbf1f2b54adab31510728edd06a05c1b30839f37cf8c9747cb85831aaf1b/numpy-2.2.2-cp313-cp313-win32.whl", hash = "sha256:4dbd80e453bd34bd003b16bd802fac70ad76bd463f81f0c518d1245b1c55e3d9", size = 6273337 },
- { url = "https://files.pythonhosted.org/packages/56/e5/01106b9291ef1d680f82bc47d0c5b5e26dfed15b0754928e8f856c82c881/numpy-2.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:5a8c863ceacae696aff37d1fd636121f1a512117652e5dfb86031c8d84836369", size = 12609010 },
- { url = "https://files.pythonhosted.org/packages/9f/30/f23d9876de0f08dceb707c4dcf7f8dd7588266745029debb12a3cdd40be6/numpy-2.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b3482cb7b3325faa5f6bc179649406058253d91ceda359c104dac0ad320e1391", size = 20924451 },
- { url = "https://files.pythonhosted.org/packages/6a/ec/6ea85b2da9d5dfa1dbb4cb3c76587fc8ddcae580cb1262303ab21c0926c4/numpy-2.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9491100aba630910489c1d0158034e1c9a6546f0b1340f716d522dc103788e39", size = 14122390 },
- { url = "https://files.pythonhosted.org/packages/68/05/bfbdf490414a7dbaf65b10c78bc243f312c4553234b6d91c94eb7c4b53c2/numpy-2.2.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:41184c416143defa34cc8eb9d070b0a5ba4f13a0fa96a709e20584638254b317", size = 5156590 },
- { url = "https://files.pythonhosted.org/packages/f7/ec/fe2e91b2642b9d6544518388a441bcd65c904cea38d9ff998e2e8ebf808e/numpy-2.2.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:7dca87ca328f5ea7dafc907c5ec100d187911f94825f8700caac0b3f4c384b49", size = 6671958 },
- { url = "https://files.pythonhosted.org/packages/b1/6f/6531a78e182f194d33ee17e59d67d03d0d5a1ce7f6be7343787828d1bd4a/numpy-2.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bc61b307655d1a7f9f4b043628b9f2b721e80839914ede634e3d485913e1fb2", size = 14019950 },
- { url = "https://files.pythonhosted.org/packages/e1/fb/13c58591d0b6294a08cc40fcc6b9552d239d773d520858ae27f39997f2ae/numpy-2.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fad446ad0bc886855ddf5909cbf8cb5d0faa637aaa6277fb4b19ade134ab3c7", size = 16079759 },
- { url = "https://files.pythonhosted.org/packages/2c/f2/f2f8edd62abb4b289f65a7f6d1f3650273af00b91b7267a2431be7f1aec6/numpy-2.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:149d1113ac15005652e8d0d3f6fd599360e1a708a4f98e43c9c77834a28238cb", size = 15226139 },
- { url = "https://files.pythonhosted.org/packages/aa/29/14a177f1a90b8ad8a592ca32124ac06af5eff32889874e53a308f850290f/numpy-2.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:106397dbbb1896f99e044efc90360d098b3335060375c26aa89c0d8a97c5f648", size = 17856316 },
- { url = "https://files.pythonhosted.org/packages/95/03/242ae8d7b97f4e0e4ab8dd51231465fb23ed5e802680d629149722e3faf1/numpy-2.2.2-cp313-cp313t-win32.whl", hash = "sha256:0eec19f8af947a61e968d5429f0bd92fec46d92b0008d0a6685b40d6adf8a4f4", size = 6329134 },
- { url = "https://files.pythonhosted.org/packages/80/94/cd9e9b04012c015cb6320ab3bf43bc615e248dddfeb163728e800a5d96f0/numpy-2.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:97b974d3ba0fb4612b77ed35d7627490e8e3dff56ab41454d9e8b23448940576", size = 12696208 },
- { url = "https://files.pythonhosted.org/packages/96/7e/1dd770ee68916ed358991ab62c2cc353ffd98d0b75b901d52183ca28e8bb/numpy-2.2.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b0531f0b0e07643eb089df4c509d30d72c9ef40defa53e41363eca8a8cc61495", size = 21047291 },
- { url = "https://files.pythonhosted.org/packages/d1/3c/ccd08578dc532a8e6927952339d4a02682b776d5e85be49ed0760308433e/numpy-2.2.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:e9e82dcb3f2ebbc8cb5ce1102d5f1c5ed236bf8a11730fb45ba82e2841ec21df", size = 6792494 },
- { url = "https://files.pythonhosted.org/packages/7c/28/8754b9aee4f97199f9a047f73bb644b5a2014994a6d7b061ba67134a42de/numpy-2.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d4142eb40ca6f94539e4db929410f2a46052a0fe7a2c1c59f6179c39938d2a", size = 16197312 },
- { url = "https://files.pythonhosted.org/packages/26/96/deb93f871f401045a684ca08a009382b247d14996d7a94fea6aa43c67b94/numpy-2.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:356ca982c188acbfa6af0d694284d8cf20e95b1c3d0aefa8929376fea9146f60", size = 12822674 },
-]
-
[[package]]
name = "orjson"
version = "3.10.15"
@@ -1286,95 +879,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 },
]
-[[package]]
-name = "propcache"
-version = "0.2.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/20/c8/2a13f78d82211490855b2fb303b6721348d0787fdd9a12ac46d99d3acde1/propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64", size = 41735 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a7/a5/0ea64c9426959ef145a938e38c832fc551843481d356713ececa9a8a64e8/propcache-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6", size = 79296 },
- { url = "https://files.pythonhosted.org/packages/76/5a/916db1aba735f55e5eca4733eea4d1973845cf77dfe67c2381a2ca3ce52d/propcache-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2", size = 45622 },
- { url = "https://files.pythonhosted.org/packages/2d/62/685d3cf268b8401ec12b250b925b21d152b9d193b7bffa5fdc4815c392c2/propcache-0.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6445804cf4ec763dc70de65a3b0d9954e868609e83850a47ca4f0cb64bd79fea", size = 45133 },
- { url = "https://files.pythonhosted.org/packages/4d/3d/31c9c29ee7192defc05aa4d01624fd85a41cf98e5922aaed206017329944/propcache-0.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9479aa06a793c5aeba49ce5c5692ffb51fcd9a7016e017d555d5e2b0045d212", size = 204809 },
- { url = "https://files.pythonhosted.org/packages/10/a1/e4050776f4797fc86140ac9a480d5dc069fbfa9d499fe5c5d2fa1ae71f07/propcache-0.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9631c5e8b5b3a0fda99cb0d29c18133bca1e18aea9effe55adb3da1adef80d3", size = 219109 },
- { url = "https://files.pythonhosted.org/packages/c9/c0/e7ae0df76343d5e107d81e59acc085cea5fd36a48aa53ef09add7503e888/propcache-0.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3156628250f46a0895f1f36e1d4fbe062a1af8718ec3ebeb746f1d23f0c5dc4d", size = 217368 },
- { url = "https://files.pythonhosted.org/packages/fc/e1/e0a2ed6394b5772508868a977d3238f4afb2eebaf9976f0b44a8d347ad63/propcache-0.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b6fb63ae352e13748289f04f37868099e69dba4c2b3e271c46061e82c745634", size = 205124 },
- { url = "https://files.pythonhosted.org/packages/50/c1/e388c232d15ca10f233c778bbdc1034ba53ede14c207a72008de45b2db2e/propcache-0.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:887d9b0a65404929641a9fabb6452b07fe4572b269d901d622d8a34a4e9043b2", size = 195463 },
- { url = "https://files.pythonhosted.org/packages/0a/fd/71b349b9def426cc73813dbd0f33e266de77305e337c8c12bfb0a2a82bfb/propcache-0.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a96dc1fa45bd8c407a0af03b2d5218392729e1822b0c32e62c5bf7eeb5fb3958", size = 198358 },
- { url = "https://files.pythonhosted.org/packages/02/f2/d7c497cd148ebfc5b0ae32808e6c1af5922215fe38c7a06e4e722fe937c8/propcache-0.2.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a7e65eb5c003a303b94aa2c3852ef130230ec79e349632d030e9571b87c4698c", size = 195560 },
- { url = "https://files.pythonhosted.org/packages/bb/57/f37041bbe5e0dfed80a3f6be2612a3a75b9cfe2652abf2c99bef3455bbad/propcache-0.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:999779addc413181912e984b942fbcc951be1f5b3663cd80b2687758f434c583", size = 196895 },
- { url = "https://files.pythonhosted.org/packages/83/36/ae3cc3e4f310bff2f064e3d2ed5558935cc7778d6f827dce74dcfa125304/propcache-0.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:19a0f89a7bb9d8048d9c4370c9c543c396e894c76be5525f5e1ad287f1750ddf", size = 207124 },
- { url = "https://files.pythonhosted.org/packages/8c/c4/811b9f311f10ce9d31a32ff14ce58500458443627e4df4ae9c264defba7f/propcache-0.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1ac2f5fe02fa75f56e1ad473f1175e11f475606ec9bd0be2e78e4734ad575034", size = 210442 },
- { url = "https://files.pythonhosted.org/packages/18/dd/a1670d483a61ecac0d7fc4305d91caaac7a8fc1b200ea3965a01cf03bced/propcache-0.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:574faa3b79e8ebac7cb1d7930f51184ba1ccf69adfdec53a12f319a06030a68b", size = 203219 },
- { url = "https://files.pythonhosted.org/packages/f9/2d/30ced5afde41b099b2dc0c6573b66b45d16d73090e85655f1a30c5a24e07/propcache-0.2.1-cp310-cp310-win32.whl", hash = "sha256:03ff9d3f665769b2a85e6157ac8b439644f2d7fd17615a82fa55739bc97863f4", size = 40313 },
- { url = "https://files.pythonhosted.org/packages/23/84/bd9b207ac80da237af77aa6e153b08ffa83264b1c7882495984fcbfcf85c/propcache-0.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:2d3af2e79991102678f53e0dbf4c35de99b6b8b58f29a27ca0325816364caaba", size = 44428 },
- { url = "https://files.pythonhosted.org/packages/bc/0f/2913b6791ebefb2b25b4efd4bb2299c985e09786b9f5b19184a88e5778dd/propcache-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ffc3cca89bb438fb9c95c13fc874012f7b9466b89328c3c8b1aa93cdcfadd16", size = 79297 },
- { url = "https://files.pythonhosted.org/packages/cf/73/af2053aeccd40b05d6e19058419ac77674daecdd32478088b79375b9ab54/propcache-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f174bbd484294ed9fdf09437f889f95807e5f229d5d93588d34e92106fbf6717", size = 45611 },
- { url = "https://files.pythonhosted.org/packages/3c/09/8386115ba7775ea3b9537730e8cf718d83bbf95bffe30757ccf37ec4e5da/propcache-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:70693319e0b8fd35dd863e3e29513875eb15c51945bf32519ef52927ca883bc3", size = 45146 },
- { url = "https://files.pythonhosted.org/packages/03/7a/793aa12f0537b2e520bf09f4c6833706b63170a211ad042ca71cbf79d9cb/propcache-0.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b480c6a4e1138e1aa137c0079b9b6305ec6dcc1098a8ca5196283e8a49df95a9", size = 232136 },
- { url = "https://files.pythonhosted.org/packages/f1/38/b921b3168d72111769f648314100558c2ea1d52eb3d1ba7ea5c4aa6f9848/propcache-0.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d27b84d5880f6d8aa9ae3edb253c59d9f6642ffbb2c889b78b60361eed449787", size = 239706 },
- { url = "https://files.pythonhosted.org/packages/14/29/4636f500c69b5edea7786db3c34eb6166f3384b905665ce312a6e42c720c/propcache-0.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:857112b22acd417c40fa4595db2fe28ab900c8c5fe4670c7989b1c0230955465", size = 238531 },
- { url = "https://files.pythonhosted.org/packages/85/14/01fe53580a8e1734ebb704a3482b7829a0ef4ea68d356141cf0994d9659b/propcache-0.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf6c4150f8c0e32d241436526f3c3f9cbd34429492abddbada2ffcff506c51af", size = 231063 },
- { url = "https://files.pythonhosted.org/packages/33/5c/1d961299f3c3b8438301ccfbff0143b69afcc30c05fa28673cface692305/propcache-0.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d4cfda1d8ed687daa4bc0274fcfd5267873db9a5bc0418c2da19273040eeb7", size = 220134 },
- { url = "https://files.pythonhosted.org/packages/00/d0/ed735e76db279ba67a7d3b45ba4c654e7b02bc2f8050671ec365d8665e21/propcache-0.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c2f992c07c0fca81655066705beae35fc95a2fa7366467366db627d9f2ee097f", size = 220009 },
- { url = "https://files.pythonhosted.org/packages/75/90/ee8fab7304ad6533872fee982cfff5a53b63d095d78140827d93de22e2d4/propcache-0.2.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4a571d97dbe66ef38e472703067021b1467025ec85707d57e78711c085984e54", size = 212199 },
- { url = "https://files.pythonhosted.org/packages/eb/ec/977ffaf1664f82e90737275873461695d4c9407d52abc2f3c3e24716da13/propcache-0.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb6178c241278d5fe853b3de743087be7f5f4c6f7d6d22a3b524d323eecec505", size = 214827 },
- { url = "https://files.pythonhosted.org/packages/57/48/031fb87ab6081764054821a71b71942161619549396224cbb242922525e8/propcache-0.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ad1af54a62ffe39cf34db1aa6ed1a1873bd548f6401db39d8e7cd060b9211f82", size = 228009 },
- { url = "https://files.pythonhosted.org/packages/1a/06/ef1390f2524850838f2390421b23a8b298f6ce3396a7cc6d39dedd4047b0/propcache-0.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e7048abd75fe40712005bcfc06bb44b9dfcd8e101dda2ecf2f5aa46115ad07ca", size = 231638 },
- { url = "https://files.pythonhosted.org/packages/38/2a/101e6386d5a93358395da1d41642b79c1ee0f3b12e31727932b069282b1d/propcache-0.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:160291c60081f23ee43d44b08a7e5fb76681221a8e10b3139618c5a9a291b84e", size = 222788 },
- { url = "https://files.pythonhosted.org/packages/db/81/786f687951d0979007e05ad9346cd357e50e3d0b0f1a1d6074df334b1bbb/propcache-0.2.1-cp311-cp311-win32.whl", hash = "sha256:819ce3b883b7576ca28da3861c7e1a88afd08cc8c96908e08a3f4dd64a228034", size = 40170 },
- { url = "https://files.pythonhosted.org/packages/cf/59/7cc7037b295d5772eceb426358bb1b86e6cab4616d971bd74275395d100d/propcache-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:edc9fc7051e3350643ad929df55c451899bb9ae6d24998a949d2e4c87fb596d3", size = 44404 },
- { url = "https://files.pythonhosted.org/packages/4c/28/1d205fe49be8b1b4df4c50024e62480a442b1a7b818e734308bb0d17e7fb/propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a", size = 79588 },
- { url = "https://files.pythonhosted.org/packages/21/ee/fc4d893f8d81cd4971affef2a6cb542b36617cd1d8ce56b406112cb80bf7/propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0", size = 45825 },
- { url = "https://files.pythonhosted.org/packages/4a/de/bbe712f94d088da1d237c35d735f675e494a816fd6f54e9db2f61ef4d03f/propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d", size = 45357 },
- { url = "https://files.pythonhosted.org/packages/7f/14/7ae06a6cf2a2f1cb382586d5a99efe66b0b3d0c6f9ac2f759e6f7af9d7cf/propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4", size = 241869 },
- { url = "https://files.pythonhosted.org/packages/cc/59/227a78be960b54a41124e639e2c39e8807ac0c751c735a900e21315f8c2b/propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d", size = 247884 },
- { url = "https://files.pythonhosted.org/packages/84/58/f62b4ffaedf88dc1b17f04d57d8536601e4e030feb26617228ef930c3279/propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5", size = 248486 },
- { url = "https://files.pythonhosted.org/packages/1c/07/ebe102777a830bca91bbb93e3479cd34c2ca5d0361b83be9dbd93104865e/propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24", size = 243649 },
- { url = "https://files.pythonhosted.org/packages/ed/bc/4f7aba7f08f520376c4bb6a20b9a981a581b7f2e385fa0ec9f789bb2d362/propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff", size = 229103 },
- { url = "https://files.pythonhosted.org/packages/fe/d5/04ac9cd4e51a57a96f78795e03c5a0ddb8f23ec098b86f92de028d7f2a6b/propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f", size = 226607 },
- { url = "https://files.pythonhosted.org/packages/e3/f0/24060d959ea41d7a7cc7fdbf68b31852331aabda914a0c63bdb0e22e96d6/propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec", size = 221153 },
- { url = "https://files.pythonhosted.org/packages/77/a7/3ac76045a077b3e4de4859a0753010765e45749bdf53bd02bc4d372da1a0/propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348", size = 222151 },
- { url = "https://files.pythonhosted.org/packages/e7/af/5e29da6f80cebab3f5a4dcd2a3240e7f56f2c4abf51cbfcc99be34e17f0b/propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6", size = 233812 },
- { url = "https://files.pythonhosted.org/packages/8c/89/ebe3ad52642cc5509eaa453e9f4b94b374d81bae3265c59d5c2d98efa1b4/propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6", size = 238829 },
- { url = "https://files.pythonhosted.org/packages/e9/2f/6b32f273fa02e978b7577159eae7471b3cfb88b48563b1c2578b2d7ca0bb/propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518", size = 230704 },
- { url = "https://files.pythonhosted.org/packages/5c/2e/f40ae6ff5624a5f77edd7b8359b208b5455ea113f68309e2b00a2e1426b6/propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246", size = 40050 },
- { url = "https://files.pythonhosted.org/packages/3b/77/a92c3ef994e47180862b9d7d11e37624fb1c00a16d61faf55115d970628b/propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1", size = 44117 },
- { url = "https://files.pythonhosted.org/packages/0f/2a/329e0547cf2def8857157f9477669043e75524cc3e6251cef332b3ff256f/propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc", size = 77002 },
- { url = "https://files.pythonhosted.org/packages/12/2d/c4df5415e2382f840dc2ecbca0eeb2293024bc28e57a80392f2012b4708c/propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9", size = 44639 },
- { url = "https://files.pythonhosted.org/packages/d0/5a/21aaa4ea2f326edaa4e240959ac8b8386ea31dedfdaa636a3544d9e7a408/propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439", size = 44049 },
- { url = "https://files.pythonhosted.org/packages/4e/3e/021b6cd86c0acc90d74784ccbb66808b0bd36067a1bf3e2deb0f3845f618/propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536", size = 224819 },
- { url = "https://files.pythonhosted.org/packages/3c/57/c2fdeed1b3b8918b1770a133ba5c43ad3d78e18285b0c06364861ef5cc38/propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629", size = 229625 },
- { url = "https://files.pythonhosted.org/packages/9d/81/70d4ff57bf2877b5780b466471bebf5892f851a7e2ca0ae7ffd728220281/propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b", size = 232934 },
- { url = "https://files.pythonhosted.org/packages/3c/b9/bb51ea95d73b3fb4100cb95adbd4e1acaf2cbb1fd1083f5468eeb4a099a8/propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052", size = 227361 },
- { url = "https://files.pythonhosted.org/packages/f1/20/3c6d696cd6fd70b29445960cc803b1851a1131e7a2e4ee261ee48e002bcd/propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce", size = 213904 },
- { url = "https://files.pythonhosted.org/packages/a1/cb/1593bfc5ac6d40c010fa823f128056d6bc25b667f5393781e37d62f12005/propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d", size = 212632 },
- { url = "https://files.pythonhosted.org/packages/6d/5c/e95617e222be14a34c709442a0ec179f3207f8a2b900273720501a70ec5e/propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce", size = 207897 },
- { url = "https://files.pythonhosted.org/packages/8e/3b/56c5ab3dc00f6375fbcdeefdede5adf9bee94f1fab04adc8db118f0f9e25/propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95", size = 208118 },
- { url = "https://files.pythonhosted.org/packages/86/25/d7ef738323fbc6ebcbce33eb2a19c5e07a89a3df2fded206065bd5e868a9/propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf", size = 217851 },
- { url = "https://files.pythonhosted.org/packages/b3/77/763e6cef1852cf1ba740590364ec50309b89d1c818e3256d3929eb92fabf/propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f", size = 222630 },
- { url = "https://files.pythonhosted.org/packages/4f/e9/0f86be33602089c701696fbed8d8c4c07b6ee9605c5b7536fd27ed540c5b/propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30", size = 216269 },
- { url = "https://files.pythonhosted.org/packages/cc/02/5ac83217d522394b6a2e81a2e888167e7ca629ef6569a3f09852d6dcb01a/propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6", size = 39472 },
- { url = "https://files.pythonhosted.org/packages/f4/33/d6f5420252a36034bc8a3a01171bc55b4bff5df50d1c63d9caa50693662f/propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1", size = 43363 },
- { url = "https://files.pythonhosted.org/packages/0a/08/6ab7f65240a16fa01023125e65258acf7e4884f483f267cdd6fcc48f37db/propcache-0.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6a9a8c34fb7bb609419a211e59da8887eeca40d300b5ea8e56af98f6fbbb1541", size = 80403 },
- { url = "https://files.pythonhosted.org/packages/34/fe/e7180285e21b4e6dff7d311fdf22490c9146a09a02834b5232d6248c6004/propcache-0.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae1aa1cd222c6d205853b3013c69cd04515f9d6ab6de4b0603e2e1c33221303e", size = 46152 },
- { url = "https://files.pythonhosted.org/packages/9c/36/aa74d884af826030ba9cee2ac109b0664beb7e9449c315c9c44db99efbb3/propcache-0.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:accb6150ce61c9c4b7738d45550806aa2b71c7668c6942f17b0ac182b6142fd4", size = 45674 },
- { url = "https://files.pythonhosted.org/packages/22/59/6fe80a3fe7720f715f2c0f6df250dacbd7cad42832410dbd84c719c52f78/propcache-0.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eee736daafa7af6d0a2dc15cc75e05c64f37fc37bafef2e00d77c14171c2097", size = 207792 },
- { url = "https://files.pythonhosted.org/packages/4a/68/584cd51dd8f4d0f5fff5b128ce0cdb257cde903898eecfb92156bbc2c780/propcache-0.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7a31fc1e1bd362874863fdeed71aed92d348f5336fd84f2197ba40c59f061bd", size = 223280 },
- { url = "https://files.pythonhosted.org/packages/85/cb/4c3528460c41e61b06ec3f970c0f89f87fa21f63acac8642ed81a886c164/propcache-0.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba4cfa1052819d16699e1d55d18c92b6e094d4517c41dd231a8b9f87b6fa681", size = 221293 },
- { url = "https://files.pythonhosted.org/packages/69/c0/560e050aa6d31eeece3490d1174da508f05ab27536dfc8474af88b97160a/propcache-0.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f089118d584e859c62b3da0892b88a83d611c2033ac410e929cb6754eec0ed16", size = 208259 },
- { url = "https://files.pythonhosted.org/packages/0c/87/d6c86a77632eb1ba86a328e3313159f246e7564cb5951e05ed77555826a0/propcache-0.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:781e65134efaf88feb447e8c97a51772aa75e48b794352f94cb7ea717dedda0d", size = 198632 },
- { url = "https://files.pythonhosted.org/packages/3a/2b/3690ea7b662dc762ab7af5f3ef0e2d7513c823d193d7b2a1b4cda472c2be/propcache-0.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31f5af773530fd3c658b32b6bdc2d0838543de70eb9a2156c03e410f7b0d3aae", size = 203516 },
- { url = "https://files.pythonhosted.org/packages/4d/b5/afe716c16c23c77657185c257a41918b83e03993b6ccdfa748e5e7d328e9/propcache-0.2.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a7a078f5d37bee6690959c813977da5291b24286e7b962e62a94cec31aa5188b", size = 199402 },
- { url = "https://files.pythonhosted.org/packages/a4/c0/2d2df3aa7f8660d0d4cc4f1e00490c48d5958da57082e70dea7af366f876/propcache-0.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cea7daf9fc7ae6687cf1e2c049752f19f146fdc37c2cc376e7d0032cf4f25347", size = 200528 },
- { url = "https://files.pythonhosted.org/packages/21/c8/65ac9142f5e40c8497f7176e71d18826b09e06dd4eb401c9a4ee41aa9c74/propcache-0.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b3489ff1ed1e8315674d0775dc7d2195fb13ca17b3808721b54dbe9fd020faf", size = 211254 },
- { url = "https://files.pythonhosted.org/packages/09/e4/edb70b447a1d8142df51ec7511e84aa64d7f6ce0a0fdf5eb55363cdd0935/propcache-0.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9403db39be1393618dd80c746cb22ccda168efce239c73af13c3763ef56ffc04", size = 214589 },
- { url = "https://files.pythonhosted.org/packages/cb/02/817f309ec8d8883287781d6d9390f80b14db6e6de08bc659dfe798a825c2/propcache-0.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5d97151bc92d2b2578ff7ce779cdb9174337390a535953cbb9452fb65164c587", size = 207283 },
- { url = "https://files.pythonhosted.org/packages/d7/fe/2d18612096ed2212cfef821b6fccdba5d52efc1d64511c206c5c16be28fd/propcache-0.2.1-cp39-cp39-win32.whl", hash = "sha256:9caac6b54914bdf41bcc91e7eb9147d331d29235a7c967c150ef5df6464fd1bb", size = 40866 },
- { url = "https://files.pythonhosted.org/packages/24/2e/b5134802e7b57c403c7b73c7a39374e7a6b7f128d1968b4a4b4c0b700250/propcache-0.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:92fc4500fcb33899b05ba73276dfb684a20d31caa567b7cb5252d48f896a91b1", size = 44975 },
- { url = "https://files.pythonhosted.org/packages/41/b6/c5319caea262f4821995dca2107483b94a3345d4607ad797c76cb9c36bcc/propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54", size = 11818 },
-]
-
[[package]]
name = "pycparser"
version = "2.22"
@@ -1901,100 +1405,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067 },
]
-[[package]]
-name = "yarl"
-version = "1.18.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "idna" },
- { name = "multidict" },
- { name = "propcache" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b7/9d/4b94a8e6d2b51b599516a5cb88e5bc99b4d8d4583e468057eaa29d5f0918/yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1", size = 181062 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d2/98/e005bc608765a8a5569f58e650961314873c8469c333616eb40bff19ae97/yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34", size = 141458 },
- { url = "https://files.pythonhosted.org/packages/df/5d/f8106b263b8ae8a866b46d9be869ac01f9b3fb7f2325f3ecb3df8003f796/yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7", size = 94365 },
- { url = "https://files.pythonhosted.org/packages/56/3e/d8637ddb9ba69bf851f765a3ee288676f7cf64fb3be13760c18cbc9d10bd/yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed", size = 92181 },
- { url = "https://files.pythonhosted.org/packages/76/f9/d616a5c2daae281171de10fba41e1c0e2d8207166fc3547252f7d469b4e1/yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde", size = 315349 },
- { url = "https://files.pythonhosted.org/packages/bb/b4/3ea5e7b6f08f698b3769a06054783e434f6d59857181b5c4e145de83f59b/yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b", size = 330494 },
- { url = "https://files.pythonhosted.org/packages/55/f1/e0fc810554877b1b67420568afff51b967baed5b53bcc983ab164eebf9c9/yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5", size = 326927 },
- { url = "https://files.pythonhosted.org/packages/a9/42/b1753949b327b36f210899f2dd0a0947c0c74e42a32de3f8eb5c7d93edca/yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc", size = 319703 },
- { url = "https://files.pythonhosted.org/packages/f0/6d/e87c62dc9635daefb064b56f5c97df55a2e9cc947a2b3afd4fd2f3b841c7/yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd", size = 310246 },
- { url = "https://files.pythonhosted.org/packages/e3/ef/e2e8d1785cdcbd986f7622d7f0098205f3644546da7919c24b95790ec65a/yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990", size = 319730 },
- { url = "https://files.pythonhosted.org/packages/fc/15/8723e22345bc160dfde68c4b3ae8b236e868f9963c74015f1bc8a614101c/yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db", size = 321681 },
- { url = "https://files.pythonhosted.org/packages/86/09/bf764e974f1516efa0ae2801494a5951e959f1610dd41edbfc07e5e0f978/yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62", size = 324812 },
- { url = "https://files.pythonhosted.org/packages/f6/4c/20a0187e3b903c97d857cf0272d687c1b08b03438968ae8ffc50fe78b0d6/yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760", size = 337011 },
- { url = "https://files.pythonhosted.org/packages/c9/71/6244599a6e1cc4c9f73254a627234e0dad3883ece40cc33dce6265977461/yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b", size = 338132 },
- { url = "https://files.pythonhosted.org/packages/af/f5/e0c3efaf74566c4b4a41cb76d27097df424052a064216beccae8d303c90f/yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690", size = 331849 },
- { url = "https://files.pythonhosted.org/packages/8a/b8/3d16209c2014c2f98a8f658850a57b716efb97930aebf1ca0d9325933731/yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6", size = 84309 },
- { url = "https://files.pythonhosted.org/packages/fd/b7/2e9a5b18eb0fe24c3a0e8bae994e812ed9852ab4fd067c0107fadde0d5f0/yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8", size = 90484 },
- { url = "https://files.pythonhosted.org/packages/40/93/282b5f4898d8e8efaf0790ba6d10e2245d2c9f30e199d1a85cae9356098c/yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069", size = 141555 },
- { url = "https://files.pythonhosted.org/packages/6d/9c/0a49af78df099c283ca3444560f10718fadb8a18dc8b3edf8c7bd9fd7d89/yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193", size = 94351 },
- { url = "https://files.pythonhosted.org/packages/5a/a1/205ab51e148fdcedad189ca8dd587794c6f119882437d04c33c01a75dece/yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889", size = 92286 },
- { url = "https://files.pythonhosted.org/packages/ed/fe/88b690b30f3f59275fb674f5f93ddd4a3ae796c2b62e5bb9ece8a4914b83/yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8", size = 340649 },
- { url = "https://files.pythonhosted.org/packages/07/eb/3b65499b568e01f36e847cebdc8d7ccb51fff716dbda1ae83c3cbb8ca1c9/yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca", size = 356623 },
- { url = "https://files.pythonhosted.org/packages/33/46/f559dc184280b745fc76ec6b1954de2c55595f0ec0a7614238b9ebf69618/yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8", size = 354007 },
- { url = "https://files.pythonhosted.org/packages/af/ba/1865d85212351ad160f19fb99808acf23aab9a0f8ff31c8c9f1b4d671fc9/yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae", size = 344145 },
- { url = "https://files.pythonhosted.org/packages/94/cb/5c3e975d77755d7b3d5193e92056b19d83752ea2da7ab394e22260a7b824/yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3", size = 336133 },
- { url = "https://files.pythonhosted.org/packages/19/89/b77d3fd249ab52a5c40859815765d35c91425b6bb82e7427ab2f78f5ff55/yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb", size = 347967 },
- { url = "https://files.pythonhosted.org/packages/35/bd/f6b7630ba2cc06c319c3235634c582a6ab014d52311e7d7c22f9518189b5/yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e", size = 346397 },
- { url = "https://files.pythonhosted.org/packages/18/1a/0b4e367d5a72d1f095318344848e93ea70da728118221f84f1bf6c1e39e7/yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59", size = 350206 },
- { url = "https://files.pythonhosted.org/packages/b5/cf/320fff4367341fb77809a2d8d7fe75b5d323a8e1b35710aafe41fdbf327b/yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d", size = 362089 },
- { url = "https://files.pythonhosted.org/packages/57/cf/aadba261d8b920253204085268bad5e8cdd86b50162fcb1b10c10834885a/yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e", size = 366267 },
- { url = "https://files.pythonhosted.org/packages/54/58/fb4cadd81acdee6dafe14abeb258f876e4dd410518099ae9a35c88d8097c/yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a", size = 359141 },
- { url = "https://files.pythonhosted.org/packages/9a/7a/4c571597589da4cd5c14ed2a0b17ac56ec9ee7ee615013f74653169e702d/yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1", size = 84402 },
- { url = "https://files.pythonhosted.org/packages/ae/7b/8600250b3d89b625f1121d897062f629883c2f45339623b69b1747ec65fa/yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5", size = 91030 },
- { url = "https://files.pythonhosted.org/packages/33/85/bd2e2729752ff4c77338e0102914897512e92496375e079ce0150a6dc306/yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50", size = 142644 },
- { url = "https://files.pythonhosted.org/packages/ff/74/1178322cc0f10288d7eefa6e4a85d8d2e28187ccab13d5b844e8b5d7c88d/yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576", size = 94962 },
- { url = "https://files.pythonhosted.org/packages/be/75/79c6acc0261e2c2ae8a1c41cf12265e91628c8c58ae91f5ff59e29c0787f/yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640", size = 92795 },
- { url = "https://files.pythonhosted.org/packages/6b/32/927b2d67a412c31199e83fefdce6e645247b4fb164aa1ecb35a0f9eb2058/yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2", size = 332368 },
- { url = "https://files.pythonhosted.org/packages/19/e5/859fca07169d6eceeaa4fde1997c91d8abde4e9a7c018e371640c2da2b71/yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75", size = 342314 },
- { url = "https://files.pythonhosted.org/packages/08/75/76b63ccd91c9e03ab213ef27ae6add2e3400e77e5cdddf8ed2dbc36e3f21/yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512", size = 341987 },
- { url = "https://files.pythonhosted.org/packages/1a/e1/a097d5755d3ea8479a42856f51d97eeff7a3a7160593332d98f2709b3580/yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba", size = 336914 },
- { url = "https://files.pythonhosted.org/packages/0b/42/e1b4d0e396b7987feceebe565286c27bc085bf07d61a59508cdaf2d45e63/yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb", size = 325765 },
- { url = "https://files.pythonhosted.org/packages/7e/18/03a5834ccc9177f97ca1bbb245b93c13e58e8225276f01eedc4cc98ab820/yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272", size = 344444 },
- { url = "https://files.pythonhosted.org/packages/c8/03/a713633bdde0640b0472aa197b5b86e90fbc4c5bc05b727b714cd8a40e6d/yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6", size = 340760 },
- { url = "https://files.pythonhosted.org/packages/eb/99/f6567e3f3bbad8fd101886ea0276c68ecb86a2b58be0f64077396cd4b95e/yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e", size = 346484 },
- { url = "https://files.pythonhosted.org/packages/8e/a9/84717c896b2fc6cb15bd4eecd64e34a2f0a9fd6669e69170c73a8b46795a/yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb", size = 359864 },
- { url = "https://files.pythonhosted.org/packages/1e/2e/d0f5f1bef7ee93ed17e739ec8dbcb47794af891f7d165fa6014517b48169/yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393", size = 364537 },
- { url = "https://files.pythonhosted.org/packages/97/8a/568d07c5d4964da5b02621a517532adb8ec5ba181ad1687191fffeda0ab6/yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285", size = 357861 },
- { url = "https://files.pythonhosted.org/packages/7d/e3/924c3f64b6b3077889df9a1ece1ed8947e7b61b0a933f2ec93041990a677/yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2", size = 84097 },
- { url = "https://files.pythonhosted.org/packages/34/45/0e055320daaabfc169b21ff6174567b2c910c45617b0d79c68d7ab349b02/yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477", size = 90399 },
- { url = "https://files.pythonhosted.org/packages/30/c7/c790513d5328a8390be8f47be5d52e141f78b66c6c48f48d241ca6bd5265/yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb", size = 140789 },
- { url = "https://files.pythonhosted.org/packages/30/aa/a2f84e93554a578463e2edaaf2300faa61c8701f0898725842c704ba5444/yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa", size = 94144 },
- { url = "https://files.pythonhosted.org/packages/c6/fc/d68d8f83714b221a85ce7866832cba36d7c04a68fa6a960b908c2c84f325/yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782", size = 91974 },
- { url = "https://files.pythonhosted.org/packages/56/4e/d2563d8323a7e9a414b5b25341b3942af5902a2263d36d20fb17c40411e2/yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0", size = 333587 },
- { url = "https://files.pythonhosted.org/packages/25/c9/cfec0bc0cac8d054be223e9f2c7909d3e8442a856af9dbce7e3442a8ec8d/yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482", size = 344386 },
- { url = "https://files.pythonhosted.org/packages/ab/5d/4c532190113b25f1364d25f4c319322e86232d69175b91f27e3ebc2caf9a/yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186", size = 345421 },
- { url = "https://files.pythonhosted.org/packages/23/d1/6cdd1632da013aa6ba18cee4d750d953104a5e7aac44e249d9410a972bf5/yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58", size = 339384 },
- { url = "https://files.pythonhosted.org/packages/9a/c4/6b3c39bec352e441bd30f432cda6ba51681ab19bb8abe023f0d19777aad1/yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53", size = 326689 },
- { url = "https://files.pythonhosted.org/packages/23/30/07fb088f2eefdc0aa4fc1af4e3ca4eb1a3aadd1ce7d866d74c0f124e6a85/yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2", size = 345453 },
- { url = "https://files.pythonhosted.org/packages/63/09/d54befb48f9cd8eec43797f624ec37783a0266855f4930a91e3d5c7717f8/yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8", size = 341872 },
- { url = "https://files.pythonhosted.org/packages/91/26/fd0ef9bf29dd906a84b59f0cd1281e65b0c3e08c6aa94b57f7d11f593518/yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1", size = 347497 },
- { url = "https://files.pythonhosted.org/packages/d9/b5/14ac7a256d0511b2ac168d50d4b7d744aea1c1aa20c79f620d1059aab8b2/yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a", size = 359981 },
- { url = "https://files.pythonhosted.org/packages/ca/b3/d493221ad5cbd18bc07e642894030437e405e1413c4236dd5db6e46bcec9/yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10", size = 366229 },
- { url = "https://files.pythonhosted.org/packages/04/56/6a3e2a5d9152c56c346df9b8fb8edd2c8888b1e03f96324d457e5cf06d34/yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8", size = 360383 },
- { url = "https://files.pythonhosted.org/packages/fd/b7/4b3c7c7913a278d445cc6284e59b2e62fa25e72758f888b7a7a39eb8423f/yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d", size = 310152 },
- { url = "https://files.pythonhosted.org/packages/f5/d5/688db678e987c3e0fb17867970700b92603cadf36c56e5fb08f23e822a0c/yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c", size = 315723 },
- { url = "https://files.pythonhosted.org/packages/6a/3b/fec4b08f5e88f68e56ee698a59284a73704df2e0e0b5bdf6536c86e76c76/yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04", size = 142780 },
- { url = "https://files.pythonhosted.org/packages/ed/85/796b0d6a22d536ec8e14bdbb86519250bad980cec450b6e299b1c2a9079e/yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719", size = 94981 },
- { url = "https://files.pythonhosted.org/packages/ee/0e/a830fd2238f7a29050f6dd0de748b3d6f33a7dbb67dbbc081a970b2bbbeb/yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e", size = 92789 },
- { url = "https://files.pythonhosted.org/packages/0f/4f/438c9fd668954779e48f08c0688ee25e0673380a21bb1e8ccc56de5b55d7/yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee", size = 317327 },
- { url = "https://files.pythonhosted.org/packages/bd/79/a78066f06179b4ed4581186c136c12fcfb928c475cbeb23743e71a991935/yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789", size = 336999 },
- { url = "https://files.pythonhosted.org/packages/55/02/527963cf65f34a06aed1e766ff9a3b3e7d0eaa1c90736b2948a62e528e1d/yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8", size = 331693 },
- { url = "https://files.pythonhosted.org/packages/a2/2a/167447ae39252ba624b98b8c13c0ba35994d40d9110e8a724c83dbbb5822/yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c", size = 321473 },
- { url = "https://files.pythonhosted.org/packages/55/03/07955fabb20082373be311c91fd78abe458bc7ff9069d34385e8bddad20e/yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5", size = 313571 },
- { url = "https://files.pythonhosted.org/packages/95/e2/67c8d3ec58a8cd8ddb1d63bd06eb7e7b91c9f148707a3eeb5a7ed87df0ef/yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1", size = 325004 },
- { url = "https://files.pythonhosted.org/packages/06/43/51ceb3e427368fe6ccd9eccd162be227fd082523e02bad1fd3063daf68da/yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24", size = 322677 },
- { url = "https://files.pythonhosted.org/packages/e4/0e/7ef286bfb23267739a703f7b967a858e2128c10bea898de8fa027e962521/yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318", size = 332806 },
- { url = "https://files.pythonhosted.org/packages/c8/94/2d1f060f4bfa47c8bd0bcb652bfe71fba881564bcac06ebb6d8ced9ac3bc/yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985", size = 339919 },
- { url = "https://files.pythonhosted.org/packages/8e/8d/73b5f9a6ab69acddf1ca1d5e7bc92f50b69124512e6c26b36844531d7f23/yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910", size = 340960 },
- { url = "https://files.pythonhosted.org/packages/41/13/ce6bc32be4476b60f4f8694831f49590884b2c975afcffc8d533bf2be7ec/yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1", size = 336592 },
- { url = "https://files.pythonhosted.org/packages/81/d5/6e0460292d6299ac3919945f912b16b104f4e81ab20bf53e0872a1296daf/yarl-1.18.3-cp39-cp39-win32.whl", hash = "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5", size = 84833 },
- { url = "https://files.pythonhosted.org/packages/b2/fc/a8aef69156ad5508165d8ae956736d55c3a68890610834bd985540966008/yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9", size = 90968 },
- { url = "https://files.pythonhosted.org/packages/f5/4b/a06e0ec3d155924f77835ed2d167ebd3b211a7b0853da1cf8d8414d784ef/yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b", size = 45109 },
-]
-
[[package]]
name = "zstandard"
version = "0.23.0"
diff --git a/libs/community/langchain_community/agent_toolkits/jira/toolkit.py b/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
index ad585b2e491..204a11d6a2d 100644
--- a/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
@@ -43,27 +43,27 @@ class JiraToolkit(BaseToolkit):
operations: List[Dict] = [
{
"mode": "jql",
- "name": "JQL Query",
+ "name": "jql_query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
- "name": "Get Projects",
+ "name": "get_projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
- "name": "Create Issue",
+ "name": "create_issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
- "name": "Catch all Jira API call",
+ "name": "catch_all_jira_api",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
- "name": "Create confluence page",
+ "name": "create_confluence_page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
diff --git a/libs/community/langchain_community/chat_models/perplexity.py b/libs/community/langchain_community/chat_models/perplexity.py
index 0e5ea613447..0266244223d 100644
--- a/libs/community/langchain_community/chat_models/perplexity.py
+++ b/libs/community/langchain_community/chat_models/perplexity.py
@@ -38,6 +38,7 @@ from langchain_core.messages import (
SystemMessageChunk,
ToolMessageChunk,
)
+from langchain_core.messages.ai import UsageMetadata
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
@@ -59,6 +60,17 @@ def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and is_basemodel_subclass(obj)
+def _create_usage_metadata(token_usage: dict) -> UsageMetadata:
+ input_tokens = token_usage.get("prompt_tokens", 0)
+ output_tokens = token_usage.get("completion_tokens", 0)
+ total_tokens = token_usage.get("total_tokens", input_tokens + output_tokens)
+ return UsageMetadata(
+ input_tokens=input_tokens,
+ output_tokens=output_tokens,
+ total_tokens=total_tokens,
+ )
+
+
class ChatPerplexity(BaseChatModel):
"""`Perplexity AI` Chat models API.
@@ -238,9 +250,27 @@ class ChatPerplexity(BaseChatModel):
messages=message_dicts, stream=True, **params
)
first_chunk = True
+ prev_total_usage: Optional[UsageMetadata] = None
for chunk in stream_resp:
if not isinstance(chunk, dict):
chunk = chunk.dict()
+ # Collect standard usage metadata (transform from aggregate to delta)
+ if total_usage := chunk.get("usage"):
+ lc_total_usage = _create_usage_metadata(total_usage)
+ if prev_total_usage:
+ usage_metadata: Optional[UsageMetadata] = {
+ "input_tokens": lc_total_usage["input_tokens"]
+ - prev_total_usage["input_tokens"],
+ "output_tokens": lc_total_usage["output_tokens"]
+ - prev_total_usage["output_tokens"],
+ "total_tokens": lc_total_usage["total_tokens"]
+ - prev_total_usage["total_tokens"],
+ }
+ else:
+ usage_metadata = lc_total_usage
+ prev_total_usage = lc_total_usage
+ else:
+ usage_metadata = None
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
@@ -249,6 +279,8 @@ class ChatPerplexity(BaseChatModel):
chunk = self._convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
+ if isinstance(chunk, AIMessageChunk) and usage_metadata:
+ chunk.usage_metadata = usage_metadata
if first_chunk:
chunk.additional_kwargs |= {"citations": citations}
first_chunk = False
@@ -278,9 +310,15 @@ class ChatPerplexity(BaseChatModel):
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.client.chat.completions.create(messages=message_dicts, **params)
+ if usage := getattr(response, "usage", None):
+ usage_metadata = _create_usage_metadata(usage.model_dump())
+ else:
+ usage_metadata = None
+
message = AIMessage(
content=response.choices[0].message.content,
additional_kwargs={"citations": response.citations},
+ usage_metadata=usage_metadata,
)
return ChatResult(generations=[ChatGeneration(message=message)])
diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py
index acb099b18b8..4c44272f271 100644
--- a/libs/community/langchain_community/chat_models/tongyi.py
+++ b/libs/community/langchain_community/chat_models/tongyi.py
@@ -123,6 +123,8 @@ def convert_dict_to_message(
tool_calls.append(parsed_tool)
except Exception as e:
invalid_tool_calls.append(make_invalid_tool_call(value, str(e)))
+ elif "partial" in _dict and isinstance(_dict["partial"], bool):
+ additional_kwargs = {"partial": _dict["partial"]}
else:
additional_kwargs = {}
@@ -204,6 +206,9 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
message_dict = {"role": "assistant", "content": message.content}
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
+ # support Partial Mode for text continuation
+ if "partial" in message.additional_kwargs:
+ message_dict["partial"] = message.additional_kwargs["partial"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ToolMessage):
@@ -778,8 +783,6 @@ class ChatTongyi(BaseChatModel):
]
if len(system_message_indices) == 1 and system_message_indices[0] != 0:
raise ValueError("System message can only be the first message.")
- elif len(system_message_indices) > 1:
- raise ValueError("There can be only one system message at most.")
params["messages"] = message_dicts
diff --git a/libs/community/langchain_community/graph_vectorstores/__init__.py b/libs/community/langchain_community/graph_vectorstores/__init__.py
index ad044624b1b..b01e20a2d68 100644
--- a/libs/community/langchain_community/graph_vectorstores/__init__.py
+++ b/libs/community/langchain_community/graph_vectorstores/__init__.py
@@ -34,6 +34,7 @@ Example::
- :class:`How to link Documents on hyperlinks in HTML `
- :class:`How to link Documents on common keywords (using KeyBERT) `
- :class:`How to link Documents on common named entities (using GliNER) `
+ - `langchain-jieba: link extraction tailored for Chinese language `_
Get started
-----------
diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py
index e7289955f66..ade4d502a31 100644
--- a/libs/community/langchain_community/llms/tongyi.py
+++ b/libs/community/langchain_community/llms/tongyi.py
@@ -59,6 +59,7 @@ def check_response(resp: Any) -> Any:
return resp
elif resp["status_code"] in [400, 401]:
raise ValueError(
+ f"request_id: {resp['request_id']} \n "
f"status_code: {resp['status_code']} \n "
f"code: {resp['code']} \n message: {resp['message']}"
)
diff --git a/libs/community/langchain_community/utilities/jira.py b/libs/community/langchain_community/utilities/jira.py
index 1d9adb1d073..ad8258e363d 100644
--- a/libs/community/langchain_community/utilities/jira.py
+++ b/libs/community/langchain_community/utilities/jira.py
@@ -40,6 +40,9 @@ class JiraAPIWrapper(BaseModel):
)
values["jira_instance_url"] = jira_instance_url
+ if "jira_cloud" in values and values["jira_cloud"] is not None:
+ values["jira_cloud"] = str(values["jira_cloud"])
+
jira_cloud_str = get_from_dict_or_env(values, "jira_cloud", "JIRA_CLOUD")
jira_cloud = jira_cloud_str.lower() == "true"
values["jira_cloud"] = jira_cloud
diff --git a/libs/community/langchain_community/vectorstores/lancedb.py b/libs/community/langchain_community/vectorstores/lancedb.py
index 11cc955cac4..d000380053c 100644
--- a/libs/community/langchain_community/vectorstores/lancedb.py
+++ b/libs/community/langchain_community/vectorstores/lancedb.py
@@ -75,6 +75,7 @@ class LanceDB(VectorStore):
):
"""Initialize with Lance DB vectorstore"""
lancedb = guard_import("lancedb")
+ lancedb.remote.table = guard_import("lancedb.remote.table")
self._embedding = embedding
self._vector_key = vector_key
self._id_key = id_key
diff --git a/libs/community/tests/integration_tests/chat_models/test_perplexity.py b/libs/community/tests/integration_tests/chat_models/test_perplexity.py
index 63cdd4a59d3..5288fccc9b7 100644
--- a/libs/community/tests/integration_tests/chat_models/test_perplexity.py
+++ b/libs/community/tests/integration_tests/chat_models/test_perplexity.py
@@ -18,12 +18,6 @@ class TestPerplexityStandard(ChatModelIntegrationTests):
def chat_model_params(self) -> dict:
return {"model": "sonar"}
- @property
- def returns_usage_metadata(self) -> bool:
- # TODO: add usage metadata and delete this property
- # https://docs.perplexity.ai/api-reference/chat-completions#response-usage
- return False
-
@pytest.mark.xfail(reason="TODO: handle in integration.")
def test_double_messages_conversation(self, model: BaseChatModel) -> None:
super().test_double_messages_conversation(model)
diff --git a/libs/community/tests/unit_tests/chat_models/test_tongyi.py b/libs/community/tests/unit_tests/chat_models/test_tongyi.py
index 452870f7aa4..afe151d4ed1 100644
--- a/libs/community/tests/unit_tests/chat_models/test_tongyi.py
+++ b/libs/community/tests/unit_tests/chat_models/test_tongyi.py
@@ -65,6 +65,13 @@ def test__convert_dict_to_message_function_call() -> None:
assert result == expected_output
+def test__convert_dict_to_message_partial_mode() -> None:
+ message_dict = {"role": "assistant", "content": "foo", "partial": True}
+ result = convert_dict_to_message(message_dict)
+ expected_output = AIMessage(content="foo", additional_kwargs={"partial": True})
+ assert result == expected_output
+
+
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
@@ -79,6 +86,13 @@ def test__convert_message_to_dict_ai() -> None:
assert result == expected_output
+def test__convert_message_to_dict_ai_partial_mode() -> None:
+ message = AIMessage(content="foo", additional_kwargs={"partial": True})
+ result = convert_message_to_dict(message)
+ expected_output = {"role": "assistant", "content": "foo", "partial": True}
+ assert result == expected_output
+
+
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
diff --git a/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py b/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py
new file mode 100644
index 00000000000..ac73f3f5f51
--- /dev/null
+++ b/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py
@@ -0,0 +1,62 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from langchain_community.utilities.jira import JiraAPIWrapper
+
+
+@pytest.fixture
+def mock_jira(): # type: ignore
+ with patch("atlassian.Jira") as mock_jira:
+ yield mock_jira
+
+
+@pytest.mark.requires("atlassian")
+class TestJiraAPIWrapper:
+ def test_jira_api_wrapper(self, mock_jira: MagicMock) -> None:
+ """Test for Jira API Wrapper using mocks"""
+ # Configure the mock instance
+ mock_jira_instance = mock_jira.return_value
+
+ # Mock projects method to return mock projects
+ mock_project1 = MagicMock(key="PROJ1")
+ mock_project2 = MagicMock(key="PROJ2")
+
+ # Set up the mock to return our mock projects
+ mock_jira_instance.projects.return_value = [mock_project1, mock_project2]
+
+ # Initialize wrapper with mocks in place
+ jira_wrapper = JiraAPIWrapper(
+ jira_username="test_user",
+ jira_api_token="test_token",
+ jira_instance_url="https://test.atlassian.net",
+ jira_cloud=True,
+ )
+
+ mock_jira.assert_called_once_with(
+ url="https://test.atlassian.net",
+ username="test_user",
+ password="test_token",
+ cloud=True,
+ )
+
+ # Test get_projects function
+ result = jira_wrapper.run("get_projects", "")
+
+ # Verify the mock was called and the result contains expected info
+ mock_jira_instance.projects.assert_called_once()
+ assert result.startswith("Found 2 projects")
+
+ def test_jira_api_wrapper_with_cloud_false(self, mock_jira: MagicMock) -> None:
+ JiraAPIWrapper(
+ jira_username="test_user",
+ jira_api_token="test_token",
+ jira_instance_url="https://test.atlassian.net",
+ jira_cloud=False,
+ )
+ mock_jira.assert_called_once_with(
+ url="https://test.atlassian.net",
+ username="test_user",
+ password="test_token",
+ cloud=False,
+ )
diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index d380833ae46..f8472bee7c8 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -65,7 +65,10 @@ from langchain_core.rate_limiters import BaseRateLimiter
from langchain_core.runnables import RunnableMap, RunnablePassthrough
from langchain_core.runnables.config import ensure_config, run_in_executor
from langchain_core.tracers._streaming import _StreamingCallbackHandler
-from langchain_core.utils.function_calling import convert_to_openai_tool
+from langchain_core.utils.function_calling import (
+ convert_to_json_schema,
+ convert_to_openai_tool,
+)
from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
if TYPE_CHECKING:
@@ -116,6 +119,25 @@ async def agenerate_from_stream(
return await run_in_executor(None, generate_from_stream, iter(chunks))
+def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) -> dict:
+ if ls_structured_output_format:
+ try:
+ ls_structured_output_format_dict = {
+ "ls_structured_output_format": {
+ "kwargs": ls_structured_output_format.get("kwargs", {}),
+ "schema": convert_to_json_schema(
+ ls_structured_output_format["schema"]
+ ),
+ }
+ }
+ except ValueError:
+ ls_structured_output_format_dict = {}
+ else:
+ ls_structured_output_format_dict = {}
+
+ return ls_structured_output_format_dict
+
+
class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
"""Base class for chat models.
@@ -366,28 +388,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
else:
config = ensure_config(config)
messages = self._convert_input(input).to_messages()
- structured_output_format = kwargs.pop("structured_output_format", None)
- if structured_output_format:
- try:
- structured_output_format_dict = {
- "structured_output_format": {
- "kwargs": structured_output_format.get("kwargs", {}),
- "schema": convert_to_openai_tool(
- structured_output_format["schema"]
- ),
- }
- }
- except ValueError:
- structured_output_format_dict = {}
- else:
- structured_output_format_dict = {}
+ ls_structured_output_format = kwargs.pop(
+ "ls_structured_output_format", None
+ ) or kwargs.pop("structured_output_format", None)
+ ls_structured_output_format_dict = _format_ls_structured_output(
+ ls_structured_output_format
+ )
params = self._get_invocation_params(stop=stop, **kwargs)
- options = {"stop": stop, **kwargs}
+ options = {"stop": stop, **kwargs, **ls_structured_output_format_dict}
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(stop=stop, **kwargs),
- **structured_output_format_dict,
}
callback_manager = CallbackManager.configure(
config.get("callbacks"),
@@ -460,28 +472,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
config = ensure_config(config)
messages = self._convert_input(input).to_messages()
- structured_output_format = kwargs.pop("structured_output_format", None)
- if structured_output_format:
- try:
- structured_output_format_dict = {
- "structured_output_format": {
- "kwargs": structured_output_format.get("kwargs", {}),
- "schema": convert_to_openai_tool(
- structured_output_format["schema"]
- ),
- }
- }
- except ValueError:
- structured_output_format_dict = {}
- else:
- structured_output_format_dict = {}
+ ls_structured_output_format = kwargs.pop(
+ "ls_structured_output_format", None
+ ) or kwargs.pop("structured_output_format", None)
+ ls_structured_output_format_dict = _format_ls_structured_output(
+ ls_structured_output_format
+ )
params = self._get_invocation_params(stop=stop, **kwargs)
- options = {"stop": stop, **kwargs}
+ options = {"stop": stop, **kwargs, **ls_structured_output_format_dict}
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(stop=stop, **kwargs),
- **structured_output_format_dict,
}
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
@@ -642,28 +644,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
"""
- structured_output_format = kwargs.pop("structured_output_format", None)
- if structured_output_format:
- try:
- structured_output_format_dict = {
- "structured_output_format": {
- "kwargs": structured_output_format.get("kwargs", {}),
- "schema": convert_to_openai_tool(
- structured_output_format["schema"]
- ),
- }
- }
- except ValueError:
- structured_output_format_dict = {}
- else:
- structured_output_format_dict = {}
+ ls_structured_output_format = kwargs.pop(
+ "ls_structured_output_format", None
+ ) or kwargs.pop("structured_output_format", None)
+ ls_structured_output_format_dict = _format_ls_structured_output(
+ ls_structured_output_format
+ )
params = self._get_invocation_params(stop=stop, **kwargs)
- options = {"stop": stop}
+ options = {"stop": stop, **ls_structured_output_format_dict}
inheritable_metadata = {
**(metadata or {}),
**self._get_ls_params(stop=stop, **kwargs),
- **structured_output_format_dict,
}
callback_manager = CallbackManager.configure(
@@ -750,28 +742,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
"""
- structured_output_format = kwargs.pop("structured_output_format", None)
- if structured_output_format:
- try:
- structured_output_format_dict = {
- "structured_output_format": {
- "kwargs": structured_output_format.get("kwargs", {}),
- "schema": convert_to_openai_tool(
- structured_output_format["schema"]
- ),
- }
- }
- except ValueError:
- structured_output_format_dict = {}
- else:
- structured_output_format_dict = {}
+ ls_structured_output_format = kwargs.pop(
+ "ls_structured_output_format", None
+ ) or kwargs.pop("structured_output_format", None)
+ ls_structured_output_format_dict = _format_ls_structured_output(
+ ls_structured_output_format
+ )
params = self._get_invocation_params(stop=stop, **kwargs)
- options = {"stop": stop}
+ options = {"stop": stop, **ls_structured_output_format_dict}
inheritable_metadata = {
**(metadata or {}),
**self._get_ls_params(stop=stop, **kwargs),
- **structured_output_format_dict,
}
callback_manager = AsyncCallbackManager.configure(
@@ -1314,7 +1296,10 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
llm = self.bind_tools(
[schema],
tool_choice="any",
- structured_output_format={"kwargs": {}, "schema": schema},
+ ls_structured_output_format={
+ "kwargs": {"method": "function_calling"},
+ "schema": schema,
+ },
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
output_parser: OutputParserLike = PydanticToolsParser(
diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py
index c317bf099c7..19267060472 100644
--- a/libs/core/langchain_core/messages/ai.py
+++ b/libs/core/langchain_core/messages/ai.py
@@ -443,6 +443,11 @@ def add_ai_message_chunks(
else:
usage_metadata = None
+ id = None
+ for id_ in [left.id] + [o.id for o in others]:
+ if id_:
+ id = id_
+ break
return left.__class__(
example=left.example,
content=content,
@@ -450,7 +455,7 @@ def add_ai_message_chunks(
tool_call_chunks=tool_call_chunks,
response_metadata=response_metadata,
usage_metadata=usage_metadata,
- id=left.id,
+ id=id,
)
diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py
index aa3fe27dd5b..45f17046145 100644
--- a/libs/core/langchain_core/messages/utils.py
+++ b/libs/core/langchain_core/messages/utils.py
@@ -1191,6 +1191,8 @@ def convert_to_openai_messages(
},
}
)
+ elif block.get("type") == "thinking":
+ content.append(block)
else:
err = (
f"Unrecognized content block at "
diff --git a/libs/core/langchain_core/runnables/graph_mermaid.py b/libs/core/langchain_core/runnables/graph_mermaid.py
index 4693558cc0d..af4f806c1de 100644
--- a/libs/core/langchain_core/runnables/graph_mermaid.py
+++ b/libs/core/langchain_core/runnables/graph_mermaid.py
@@ -56,37 +56,50 @@ def draw_mermaid(
if with_styles
else "graph TD;\n"
)
+ # Group nodes by subgraph
+ subgraph_nodes: dict[str, dict[str, Node]] = {}
+ regular_nodes: dict[str, Node] = {}
- if with_styles:
- # Node formatting templates
- default_class_label = "default"
- format_dict = {default_class_label: "{0}({1})"}
- if first_node is not None:
- format_dict[first_node] = "{0}([{1}]):::first"
- if last_node is not None:
- format_dict[last_node] = "{0}([{1}]):::last"
+ for key, node in nodes.items():
+ if ":" in key:
+ # For nodes with colons, add them only to their deepest subgraph level
+ prefix = ":".join(key.split(":")[:-1])
+ subgraph_nodes.setdefault(prefix, {})[key] = node
+ else:
+ regular_nodes[key] = node
- # Add nodes to the graph
- for key, node in nodes.items():
- node_name = node.name.split(":")[-1]
+ # Node formatting templates
+ default_class_label = "default"
+ format_dict = {default_class_label: "{0}({1})"}
+ if first_node is not None:
+ format_dict[first_node] = "{0}([{1}]):::first"
+ if last_node is not None:
+ format_dict[last_node] = "{0}([{1}]):::last"
+
+ def render_node(key: str, node: Node, indent: str = "\t") -> str:
+ """Helper function to render a node with consistent formatting."""
+ node_name = node.name.split(":")[-1]
+ label = (
+            f"<p>{node_name}</p>"
+ if node_name.startswith(tuple(MARKDOWN_SPECIAL_CHARS))
+ and node_name.endswith(tuple(MARKDOWN_SPECIAL_CHARS))
+ else node_name
+ )
+ if node.metadata:
label = (
-            f"<p>{node_name}</p>"
- if node_name.startswith(tuple(MARKDOWN_SPECIAL_CHARS))
- and node_name.endswith(tuple(MARKDOWN_SPECIAL_CHARS))
- else node_name
+            f"{label}<hr/><small><em>"
+ + "\n".join(f"{k} = {value}" for k, value in node.metadata.items())
+            + "</em></small>"
)
- if node.metadata:
- label = (
-                f"{label}<hr/><small><em>"
- + "\n".join(
- f"{key} = {value}" for key, value in node.metadata.items()
- )
-                + "</em></small>"
- )
- node_label = format_dict.get(key, format_dict[default_class_label]).format(
- _escape_node_label(key), label
- )
- mermaid_graph += f"\t{node_label}\n"
+ node_label = format_dict.get(key, format_dict[default_class_label]).format(
+ _escape_node_label(key), label
+ )
+ return f"{indent}{node_label}\n"
+
+ # Add non-subgraph nodes to the graph
+ if with_styles:
+ for key, node in regular_nodes.items():
+ mermaid_graph += render_node(key, node)
# Group edges by their common prefixes
edge_groups: dict[str, list[Edge]] = {}
@@ -116,6 +129,11 @@ def draw_mermaid(
seen_subgraphs.add(subgraph)
mermaid_graph += f"\tsubgraph {subgraph}\n"
+ # Add nodes that belong to this subgraph
+ if with_styles and prefix in subgraph_nodes:
+ for key, node in subgraph_nodes[prefix].items():
+ mermaid_graph += render_node(key, node)
+
for edge in edges:
source, target = edge.source, edge.target
@@ -156,11 +174,25 @@ def draw_mermaid(
# Start with the top-level edges (no common prefix)
add_subgraph(edge_groups.get("", []), "")
- # Add remaining subgraphs
+ # Add remaining subgraphs with edges
for prefix in edge_groups:
if ":" in prefix or prefix == "":
continue
add_subgraph(edge_groups[prefix], prefix)
+ seen_subgraphs.add(prefix)
+
+ # Add empty subgraphs (subgraphs with no internal edges)
+ if with_styles:
+ for prefix in subgraph_nodes:
+ if ":" not in prefix and prefix not in seen_subgraphs:
+ mermaid_graph += f"\tsubgraph {prefix}\n"
+
+ # Add nodes that belong to this subgraph
+ for key, node in subgraph_nodes[prefix].items():
+ mermaid_graph += render_node(key, node)
+
+ mermaid_graph += "\tend\n"
+ seen_subgraphs.add(prefix)
# Add custom styles for nodes
if with_styles:
diff --git a/libs/core/langchain_core/tools/__init__.py b/libs/core/langchain_core/tools/__init__.py
index 969e62e828a..5189d633817 100644
--- a/libs/core/langchain_core/tools/__init__.py
+++ b/libs/core/langchain_core/tools/__init__.py
@@ -22,6 +22,9 @@ from __future__ import annotations
from langchain_core.tools.base import (
FILTERED_ARGS as FILTERED_ARGS,
)
+from langchain_core.tools.base import (
+ ArgsSchema as ArgsSchema,
+)
from langchain_core.tools.base import (
BaseTool as BaseTool,
)
diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py
index 7cdf5c92a32..8a14ce1fc6e 100644
--- a/libs/core/langchain_core/tracers/langchain.py
+++ b/libs/core/langchain_core/tracers/langchain.py
@@ -50,8 +50,8 @@ def log_error_once(method: str, exception: Exception) -> None:
def wait_for_all_tracers() -> None:
"""Wait for all tracers to finish."""
- if rt._CLIENT is not None and rt._CLIENT.tracing_queue is not None:
- rt._CLIENT.tracing_queue.join()
+ if rt._CLIENT is not None:
+ rt._CLIENT.flush()
def get_client() -> Client:
@@ -319,5 +319,5 @@ class LangChainTracer(BaseTracer):
def wait_for_futures(self) -> None:
"""Wait for the given futures to complete."""
- if self.client is not None and self.client.tracing_queue is not None:
- self.client.tracing_queue.join()
+ if self.client is not None:
+ self.client.flush()
diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py
index 0eadd46b2b0..72d261ec4c5 100644
--- a/libs/core/langchain_core/utils/function_calling.py
+++ b/libs/core/langchain_core/utils/function_calling.py
@@ -531,13 +531,52 @@ def convert_to_openai_tool(
'description' and 'parameters' keys are now optional. Only 'name' is
required and guaranteed to be part of the output.
+
+ .. versionchanged:: 0.3.44
+
+ Return OpenAI Responses API-style tools unchanged. This includes
+ any dict with "type" in "file_search", "function", "computer_use_preview",
+ "web_search_preview".
"""
- if isinstance(tool, dict) and tool.get("type") == "function" and "function" in tool:
- return tool
+ if isinstance(tool, dict):
+ if tool.get("type") in ("function", "file_search", "computer_use_preview"):
+ return tool
+ # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11"
+ if (tool.get("type") or "").startswith("web_search_preview"):
+ return tool
oai_function = convert_to_openai_function(tool, strict=strict)
return {"type": "function", "function": oai_function}
+def convert_to_json_schema(
+ schema: Union[dict[str, Any], type[BaseModel], Callable, BaseTool],
+ *,
+ strict: Optional[bool] = None,
+) -> dict[str, Any]:
+ """Convert a schema representation to a JSON schema."""
+ openai_tool = convert_to_openai_tool(schema, strict=strict)
+ if (
+ not isinstance(openai_tool, dict)
+ or "function" not in openai_tool
+ or "name" not in openai_tool["function"]
+ ):
+ error_message = "Input must be a valid OpenAI-format tool."
+ raise ValueError(error_message)
+
+ openai_function = openai_tool["function"]
+ json_schema = {}
+ json_schema["title"] = openai_function["name"]
+
+ if "description" in openai_function:
+ json_schema["description"] = openai_function["description"]
+
+ if "parameters" in openai_function:
+ parameters = openai_function["parameters"].copy()
+ json_schema.update(parameters)
+
+ return json_schema
+
+
@beta()
def tool_example_to_messages(
input: str,
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index a2cfc7adf5b..620a0ea23ab 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -17,7 +17,7 @@ dependencies = [
"pydantic<3.0.0,>=2.7.4; python_full_version >= \"3.12.4\"",
]
name = "langchain-core"
-version = "0.3.41"
+version = "0.3.45-rc.1"
description = "Building applications with LLMs through composability"
readme = "README.md"
diff --git a/libs/core/tests/unit_tests/messages/test_utils.py b/libs/core/tests/unit_tests/messages/test_utils.py
index 8777b2674c5..3b223b4864f 100644
--- a/libs/core/tests/unit_tests/messages/test_utils.py
+++ b/libs/core/tests/unit_tests/messages/test_utils.py
@@ -832,6 +832,18 @@ def test_convert_to_openai_messages_anthropic() -> None:
]
assert result == expected
+ # Test thinking blocks (pass through)
+ thinking_block = {
+ "signature": "abc123",
+ "thinking": "Thinking text.",
+ "type": "thinking",
+ }
+ text_block = {"text": "Response text.", "type": "text"}
+ messages = [AIMessage([thinking_block, text_block])]
+ result = convert_to_openai_messages(messages)
+ expected = [{"role": "assistant", "content": [thinking_block, text_block]}]
+ assert result == expected
+
def test_convert_to_openai_messages_bedrock_converse_image() -> None:
image_data = create_image_data()
diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr
index 4b4568571b3..2e4a19ce5c2 100644
--- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr
+++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr
@@ -5,9 +5,6 @@
graph TD;
    __start__([<p>__start__</p>]):::first
parent_1(parent_1)
- child_child_1_grandchild_1(grandchild_1)
-    child_child_1_grandchild_2(grandchild_2<hr/><small><em>__interrupt = before</em></small>)
- child_child_2(child_2)
parent_2(parent_2)
    __end__([<p>__end__</p>]):::last
__start__ --> parent_1;
@@ -15,8 +12,11 @@
parent_1 --> child_child_1_grandchild_1;
parent_2 --> __end__;
subgraph child
+ child_child_2(child_2)
child_child_1_grandchild_2 --> child_child_2;
subgraph child_1
+ child_child_1_grandchild_1(grandchild_1)
+    child_child_1_grandchild_2(grandchild_2<hr/><small><em>__interrupt = before</em></small>)
child_child_1_grandchild_1 --> child_child_1_grandchild_2;
end
end
@@ -32,10 +32,6 @@
graph TD;
    __start__([<p>__start__</p>]):::first
parent_1(parent_1)
- child_child_1_grandchild_1(grandchild_1)
- child_child_1_grandchild_1_greatgrandchild(greatgrandchild)
-    child_child_1_grandchild_2(grandchild_2<hr/><small><em>__interrupt = before</em></small>)
- child_child_2(child_2)
parent_2(parent_2)
    __end__([<p>__end__</p>]):::last
__start__ --> parent_1;
@@ -43,10 +39,14 @@
parent_1 --> child_child_1_grandchild_1;
parent_2 --> __end__;
subgraph child
+ child_child_2(child_2)
child_child_1_grandchild_2 --> child_child_2;
subgraph child_1
+ child_child_1_grandchild_1(grandchild_1)
+    child_child_1_grandchild_2(grandchild_2<hr/><small><em>__interrupt = before</em></small>)
child_child_1_grandchild_1_greatgrandchild --> child_child_1_grandchild_2;
subgraph grandchild_1
+ child_child_1_grandchild_1_greatgrandchild(greatgrandchild)
child_child_1_grandchild_1 --> child_child_1_grandchild_1_greatgrandchild;
end
end
@@ -1996,10 +1996,6 @@
graph TD;
    __start__([<p>__start__</p>]):::first
outer_1(outer_1)
- inner_1_inner_1(inner_1)
-    inner_1_inner_2(inner_2<hr/><small><em>__interrupt = before</em></small>)
- inner_2_inner_1(inner_1)
- inner_2_inner_2(inner_2)
outer_2(outer_2)
    __end__([<p>__end__</p>]):::last
__start__ --> outer_1;
@@ -2009,9 +2005,13 @@
outer_1 --> inner_2_inner_1;
outer_2 --> __end__;
subgraph inner_1
+ inner_1_inner_1(inner_1)
+    inner_1_inner_2(inner_2<hr/><small><em>__interrupt = before</em></small>)
inner_1_inner_1 --> inner_1_inner_2;
end
subgraph inner_2
+ inner_2_inner_1(inner_1)
+ inner_2_inner_2(inner_2)
inner_2_inner_1 --> inner_2_inner_2;
end
classDef default fill:#f2f0ff,line-height:1.2
@@ -2020,6 +2020,23 @@
'''
# ---
+# name: test_single_node_subgraph_mermaid[mermaid]
+ '''
+ %%{init: {'flowchart': {'curve': 'linear'}}}%%
+ graph TD;
+    __start__([<p>__start__</p>]):::first
+    __end__([<p>__end__</p>]):::last
+ __start__ --> sub_meow;
+ sub_meow --> __end__;
+ subgraph sub
+ sub_meow(meow)
+ end
+ classDef default fill:#f2f0ff,line-height:1.2
+ classDef first fill-opacity:0
+ classDef last fill:#bfb6fc
+
+ '''
+# ---
# name: test_trim
dict({
'edges': list([
diff --git a/libs/core/tests/unit_tests/runnables/test_graph.py b/libs/core/tests/unit_tests/runnables/test_graph.py
index c2f7ef9b7dc..6f822c1e7c2 100644
--- a/libs/core/tests/unit_tests/runnables/test_graph.py
+++ b/libs/core/tests/unit_tests/runnables/test_graph.py
@@ -448,6 +448,23 @@ def test_triple_nested_subgraph_mermaid(snapshot: SnapshotAssertion) -> None:
assert graph.draw_mermaid() == snapshot(name="mermaid")
+def test_single_node_subgraph_mermaid(snapshot: SnapshotAssertion) -> None:
+ empty_data = BaseModel
+ nodes = {
+ "__start__": Node(
+ id="__start__", name="__start__", data=empty_data, metadata=None
+ ),
+ "sub:meow": Node(id="sub:meow", name="meow", data=empty_data, metadata=None),
+ "__end__": Node(id="__end__", name="__end__", data=empty_data, metadata=None),
+ }
+ edges = [
+ Edge(source="__start__", target="sub:meow", data=None, conditional=False),
+ Edge(source="sub:meow", target="__end__", data=None, conditional=False),
+ ]
+ graph = Graph(nodes, edges)
+ assert graph.draw_mermaid() == snapshot(name="mermaid")
+
+
def test_runnable_get_graph_with_invalid_input_type() -> None:
"""Test that error isn't raised when getting graph with invalid input type."""
diff --git a/libs/core/tests/unit_tests/utils/test_function_calling.py b/libs/core/tests/unit_tests/utils/test_function_calling.py
index 38206bceef5..03e0625c69a 100644
--- a/libs/core/tests/unit_tests/utils/test_function_calling.py
+++ b/libs/core/tests/unit_tests/utils/test_function_calling.py
@@ -31,6 +31,7 @@ from langchain_core.runnables import Runnable, RunnableLambda
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain_core.utils.function_calling import (
_convert_typed_dict_to_openai_function,
+ convert_to_json_schema,
convert_to_openai_function,
tool_example_to_messages,
)
@@ -1019,3 +1020,46 @@ def test_convert_to_openai_function_no_args() -> None:
},
"strict": True,
}
+
+
+def test_convert_to_json_schema(
+ pydantic: type[BaseModel],
+ function: Callable,
+ function_docstring_annotations: Callable,
+ dummy_structured_tool: StructuredTool,
+ dummy_structured_tool_args_schema_dict: StructuredTool,
+ dummy_tool: BaseTool,
+ json_schema: dict,
+ anthropic_tool: dict,
+ bedrock_converse_tool: dict,
+ annotated_function: Callable,
+ dummy_pydantic: type[BaseModel],
+ dummy_typing_typed_dict: type,
+ dummy_typing_typed_dict_docstring: type,
+ dummy_extensions_typed_dict: type,
+ dummy_extensions_typed_dict_docstring: type,
+) -> None:
+ expected = json_schema
+
+ for fn in (
+ pydantic,
+ function,
+ function_docstring_annotations,
+ dummy_structured_tool,
+ dummy_structured_tool_args_schema_dict,
+ dummy_tool,
+ json_schema,
+ anthropic_tool,
+ bedrock_converse_tool,
+ expected,
+ Dummy.dummy_function,
+ DummyWithClassMethod.dummy_function,
+ annotated_function,
+ dummy_pydantic,
+ dummy_typing_typed_dict,
+ dummy_typing_typed_dict_docstring,
+ dummy_extensions_typed_dict,
+ dummy_extensions_typed_dict_docstring,
+ ):
+ actual = convert_to_json_schema(fn) # type: ignore
+ assert actual == expected
diff --git a/libs/core/uv.lock b/libs/core/uv.lock
index 347ee332f9b..bf9653da199 100644
--- a/libs/core/uv.lock
+++ b/libs/core/uv.lock
@@ -935,7 +935,7 @@ wheels = [
[[package]]
name = "langchain-core"
-version = "0.3.41"
+version = "0.3.44"
source = { editable = "." }
dependencies = [
{ name = "jsonpatch" },
@@ -1026,7 +1026,7 @@ typing = [
[[package]]
name = "langchain-tests"
-version = "0.3.12"
+version = "0.3.14"
source = { directory = "../standard-tests" }
dependencies = [
{ name = "httpx" },
diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py
index 46055b092cb..2e6fc4f4521 100644
--- a/libs/langchain/tests/unit_tests/chat_models/test_base.py
+++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py
@@ -133,6 +133,7 @@ def test_configurable() -> None:
"extra_body": None,
"include_response_headers": False,
"stream_usage": False,
+ "use_responses_api": None,
},
"kwargs": {
"tools": [
diff --git a/libs/packages.yml b/libs/packages.yml
index 651d87e9a7d..8b807f75a6a 100644
--- a/libs/packages.yml
+++ b/libs/packages.yml
@@ -5,472 +5,514 @@ packages:
- name: langchain-core
path: libs/core
repo: langchain-ai/langchain
- downloads: 27722594
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 31474865
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-text-splitters
path: libs/text-splitters
repo: langchain-ai/langchain
- downloads: 12866727
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 13175905
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain
path: libs/langchain
repo: langchain-ai/langchain
- downloads: 32917727
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 36537062
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-community
path: libs/community
repo: langchain-ai/langchain
- downloads: 21967466
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 16293675
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-experimental
path: libs/experimental
repo: langchain-ai/langchain-experimental
- downloads: 1960508
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1678730
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-cli
path: libs/cli
repo: langchain-ai/langchain
- downloads: 84415
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 71921
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-ai21
path: libs/ai21
repo: langchain-ai/langchain-ai21
- downloads: 13100
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 15340
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-anthropic
path: libs/partners/anthropic
repo: langchain-ai/langchain
js: '@langchain/anthropic'
- downloads: 1549411
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1769640
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-chroma
path: libs/partners/chroma
repo: langchain-ai/langchain
- downloads: 553991
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 560965
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-exa
path: libs/partners/exa
repo: langchain-ai/langchain
provider_page: exa_search
js: '@langchain/exa'
- downloads: 5817
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 6330
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-fireworks
path: libs/partners/fireworks
repo: langchain-ai/langchain
- downloads: 264866
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 305028
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-groq
path: libs/partners/groq
repo: langchain-ai/langchain
js: '@langchain/groq'
- downloads: 452801
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 535446
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-huggingface
path: libs/partners/huggingface
repo: langchain-ai/langchain
- downloads: 403346
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 475998
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-ibm
path: libs/ibm
repo: langchain-ai/langchain-ibm
js: '@langchain/ibm'
- downloads: 95572
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 143800
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-localai
path: libs/localai
repo: mkhludnev/langchain-localai
- downloads: 306
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 272
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-milvus
path: libs/milvus
repo: langchain-ai/langchain-milvus
- downloads: 162619
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 198722
+ downloads_updated_at: '2025-03-09T00:13:44.336850+00:00'
- name: langchain-mistralai
path: libs/partners/mistralai
repo: langchain-ai/langchain
js: '@langchain/mistralai'
- downloads: 315149
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 375450
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-mongodb
path: libs/langchain-mongodb
repo: langchain-ai/langchain-mongodb
provider_page: mongodb_atlas
js: '@langchain/mongodb'
- downloads: 160711
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 209228
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-nomic
path: libs/partners/nomic
repo: langchain-ai/langchain
js: '@langchain/nomic'
- downloads: 10335
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 12028
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-openai
path: libs/partners/openai
repo: langchain-ai/langchain
js: '@langchain/openai'
- downloads: 9823331
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 10724437
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-pinecone
path: libs/pinecone
repo: langchain-ai/langchain-pinecone
js: '@langchain/pinecone'
- downloads: 393153
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 434487
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-prompty
path: libs/partners/prompty
repo: langchain-ai/langchain
provider_page: microsoft
- downloads: 1216
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1232
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-qdrant
path: libs/partners/qdrant
repo: langchain-ai/langchain
js: '@langchain/qdrant'
- downloads: 125551
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 180502
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-scrapegraph
path: .
repo: ScrapeGraphAI/langchain-scrapegraph
- downloads: 851
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1304
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-sema4
path: libs/sema4
repo: langchain-ai/langchain-sema4
provider_page: robocorp
- downloads: 1647
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1686
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-together
path: libs/together
repo: langchain-ai/langchain-together
- downloads: 53987
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 68030
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-upstage
path: libs/upstage
repo: langchain-ai/langchain-upstage
- downloads: 29553
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 26348
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-voyageai
path: libs/partners/voyageai
repo: langchain-ai/langchain
- downloads: 17269
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 26785
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-aws
name_title: AWS
path: libs/aws
repo: langchain-ai/langchain-aws
js: '@langchain/aws'
- downloads: 2133380
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 2093475
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-astradb
path: libs/astradb
repo: langchain-ai/langchain-datastax
- downloads: 83037
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 96896
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-google-genai
name_title: Google Generative AI
path: libs/genai
repo: langchain-ai/langchain-google
provider_page: google
js: '@langchain/google-genai'
- downloads: 1019707
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1262954
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-google-vertexai
path: libs/vertexai
repo: langchain-ai/langchain-google
provider_page: google
js: '@langchain/google-vertexai'
- downloads: 13033464
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 15029525
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-google-community
path: libs/community
repo: langchain-ai/langchain-google
provider_page: google
- downloads: 3787822
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 4073551
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-weaviate
path: libs/weaviate
repo: langchain-ai/langchain-weaviate
js: '@langchain/weaviate'
- downloads: 31199
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 45674
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-cohere
path: libs/cohere
repo: langchain-ai/langchain-cohere
js: '@langchain/cohere'
- downloads: 653329
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 737369
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-elasticsearch
path: libs/elasticsearch
repo: langchain-ai/langchain-elastic
- downloads: 137212
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 161979
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-nvidia-ai-endpoints
path: libs/ai-endpoints
repo: langchain-ai/langchain-nvidia
provider_page: nvidia
- downloads: 157267
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 183435
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-postgres
path: .
repo: langchain-ai/langchain-postgres
provider_page: pgvector
- downloads: 320831
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 338324
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-redis
path: libs/redis
repo: langchain-ai/langchain-redis
js: '@langchain/redis'
- downloads: 22787
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 25789
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-unstructured
path: libs/unstructured
repo: langchain-ai/langchain-unstructured
- downloads: 118888
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 157291
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-azure-ai
path: libs/azure-ai
repo: langchain-ai/langchain-azure
provider_page: azure_ai
- downloads: 5835
js: '@langchain/openai'
+ downloads: 10985
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-azure-dynamic-sessions
path: libs/azure-dynamic-sessions
repo: langchain-ai/langchain-azure
provider_page: microsoft
js: '@langchain/azure-dynamic-sessions'
- downloads: 7401
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 9336
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-sqlserver
path: libs/sqlserver
repo: langchain-ai/langchain-azure
provider_page: microsoft
- downloads: 2298
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1945
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-cerebras
path: libs/cerebras
repo: langchain-ai/langchain-cerebras
- downloads: 26690
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 27854
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-snowflake
path: libs/snowflake
repo: langchain-ai/langchain-snowflake
- downloads: 1905
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1769
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: databricks-langchain
name_title: Databricks
path: integrations/langchain
repo: databricks/databricks-ai-bridge
provider_page: databricks
- downloads: 36221
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 64671
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-couchbase
path: .
repo: Couchbase-Ecosystem/langchain-couchbase
- downloads: 725
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 777
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-ollama
path: libs/partners/ollama
repo: langchain-ai/langchain
js: '@langchain/ollama'
- downloads: 623011
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 805025
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-box
path: libs/box
repo: box-community/langchain-box
- downloads: 730
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 479
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-tests
path: libs/standard-tests
repo: langchain-ai/langchain
- downloads: 180354
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 245012
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-neo4j
path: libs/neo4j
repo: langchain-ai/langchain-neo4j
- downloads: 30320
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 41410
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-linkup
path: .
repo: LinkupPlatform/langchain-linkup
- downloads: 532
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 477
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-yt-dlp
path: .
repo: aqib0770/langchain-yt-dlp
- downloads: 461
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1333
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-oceanbase
path: .
repo: oceanbase/langchain-oceanbase
- downloads: 58
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 74
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-predictionguard
path: .
repo: predictionguard/langchain-predictionguard
- downloads: 422
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 1082
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-cratedb
path: .
repo: crate/langchain-cratedb
- downloads: 417
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 403
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-modelscope
path: .
repo: modelscope/langchain-modelscope
- downloads: 131
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 139
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-falkordb
path: .
repo: kingtroga/langchain-falkordb
- downloads: 178
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 140
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-dappier
path: .
repo: DappierAI/langchain-dappier
- downloads: 353
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 236
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-pull-md
path: .
repo: chigwell/langchain-pull-md
- downloads: 161
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 146
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-kuzu
path: .
repo: kuzudb/langchain-kuzu
- downloads: 426
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 395
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-docling
path: .
repo: DS4SD/docling-langchain
- downloads: 6800
- downloads_updated_at: '2025-02-13T20:29:06.035211+00:00'
+ downloads: 11289
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-lindorm-integration
path: .
repo: AlwaysBluer/langchain-lindorm-integration
provider_page: lindorm
- downloads: 79
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 68
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-hyperbrowser
path: .
repo: hyperbrowserai/langchain-hyperbrowser
- downloads: 371
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 315
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-fmp-data
path: .
repo: MehdiZare/langchain-fmp-data
- downloads: 366
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 145
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: tilores-langchain
name_title: Tilores
path: .
repo: tilotech/tilores-langchain
provider_page: tilores
- downloads: 121
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 83
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-pipeshift
path: .
repo: pipeshift-org/langchain-pipeshift
- downloads: 133
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 101
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-payman-tool
path: .
repo: paymanai/langchain-payman-tool
- downloads: 685
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 236
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-sambanova
path: .
repo: sambanova/langchain-sambanova
- downloads: 1313
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 26848
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-deepseek
path: libs/partners/deepseek
repo: langchain-ai/langchain
provider_page: deepseek
js: '@langchain/deepseek'
- downloads: 6871
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 25597
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-jenkins
path: .
repo: Amitgb14/langchain_jenkins
- downloads: 386
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 362
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-goodfire
path: .
repo: keenanpepper/langchain-goodfire
- downloads: 585
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 297
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-nimble
path: .
repo: Nimbleway/langchain-nimble
- downloads: 388
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 498
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-apify
path: .
repo: apify/langchain-apify
- downloads: 443
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 679
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langfair
name_title: LangFair
path: .
repo: cvs-health/langfair
- downloads: 901
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 894
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-abso
path: .
repo: lunary-ai/langchain-abso
- downloads: 0
- downloads_updated_at: '2025-02-13T20:30:13.814314+00:00'
+ downloads: 264
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-graph-retriever
name_title: Graph RAG
path: packages/langchain-graph-retriever
repo: datastax/graph-rag
provider_page: graph_rag
- downloads: 2093
- downloads_updated_at: '2025-02-13T20:32:23.744801+00:00'
+ downloads: 2342
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-xai
path: libs/partners/xai
repo: langchain-ai/langchain
- downloads: 9521
- downloads_updated_at: '2025-02-13T23:35:48.490391+00:00'
+ downloads: 11326
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-salesforce
path: .
repo: colesmcintosh/langchain-salesforce
- downloads: 0
- downloads_updated_at: '2025-02-13T23:35:48.490391+00:00'
+ downloads: 343
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-discord-shikenso
path: .
repo: Shikenso-Analytics/langchain-discord
- downloads: 1
- downloads_updated_at: '2025-02-15T16:00:00.000000+00:00'
+ downloads: 255
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-vdms
- repo: IntelLabs/langchain-vdms
- path: .
name_title: VDMS
+ path: .
+ repo: IntelLabs/langchain-vdms
+ downloads: 590
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-deeplake
path: .
repo: activeloopai/langchain-deeplake
+ downloads: 72
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-cognee
- repo: topoteretes/langchain-cognee
path: .
+ repo: topoteretes/langchain-cognee
+ downloads: 238
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-prolog
path: .
repo: apisani1/langchain-prolog
- downloads: 0
- downloads_updated_at: '2025-02-15T16:00:00.000000+00:00'
+ downloads: 171
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-permit
path: .
repo: permitio/langchain-permit
+ downloads: 228
+ downloads_updated_at: '2025-03-09T00:14:08.178061+00:00'
- name: langchain-pymupdf4llm
path: .
repo: lakinduboteju/langchain-pymupdf4llm
+ downloads: 334
+ downloads_updated_at: '2025-03-09T00:14:26.697616+00:00'
- name: langchain-writer
path: .
repo: writer/langchain-writer
- downloads: 0
- downloads_updated_at: '2025-02-24T13:19:19.816059+00:00'
+ downloads: 444
+ downloads_updated_at: '2025-03-09T00:14:26.697616+00:00'
- name: langchain-taiga
name_title: Taiga
path: .
repo: Shikenso-Analytics/langchain-taiga
+ downloads: 206
+ downloads_updated_at: '2025-03-09T00:14:26.697616+00:00'
+- name: langchain-tableau
+ name_title: Tableau
+ path: .
+ repo: Tab-SE/tableau_langchain
+ downloads: 278
+ downloads_updated_at: '2025-03-09T00:14:26.697616+00:00'
+- name: ads4gpts-langchain
+ name_title: ADS4GPTs
+ provider_page: ads4gpts
+ path: libs/python-sdk/ads4gpts-langchain
+ repo: ADS4GPTs/ads4gpts
+ downloads: 733
+ downloads_updated_at: '2025-03-09T00:15:16.651181+00:00'
+- name: langchain-contextual
+ name_title: Contextual AI
+ path: langchain-contextual
+ repo: ContextualAI/langchain-contextual
+ downloads: 432
+ downloads_updated_at: '2025-03-09T01:40:49.430540+00:00'
+- name: langchain-valthera
+ name_title: Valthera
+ path: .
+ repo: valthera/langchain-valthera
+- name: langchain-opengradient
+ path: .
+ repo: OpenGradient/og-langchain
+- name: langchain-agentql
+ path: langchain
+ repo: tinyfish-io/agentql-integrations
diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 09a4710610b..eddcac746b1 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -519,6 +519,41 @@ class ChatAnthropic(BaseChatModel):
"The image depicts a sunny day with a partly cloudy sky. The sky is a brilliant blue color with scattered white clouds drifting across. The lighting and cloud patterns suggest pleasant, mild weather conditions. The scene shows a grassy field or meadow with a wooden boardwalk trail leading through it, indicating an outdoor setting on a nice day well-suited for enjoying nature."
+ PDF input:
+ .. code-block:: python
+
+ from base64 import b64encode
+ from langchain_anthropic import ChatAnthropic
+ from langchain_core.messages import HumanMessage
+ import requests
+
+ url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
+ data = b64encode(requests.get(url).content).decode()
+
+ llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
+ ai_msg = llm.invoke(
+ [
+ HumanMessage(
+ [
+ "Summarize this document.",
+ {
+ "type": "document",
+ "source": {
+ "type": "base64",
+ "data": data,
+ "media_type": "application/pdf",
+ },
+ },
+ ]
+ )
+ ]
+ )
+ ai_msg.content
+
+ .. code-block:: python
+
+ "This appears to be a simple document..."
+
Extended thinking:
Claude 3.7 Sonnet supports an
`extended thinking `_
@@ -979,7 +1014,10 @@ class ChatAnthropic(BaseChatModel):
warnings.warn(thinking_admonition)
llm = self.bind_tools(
[schema],
- structured_output_format={"kwargs": {}, "schema": formatted_tool},
+ ls_structured_output_format={
+ "kwargs": {"method": "function_calling"},
+ "schema": formatted_tool,
+ },
)
def _raise_if_no_tool_calls(message: AIMessage) -> AIMessage:
@@ -1294,7 +1332,10 @@ class ChatAnthropic(BaseChatModel):
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
- structured_output_format={"kwargs": {}, "schema": formatted_tool},
+ ls_structured_output_format={
+ "kwargs": {"method": "function_calling"},
+ "schema": formatted_tool,
+ },
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
index cde69d25bcc..c61817972f3 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
@@ -4,6 +4,7 @@ import json
from base64 import b64encode
from typing import List, Optional
+import httpx
import pytest
import requests
from anthropic import BadRequestError
@@ -768,3 +769,64 @@ def test_structured_output_thinking_force_tool_use() -> None:
)
with pytest.raises(BadRequestError):
llm.invoke("Generate a username for Sally with green hair")
+
+
+def test_image_tool_calling() -> None:
+ """Test tool calling with image inputs."""
+
+ class color_picker(BaseModel):
+ """Input your fav color and get a random fact about it."""
+
+ fav_color: str
+
+ human_content: List[dict] = [
+ {
+ "type": "text",
+ "text": "what's your favorite color in this image",
+ },
+ ]
+ image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+ image_data = b64encode(httpx.get(image_url).content).decode("utf-8")
+ human_content.append(
+ {
+ "type": "image",
+ "source": {
+ "type": "base64",
+ "media_type": "image/jpeg",
+ "data": image_data,
+ },
+ }
+ )
+ messages = [
+ SystemMessage("you're a good assistant"),
+ HumanMessage(human_content), # type: ignore[arg-type]
+ AIMessage(
+ [
+ {"type": "text", "text": "Hmm let me think about that"},
+ {
+ "type": "tool_use",
+ "input": {"fav_color": "green"},
+ "id": "foo",
+ "name": "color_picker",
+ },
+ ]
+ ),
+ HumanMessage(
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "foo",
+ "content": [
+ {
+ "type": "text",
+ "text": "green is a great pick! that's my sister's favorite color", # noqa: E501
+ }
+ ],
+ "is_error": False,
+ },
+ {"type": "text", "text": "what's my sister's favorite color"},
+ ]
+ ),
+ ]
+ llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
+ llm.bind_tools([color_picker]).invoke(messages)
diff --git a/libs/partners/deepseek/pyproject.toml b/libs/partners/deepseek/pyproject.toml
index b8b10514d18..e7fbbc55246 100644
--- a/libs/partners/deepseek/pyproject.toml
+++ b/libs/partners/deepseek/pyproject.toml
@@ -26,7 +26,7 @@ test = [
"pytest-asyncio<1.0.0,>=0.23.2",
"pytest-socket<1.0.0,>=0.7.0",
"pytest-watcher<1.0.0,>=0.3.4",
- "langchain-tests<1.0.0,>=0.3.5",
+ "langchain-tests",
"langchain-openai",
"pytest-timeout<3.0.0,>=2.3.1",
]
@@ -40,6 +40,7 @@ typing = ["mypy<2.0,>=1.10"]
[tool.uv.sources]
langchain-openai = { path = "../openai", editable = true }
langchain-core = { path = "../../core", editable = true }
+langchain-tests = { path = "../../standard-tests", editable = true }
[tool.mypy]
disallow_untyped_defs = "True"
diff --git a/libs/partners/deepseek/uv.lock b/libs/partners/deepseek/uv.lock
index 4820cc01c05..fd30e79a64d 100644
--- a/libs/partners/deepseek/uv.lock
+++ b/libs/partners/deepseek/uv.lock
@@ -367,7 +367,7 @@ wheels = [
[[package]]
name = "langchain-core"
-version = "0.3.35"
+version = "0.3.43"
source = { editable = "../../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -399,7 +399,7 @@ dev = [
]
lint = [{ name = "ruff", specifier = ">=0.9.2,<1.0.0" }]
test = [
- { name = "blockbuster", specifier = "~=1.5.11" },
+ { name = "blockbuster", specifier = "~=1.5.18" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
{ name = "grandalf", specifier = ">=0.8,<1.0" },
{ name = "langchain-tests", directory = "../../standard-tests" },
@@ -464,7 +464,7 @@ dev = []
lint = [{ name = "ruff", specifier = ">=0.5,<1.0" }]
test = [
{ name = "langchain-openai", editable = "../openai" },
- { name = "langchain-tests", specifier = ">=0.3.5,<1.0.0" },
+ { name = "langchain-tests", editable = "../../standard-tests" },
{ name = "pytest", specifier = ">=7.4.3,<8.0.0" },
{ name = "pytest-asyncio", specifier = ">=0.23.2,<1.0.0" },
{ name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" },
@@ -476,7 +476,7 @@ typing = [{ name = "mypy", specifier = ">=1.10,<2.0" }]
[[package]]
name = "langchain-openai"
-version = "0.3.5"
+version = "0.3.8"
source = { editable = "../openai" }
dependencies = [
{ name = "langchain-core" },
@@ -524,8 +524,8 @@ typing = [
[[package]]
name = "langchain-tests"
-version = "0.3.10"
-source = { registry = "https://pypi.org/simple" }
+version = "0.3.14"
+source = { editable = "../../standard-tests" }
dependencies = [
{ name = "httpx" },
{ name = "langchain-core" },
@@ -536,9 +536,26 @@ dependencies = [
{ name = "pytest-socket" },
{ name = "syrupy" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/80/24/b1ef0d74222d04c4196e673e3ae8bac9f89481c17c4e6a72c67f61b403c7/langchain_tests-0.3.10.tar.gz", hash = "sha256:ba0ce038cb633e906961efc85591dd86b28d5c84a7880e7e0cd4dcb833d604a8", size = 31022 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d0/c3/2f2f2e919bbb9f8608389ac926c6cf8f717c3965956f0e5f139372742fb9/langchain_tests-0.3.10-py3-none-any.whl", hash = "sha256:393e15990b9d1d12b52ee832257e874beb4299891d98ec7682b7fba12c0f8fe1", size = 37521 },
+
+[package.metadata]
+requires-dist = [
+ { name = "httpx", specifier = ">=0.25.0,<1" },
+ { name = "langchain-core", editable = "../../core" },
+ { name = "numpy", specifier = ">=1.26.2,<3" },
+ { name = "pytest", specifier = ">=7,<9" },
+ { name = "pytest-asyncio", specifier = ">=0.20,<1" },
+ { name = "pytest-socket", specifier = ">=0.6.0,<1" },
+ { name = "syrupy", specifier = ">=4,<5" },
+]
+
+[package.metadata.requires-dev]
+codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
+lint = [{ name = "ruff", specifier = ">=0.9.2,<1.0.0" }]
+test = [{ name = "langchain-core", editable = "../../core" }]
+test-integration = []
+typing = [
+ { name = "langchain-core", editable = "../../core" },
+ { name = "mypy", specifier = ">=1,<2" },
]
[[package]]
diff --git a/libs/partners/fireworks/langchain_fireworks/chat_models.py b/libs/partners/fireworks/langchain_fireworks/chat_models.py
index 42bed993623..6ac752d3777 100644
--- a/libs/partners/fireworks/langchain_fireworks/chat_models.py
+++ b/libs/partners/fireworks/langchain_fireworks/chat_models.py
@@ -970,7 +970,7 @@ class ChatFireworks(BaseChatModel):
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": formatted_tool,
},
@@ -987,7 +987,7 @@ class ChatFireworks(BaseChatModel):
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": "json_mode"},
"schema": schema,
},
diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py
index e971b5e39c1..bb4115b850a 100644
--- a/libs/partners/groq/langchain_groq/chat_models.py
+++ b/libs/partners/groq/langchain_groq/chat_models.py
@@ -88,6 +88,8 @@ from typing_extensions import Self
from langchain_groq.version import __version__
+WARNED_DEFAULT_MODEL = False
+
class ChatGroq(BaseChatModel):
"""`Groq` Chat large language models API.
@@ -109,7 +111,7 @@ class ChatGroq(BaseChatModel):
Key init args — completion params:
model: str
- Name of Groq model to use. E.g. "mixtral-8x7b-32768".
+ Name of Groq model to use. E.g. "llama-3.1-8b-instant".
temperature: float
Sampling temperature. Ranges from 0.0 to 1.0.
max_tokens: Optional[int]
@@ -140,7 +142,7 @@ class ChatGroq(BaseChatModel):
from langchain_groq import ChatGroq
llm = ChatGroq(
- model="mixtral-8x7b-32768",
+ model="llama-3.1-8b-instant",
temperature=0.0,
max_retries=2,
# other params...
@@ -164,7 +166,7 @@ class ChatGroq(BaseChatModel):
response_metadata={'token_usage': {'completion_tokens': 38,
'prompt_tokens': 28, 'total_tokens': 66, 'completion_time':
0.057975474, 'prompt_time': 0.005366091, 'queue_time': None,
- 'total_time': 0.063341565}, 'model_name': 'mixtral-8x7b-32768',
+ 'total_time': 0.063341565}, 'model_name': 'llama-3.1-8b-instant',
'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop',
'logprobs': None}, id='run-ecc71d70-e10c-4b69-8b8c-b8027d95d4b8-0')
@@ -222,7 +224,7 @@ class ChatGroq(BaseChatModel):
response_metadata={'token_usage': {'completion_tokens': 53,
'prompt_tokens': 28, 'total_tokens': 81, 'completion_time':
0.083623752, 'prompt_time': 0.007365126, 'queue_time': None,
- 'total_time': 0.090988878}, 'model_name': 'mixtral-8x7b-32768',
+ 'total_time': 0.090988878}, 'model_name': 'llama-3.1-8b-instant',
'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop',
'logprobs': None}, id='run-897f3391-1bea-42e2-82e0-686e2367bcf8-0')
@@ -295,7 +297,7 @@ class ChatGroq(BaseChatModel):
'prompt_time': 0.007518279,
'queue_time': None,
'total_time': 0.11947467},
- 'model_name': 'mixtral-8x7b-32768',
+ 'model_name': 'llama-3.1-8b-instant',
'system_fingerprint': 'fp_c5f20b5bb1',
'finish_reason': 'stop',
'logprobs': None}
@@ -351,6 +353,27 @@ class ChatGroq(BaseChatModel):
populate_by_name=True,
)
+ @model_validator(mode="before")
+ @classmethod
+ def warn_default_model(cls, values: Dict[str, Any]) -> Any:
+ """Warning anticipating removal of default model."""
+ # TODO(ccurme): remove this warning in 0.3.0 when default model is removed
+ global WARNED_DEFAULT_MODEL
+ if (
+ "model" not in values
+ and "model_name" not in values
+ and not WARNED_DEFAULT_MODEL
+ ):
+ warnings.warn(
+ "Groq is retiring the default model for ChatGroq, mixtral-8x7b-32768, "
+ "on March 20, 2025. Requests with the default model will start failing "
+ "on that date. Version 0.3.0 of langchain-groq will remove the "
+ "default. Please specify `model` explicitly, e.g., "
+ "`model='mistral-saba-24b'` or `model='llama-3.3-70b-versatile'`.",
+ )
+ WARNED_DEFAULT_MODEL = True
+ return values
+
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
@@ -1001,7 +1024,7 @@ class ChatGroq(BaseChatModel):
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": formatted_tool,
},
@@ -1018,7 +1041,7 @@ class ChatGroq(BaseChatModel):
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": "json_mode"},
"schema": schema,
},
diff --git a/libs/partners/groq/pyproject.toml b/libs/partners/groq/pyproject.toml
index 283276f6932..31c3329d2ad 100644
--- a/libs/partners/groq/pyproject.toml
+++ b/libs/partners/groq/pyproject.toml
@@ -6,9 +6,9 @@ build-backend = "pdm.backend"
authors = []
license = { text = "MIT" }
requires-python = "<4.0,>=3.9"
-dependencies = ["langchain-core<1.0.0,>=0.3.33", "groq<1,>=0.4.1"]
+dependencies = ["langchain-core<1.0.0,>=0.3.42", "groq<1,>=0.4.1"]
name = "langchain-groq"
-version = "0.2.4"
+version = "0.2.5"
description = "An integration package connecting Groq and LangChain"
readme = "README.md"
diff --git a/libs/partners/groq/tests/integration_tests/test_chat_models.py b/libs/partners/groq/tests/integration_tests/test_chat_models.py
index 9113c829871..9d74ef4f2ab 100644
--- a/libs/partners/groq/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/groq/tests/integration_tests/test_chat_models.py
@@ -21,6 +21,8 @@ from tests.unit_tests.fake.callbacks import (
FakeCallbackHandlerWithChatStart,
)
+MODEL_NAME = "llama-3.3-70b-versatile"
+
#
# Smoke test Runnable interface
@@ -28,7 +30,8 @@ from tests.unit_tests.fake.callbacks import (
@pytest.mark.scheduled
def test_invoke() -> None:
"""Test Chat wrapper."""
- chat = ChatGroq( # type: ignore[call-arg]
+ chat = ChatGroq(
+ model=MODEL_NAME,
temperature=0.7,
base_url=None,
groq_proxy=None,
@@ -49,7 +52,7 @@ def test_invoke() -> None:
@pytest.mark.scheduled
async def test_ainvoke() -> None:
"""Test ainvoke tokens from ChatGroq."""
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
result = await chat.ainvoke("Welcome to the Groqetship!", config={"tags": ["foo"]})
assert isinstance(result, BaseMessage)
@@ -59,7 +62,7 @@ async def test_ainvoke() -> None:
@pytest.mark.scheduled
def test_batch() -> None:
"""Test batch tokens from ChatGroq."""
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
result = chat.batch(["Hello!", "Welcome to the Groqetship!"])
for token in result:
@@ -70,7 +73,7 @@ def test_batch() -> None:
@pytest.mark.scheduled
async def test_abatch() -> None:
"""Test abatch tokens from ChatGroq."""
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
result = await chat.abatch(["Hello!", "Welcome to the Groqetship!"])
for token in result:
@@ -81,7 +84,7 @@ async def test_abatch() -> None:
@pytest.mark.scheduled
async def test_stream() -> None:
"""Test streaming tokens from Groq."""
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
for token in chat.stream("Welcome to the Groqetship!"):
assert isinstance(token, BaseMessageChunk)
@@ -91,7 +94,7 @@ async def test_stream() -> None:
@pytest.mark.scheduled
async def test_astream() -> None:
"""Test streaming tokens from Groq."""
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
full: Optional[BaseMessageChunk] = None
chunks_with_token_counts = 0
@@ -124,7 +127,7 @@ async def test_astream() -> None:
def test_generate() -> None:
"""Test sync generate."""
n = 1
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
message = HumanMessage(content="Hello", n=1)
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -143,7 +146,7 @@ def test_generate() -> None:
async def test_agenerate() -> None:
"""Test async generation."""
n = 1
- chat = ChatGroq(max_tokens=10, n=1) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10, n=1)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -165,7 +168,8 @@ async def test_agenerate() -> None:
def test_invoke_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
- chat = ChatGroq( # type: ignore[call-arg]
+ chat = ChatGroq(
+ model=MODEL_NAME,
max_tokens=2,
streaming=True,
temperature=0,
@@ -181,7 +185,8 @@ def test_invoke_streaming() -> None:
async def test_agenerate_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandlerWithChatStart()
- chat = ChatGroq( # type: ignore[call-arg]
+ chat = ChatGroq(
+ model=MODEL_NAME,
max_tokens=10,
streaming=True,
temperature=0,
@@ -220,7 +225,8 @@ def test_streaming_generation_info() -> None:
self.saved_things["generation"] = args[0]
callback = _FakeCallback()
- chat = ChatGroq( # type: ignore[call-arg]
+ chat = ChatGroq(
+ model=MODEL_NAME,
max_tokens=2,
temperature=0,
callbacks=[callback],
@@ -234,7 +240,7 @@ def test_streaming_generation_info() -> None:
def test_system_message() -> None:
"""Test ChatGroq wrapper with system message."""
- chat = ChatGroq(max_tokens=10) # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat.invoke([system_message, human_message])
@@ -242,10 +248,9 @@ def test_system_message() -> None:
assert isinstance(response.content, str)
-@pytest.mark.xfail(reason="Groq tool_choice doesn't currently force a tool call")
def test_tool_choice() -> None:
"""Test that tool choice is respected."""
- llm = ChatGroq() # type: ignore[call-arg]
+ llm = ChatGroq(model=MODEL_NAME)
class MyTool(BaseModel):
name: str
@@ -273,10 +278,9 @@ def test_tool_choice() -> None:
assert tool_call["args"] == {"name": "Erick", "age": 27}
-@pytest.mark.xfail(reason="Groq tool_choice doesn't currently force a tool call")
def test_tool_choice_bool() -> None:
"""Test that tool choice is respected just passing in True."""
- llm = ChatGroq() # type: ignore[call-arg]
+ llm = ChatGroq(model=MODEL_NAME)
class MyTool(BaseModel):
name: str
@@ -301,7 +305,7 @@ def test_tool_choice_bool() -> None:
@pytest.mark.xfail(reason="Groq tool_choice doesn't currently force a tool call")
def test_streaming_tool_call() -> None:
"""Test that tool choice is respected."""
- llm = ChatGroq() # type: ignore[call-arg]
+ llm = ChatGroq(model=MODEL_NAME)
class MyTool(BaseModel):
name: str
@@ -339,7 +343,7 @@ def test_streaming_tool_call() -> None:
@pytest.mark.xfail(reason="Groq tool_choice doesn't currently force a tool call")
async def test_astreaming_tool_call() -> None:
"""Test that tool choice is respected."""
- llm = ChatGroq() # type: ignore[call-arg]
+ llm = ChatGroq(model=MODEL_NAME)
class MyTool(BaseModel):
name: str
@@ -384,7 +388,7 @@ def test_json_mode_structured_output() -> None:
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
- chat = ChatGroq().with_structured_output(Joke, method="json_mode") # type: ignore[call-arg]
+ chat = ChatGroq(model=MODEL_NAME).with_structured_output(Joke, method="json_mode")
result = chat.invoke(
"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
)
diff --git a/libs/partners/groq/tests/unit_tests/__snapshots__/test_standard.ambr b/libs/partners/groq/tests/unit_tests/__snapshots__/test_standard.ambr
index 741d2c84745..7b8db708167 100644
--- a/libs/partners/groq/tests/unit_tests/__snapshots__/test_standard.ambr
+++ b/libs/partners/groq/tests/unit_tests/__snapshots__/test_standard.ambr
@@ -16,7 +16,7 @@
}),
'max_retries': 2,
'max_tokens': 100,
- 'model_name': 'mixtral-8x7b-32768',
+ 'model_name': 'llama-3.1-8b-instant',
'n': 1,
'request_timeout': 60.0,
'stop': list([
diff --git a/libs/partners/groq/tests/unit_tests/test_chat_models.py b/libs/partners/groq/tests/unit_tests/test_chat_models.py
index 1d7ec63c514..583b562b285 100644
--- a/libs/partners/groq/tests/unit_tests/test_chat_models.py
+++ b/libs/partners/groq/tests/unit_tests/test_chat_models.py
@@ -2,6 +2,7 @@
import json
import os
+import warnings
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
@@ -156,7 +157,7 @@ def mock_completion() -> dict:
def test_groq_invoke(mock_completion: dict) -> None:
- llm = ChatGroq() # type: ignore[call-arg]
+ llm = ChatGroq(model="foo")
mock_client = MagicMock()
completed = False
@@ -178,7 +179,7 @@ def test_groq_invoke(mock_completion: dict) -> None:
async def test_groq_ainvoke(mock_completion: dict) -> None:
- llm = ChatGroq() # type: ignore[call-arg]
+ llm = ChatGroq(model="foo")
mock_client = AsyncMock()
completed = False
@@ -203,7 +204,7 @@ def test_chat_groq_extra_kwargs() -> None:
"""Test extra kwargs to chat groq."""
# Check that foo is saved in extra_kwargs.
with pytest.warns(UserWarning) as record:
- llm = ChatGroq(foo=3, max_tokens=10) # type: ignore[call-arg]
+ llm = ChatGroq(model="foo", foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
assert len(record) == 1
@@ -212,7 +213,7 @@ def test_chat_groq_extra_kwargs() -> None:
# Test that if extra_kwargs are provided, they are added to it.
with pytest.warns(UserWarning) as record:
- llm = ChatGroq(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
+ llm = ChatGroq(model="foo", foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
assert len(record) == 1
assert type(record[0].message) is UserWarning
@@ -220,21 +221,22 @@ def test_chat_groq_extra_kwargs() -> None:
# Test that if provided twice it errors
with pytest.raises(ValueError):
- ChatGroq(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
+ ChatGroq(model="foo", foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
# Test that if explicit param is specified in kwargs it errors
with pytest.raises(ValueError):
- ChatGroq(model_kwargs={"temperature": 0.2}) # type: ignore[call-arg]
+ ChatGroq(model="foo", model_kwargs={"temperature": 0.2})
# Test that "model" cannot be specified in kwargs
with pytest.raises(ValueError):
- ChatGroq(model_kwargs={"model": "test-model"}) # type: ignore[call-arg]
+ ChatGroq(model="foo", model_kwargs={"model": "test-model"})
def test_chat_groq_invalid_streaming_params() -> None:
"""Test that an error is raised if streaming is invoked with n>1."""
with pytest.raises(ValueError):
- ChatGroq( # type: ignore[call-arg]
+ ChatGroq(
+ model="foo",
max_tokens=10,
streaming=True,
temperature=0,
@@ -246,7 +248,7 @@ def test_chat_groq_secret() -> None:
"""Test that secret is not printed"""
secret = "secretKey"
not_secret = "safe"
- llm = ChatGroq(api_key=secret, model_kwargs={"not_secret": not_secret}) # type: ignore[call-arg, arg-type]
+ llm = ChatGroq(model="foo", api_key=secret, model_kwargs={"not_secret": not_secret}) # type: ignore[call-arg, arg-type]
stringified = str(llm)
assert not_secret in stringified
assert secret not in stringified
@@ -257,7 +259,7 @@ def test_groq_serialization() -> None:
"""Test that ChatGroq can be successfully serialized and deserialized"""
api_key1 = "top secret"
api_key2 = "topest secret"
- llm = ChatGroq(api_key=api_key1, temperature=0.5) # type: ignore[call-arg, arg-type]
+ llm = ChatGroq(model="foo", api_key=api_key1, temperature=0.5) # type: ignore[call-arg, arg-type]
dump = lc_load.dumps(llm)
llm2 = lc_load.loads(
dump,
@@ -278,3 +280,23 @@ def test_groq_serialization() -> None:
# Ensure a None was preserved
assert llm.groq_api_base == llm2.groq_api_base
+
+
+def test_groq_warns_default_model() -> None:
+ """Test that a warning is raised if a default model is used."""
+
+ # Delete this test in 0.3 release, when the default model is removed.
+
+ # Test no warning if model is specified
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ ChatGroq(model="foo")
+
+ # Test warns if default model is used
+ with pytest.warns(match="default model"):
+ ChatGroq()
+
+ # Test only warns once
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ ChatGroq()
diff --git a/libs/partners/groq/tests/unit_tests/test_standard.py b/libs/partners/groq/tests/unit_tests/test_standard.py
index e4df2916f30..f04d13b703f 100644
--- a/libs/partners/groq/tests/unit_tests/test_standard.py
+++ b/libs/partners/groq/tests/unit_tests/test_standard.py
@@ -14,3 +14,7 @@ class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatGroq
+
+ @property
+ def chat_model_params(self) -> dict:
+ return {"model": "llama-3.1-8b-instant"}
diff --git a/libs/partners/groq/uv.lock b/libs/partners/groq/uv.lock
index 6874b46c32f..560e6c36e0d 100644
--- a/libs/partners/groq/uv.lock
+++ b/libs/partners/groq/uv.lock
@@ -313,7 +313,7 @@ wheels = [
[[package]]
name = "langchain-core"
-version = "0.3.35"
+version = "0.3.42"
source = { editable = "../../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -345,7 +345,7 @@ dev = [
]
lint = [{ name = "ruff", specifier = ">=0.9.2,<1.0.0" }]
test = [
- { name = "blockbuster", specifier = "~=1.5.11" },
+ { name = "blockbuster", specifier = "~=1.5.18" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
{ name = "grandalf", specifier = ">=0.8,<1.0" },
{ name = "langchain-tests", directory = "../../standard-tests" },
@@ -371,7 +371,7 @@ typing = [
[[package]]
name = "langchain-groq"
-version = "0.2.4"
+version = "0.2.5"
source = { editable = "." }
dependencies = [
{ name = "groq" },
@@ -430,7 +430,7 @@ typing = [
[[package]]
name = "langchain-tests"
-version = "0.3.11"
+version = "0.3.14"
source = { editable = "../../standard-tests" }
dependencies = [
{ name = "httpx" },
@@ -447,8 +447,7 @@ dependencies = [
requires-dist = [
{ name = "httpx", specifier = ">=0.25.0,<1" },
{ name = "langchain-core", editable = "../../core" },
- { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.24.0,<2.0.0" },
- { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
+ { name = "numpy", specifier = ">=1.26.2,<3" },
{ name = "pytest", specifier = ">=7,<9" },
{ name = "pytest-asyncio", specifier = ">=0.20,<1" },
{ name = "pytest-socket", specifier = ">=0.6.0,<1" },
diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py
index aaa8c43780f..98aa414e573 100644
--- a/libs/partners/mistralai/langchain_mistralai/chat_models.py
+++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py
@@ -5,6 +5,7 @@ import json
import logging
import os
import re
+import ssl
import uuid
from operator import itemgetter
from typing import (
@@ -24,6 +25,7 @@ from typing import (
cast,
)
+import certifi
import httpx
from httpx_sse import EventSource, aconnect_sse, connect_sse
from langchain_core.callbacks import (
@@ -87,6 +89,11 @@ logger = logging.getLogger(__name__)
TOOL_CALL_ID_PATTERN = re.compile(r"^[a-zA-Z0-9]{9}$")
+# This SSL context is equivalent to the default `verify=True`.
+# https://www.python-httpx.org/advanced/ssl/#configuring-client-instances
+global_ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+
def _create_retry_decorator(
llm: ChatMistralAI,
run_manager: Optional[
@@ -518,6 +525,7 @@ class ChatMistralAI(BaseChatModel):
"Authorization": f"Bearer {api_key_str}",
},
timeout=self.timeout,
+ verify=global_ssl_context,
)
# todo: handle retries and max_concurrency
if not self.async_client:
@@ -529,6 +537,7 @@ class ChatMistralAI(BaseChatModel):
"Authorization": f"Bearer {api_key_str}",
},
timeout=self.timeout,
+ verify=global_ssl_context,
)
if self.temperature is not None and not 0 <= self.temperature <= 1:
@@ -950,7 +959,7 @@ class ChatMistralAI(BaseChatModel):
llm = self.bind_tools(
[schema],
tool_choice="any",
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": schema,
},
@@ -968,7 +977,7 @@ class ChatMistralAI(BaseChatModel):
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {
# this is correct - name difference with mistral api
"method": "json_mode"
@@ -990,7 +999,7 @@ class ChatMistralAI(BaseChatModel):
response_format = _convert_to_openai_response_format(schema, strict=True)
llm = self.bind(
response_format=response_format,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": "json_schema"},
"schema": schema,
},
diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py
index fdbcf3c105a..e449969025a 100644
--- a/libs/partners/ollama/langchain_ollama/chat_models.py
+++ b/libs/partners/ollama/langchain_ollama/chat_models.py
@@ -1090,7 +1090,7 @@ class ChatOllama(BaseChatModel):
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": formatted_tool,
},
@@ -1107,7 +1107,7 @@ class ChatOllama(BaseChatModel):
elif method == "json_mode":
llm = self.bind(
format="json",
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": schema,
},
@@ -1127,7 +1127,7 @@ class ChatOllama(BaseChatModel):
schema = cast(TypeBaseModel, schema)
llm = self.bind(
format=schema.model_json_schema(),
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": schema,
},
@@ -1148,7 +1148,7 @@ class ChatOllama(BaseChatModel):
response_format = schema
llm = self.bind(
format=response_format,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": response_format,
},
diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index e40c5bf98d1..a2720764a86 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -12,9 +12,11 @@ import sys
import warnings
from functools import partial
from io import BytesIO
+from json import JSONDecodeError
from math import ceil
from operator import itemgetter
from typing import (
+ TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
@@ -89,6 +91,7 @@ from langchain_core.runnables import (
)
from langchain_core.runnables.config import run_in_executor
from langchain_core.tools import BaseTool
+from langchain_core.tools.base import _stringify
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.function_calling import (
convert_to_openai_function,
@@ -104,12 +107,17 @@ from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from pydantic.v1 import BaseModel as BaseModelV1
from typing_extensions import Self
+if TYPE_CHECKING:
+ from openai.types.responses import Response
+
logger = logging.getLogger(__name__)
# This SSL context is equivelent to the default `verify=True`.
# https://www.python-httpx.org/advanced/ssl/#configuring-client-instances
global_ssl_context = ssl.create_default_context(cafile=certifi.where())
+_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
+
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
"""Convert a dictionary to a LangChain message.
@@ -186,15 +194,38 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
def _format_message_content(content: Any) -> Any:
"""Format message content."""
if content and isinstance(content, list):
- # Remove unexpected block types
formatted_content = []
for block in content:
+ # Remove unexpected block types
if (
isinstance(block, dict)
and "type" in block
- and block["type"] == "tool_use"
+ and block["type"] in ("tool_use", "thinking")
):
continue
+ # Anthropic image blocks
+ elif (
+ isinstance(block, dict)
+ and block.get("type") == "image"
+ and (source := block.get("source"))
+ and isinstance(source, dict)
+ ):
+ if source.get("type") == "base64" and (
+ (media_type := source.get("media_type"))
+ and (data := source.get("data"))
+ ):
+ formatted_content.append(
+ {
+ "type": "image_url",
+ "image_url": {"url": f"data:{media_type};base64,{data}"},
+ }
+ )
+ elif source.get("type") == "url" and (url := source.get("url")):
+ formatted_content.append(
+ {"type": "image_url", "image_url": {"url": url}}
+ )
+ else:
+ continue
else:
formatted_content.append(block)
else:
@@ -505,6 +536,14 @@ class BaseChatOpenAI(BaseChatModel):
invocation.
"""
+ use_responses_api: Optional[bool] = None
+ """Whether to use the Responses API instead of the Chat API.
+
+ If not specified then will be inferred based on invocation params.
+
+ .. versionadded:: 0.3.9
+ """
+
model_config = ConfigDict(populate_by_name=True)
@model_validator(mode="before")
@@ -631,7 +670,7 @@ class BaseChatOpenAI(BaseChatModel):
if output is None:
# Happens in streaming
continue
- token_usage = output["token_usage"]
+ token_usage = output.get("token_usage")
if token_usage is not None:
for k, v in token_usage.items():
if v is None:
@@ -702,6 +741,50 @@ class BaseChatOpenAI(BaseChatModel):
)
return generation_chunk
+ def _stream_responses(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ kwargs["stream"] = True
+ payload = self._get_request_payload(messages, stop=stop, **kwargs)
+ context_manager = self.root_client.responses.create(**payload)
+
+ with context_manager as response:
+ for chunk in response:
+ if generation_chunk := _convert_responses_chunk_to_generation_chunk(
+ chunk
+ ):
+ if run_manager:
+ run_manager.on_llm_new_token(
+ generation_chunk.text, chunk=generation_chunk
+ )
+ yield generation_chunk
+
+ async def _astream_responses(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> AsyncIterator[ChatGenerationChunk]:
+ kwargs["stream"] = True
+ payload = self._get_request_payload(messages, stop=stop, **kwargs)
+ context_manager = await self.root_async_client.responses.create(**payload)
+
+ async with context_manager as response:
+ async for chunk in response:
+ if generation_chunk := _convert_responses_chunk_to_generation_chunk(
+ chunk
+ ):
+ if run_manager:
+ await run_manager.on_llm_new_token(
+ generation_chunk.text, chunk=generation_chunk
+ )
+ yield generation_chunk
+
def _stream(
self,
messages: List[BaseMessage],
@@ -796,10 +879,19 @@ class BaseChatOpenAI(BaseChatModel):
raw_response = self.client.with_raw_response.create(**payload)
response = raw_response.parse()
generation_info = {"headers": dict(raw_response.headers)}
+ elif self._use_responses_api(payload):
+ response = self.root_client.responses.create(**payload)
+ return _construct_lc_result_from_responses_api(response)
else:
response = self.client.create(**payload)
return self._create_chat_result(response, generation_info)
+ def _use_responses_api(self, payload: dict) -> bool:
+ if isinstance(self.use_responses_api, bool):
+ return self.use_responses_api
+ else:
+ return _use_responses_api(payload)
+
def _get_request_payload(
self,
input_: LanguageModelInput,
@@ -811,11 +903,12 @@ class BaseChatOpenAI(BaseChatModel):
if stop is not None:
kwargs["stop"] = stop
- return {
- "messages": [_convert_message_to_dict(m) for m in messages],
- **self._default_params,
- **kwargs,
- }
+ payload = {**self._default_params, **kwargs}
+ if self._use_responses_api(payload):
+ payload = _construct_responses_api_payload(messages, payload)
+ else:
+ payload["messages"] = [_convert_message_to_dict(m) for m in messages]
+ return payload
def _create_chat_result(
self,
@@ -854,6 +947,8 @@ class BaseChatOpenAI(BaseChatModel):
"model_name": response_dict.get("model", self.model_name),
"system_fingerprint": response_dict.get("system_fingerprint", ""),
}
+ if "id" in response_dict:
+ llm_output["id"] = response_dict["id"]
if isinstance(response, openai.BaseModel) and getattr(
response, "choices", None
@@ -966,6 +1061,9 @@ class BaseChatOpenAI(BaseChatModel):
raw_response = await self.async_client.with_raw_response.create(**payload)
response = raw_response.parse()
generation_info = {"headers": dict(raw_response.headers)}
+ elif self._use_responses_api(payload):
+ response = await self.root_async_client.responses.create(**payload)
+ return _construct_lc_result_from_responses_api(response)
else:
response = await self.async_client.create(**payload)
return await run_in_executor(
@@ -1235,33 +1333,38 @@ class BaseChatOpenAI(BaseChatModel):
formatted_tools = [
convert_to_openai_tool(tool, strict=strict) for tool in tools
]
+ tool_names = []
+ for tool in formatted_tools:
+ if "function" in tool:
+ tool_names.append(tool["function"]["name"])
+ elif "name" in tool:
+ tool_names.append(tool["name"])
+ else:
+ pass
if tool_choice:
if isinstance(tool_choice, str):
# tool_choice is a tool/function name
- if tool_choice not in ("auto", "none", "any", "required"):
+ if tool_choice in tool_names:
tool_choice = {
"type": "function",
"function": {"name": tool_choice},
}
+ elif tool_choice in (
+ "file_search",
+ "web_search_preview",
+ "computer_use_preview",
+ ):
+ tool_choice = {"type": tool_choice}
# 'any' is not natively supported by OpenAI API.
# We support 'any' since other models use this instead of 'required'.
- if tool_choice == "any":
+ elif tool_choice == "any":
tool_choice = "required"
+ else:
+ pass
elif isinstance(tool_choice, bool):
tool_choice = "required"
elif isinstance(tool_choice, dict):
- tool_names = [
- formatted_tool["function"]["name"]
- for formatted_tool in formatted_tools
- ]
- if not any(
- tool_name == tool_choice["function"]["name"]
- for tool_name in tool_names
- ):
- raise ValueError(
- f"Tool choice {tool_choice} was specified, but the only "
- f"provided tools were {tool_names}."
- )
+ pass
else:
raise ValueError(
f"Unrecognized tool_choice type. Expected str, bool or dict. "
@@ -1408,7 +1511,7 @@ class BaseChatOpenAI(BaseChatModel):
tool_choice=tool_name,
parallel_tool_calls=False,
strict=strict,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": schema,
},
@@ -1427,7 +1530,7 @@ class BaseChatOpenAI(BaseChatModel):
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": schema,
},
@@ -1446,7 +1549,7 @@ class BaseChatOpenAI(BaseChatModel):
response_format = _convert_to_openai_response_format(schema, strict=strict)
llm = self.bind(
response_format=response_format,
- structured_output_format={
+ ls_structured_output_format={
"kwargs": {"method": method},
"schema": convert_to_openai_tool(schema),
},
@@ -1539,6 +1642,8 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
stream_options: Dict
Configure streaming outputs, like whether to return token usage when
streaming (``{"include_usage": True}``).
+ use_responses_api: Optional[bool]
+ Whether to use the responses API.
See full list of supported init args and their descriptions in the params section.
@@ -1782,6 +1887,79 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
See ``ChatOpenAI.bind_tools()`` method for more.
+ .. dropdown:: Built-in tools
+
+ .. versionadded:: 0.3.9
+
+ You can access `built-in tools `_
+ supported by the OpenAI Responses API. See LangChain
+ `docs `_ for more
+ detail.
+
+ .. code-block:: python
+
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(model="gpt-4o-mini")
+
+ tool = {"type": "web_search_preview"}
+ llm_with_tools = llm.bind_tools([tool])
+
+ response = llm_with_tools.invoke("What was a positive news story from today?")
+ response.content
+
+ .. code-block:: python
+
+ [
+ {
+ "type": "text",
+ "text": "Today, a heartwarming story emerged from ...",
+ "annotations": [
+ {
+ "end_index": 778,
+ "start_index": 682,
+ "title": "Title of story",
+ "type": "url_citation",
+ "url": "",
+ }
+ ],
+ }
+ ]
+
+ .. dropdown:: Managing conversation state
+
+ .. versionadded:: 0.3.9
+
+ OpenAI's Responses API supports management of
+ `conversation state `_.
+ Passing in response IDs from previous messages will continue a conversational
+ thread. See LangChain
+ `docs `_ for more
+ detail.
+
+ .. code-block:: python
+
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
+ response = llm.invoke("Hi, I'm Bob.")
+ response.text()
+
+ .. code-block:: python
+
+ "Hi Bob! How can I assist you today?"
+
+ .. code-block:: python
+
+ second_response = llm.invoke(
+ "What is my name?", previous_response_id=response.response_metadata["id"]
+ )
+ second_response.text()
+
+ .. code-block:: python
+
+ "Your name is Bob. How can I help you today, Bob?"
+
.. dropdown:: Structured output
.. code-block:: python
@@ -2059,27 +2237,34 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
) -> Iterator[ChatGenerationChunk]:
"""Set default stream_options."""
- stream_usage = self._should_stream_usage(stream_usage, **kwargs)
- # Note: stream_options is not a valid parameter for Azure OpenAI.
- # To support users proxying Azure through ChatOpenAI, here we only specify
- # stream_options if include_usage is set to True.
- # See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
- # for release notes.
- if stream_usage:
- kwargs["stream_options"] = {"include_usage": stream_usage}
+ if self._use_responses_api(kwargs):
+ return super()._stream_responses(*args, **kwargs)
+ else:
+ stream_usage = self._should_stream_usage(stream_usage, **kwargs)
+ # Note: stream_options is not a valid parameter for Azure OpenAI.
+ # To support users proxying Azure through ChatOpenAI, here we only specify
+ # stream_options if include_usage is set to True.
+ # See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
+ # for release notes.
+ if stream_usage:
+ kwargs["stream_options"] = {"include_usage": stream_usage}
- return super()._stream(*args, **kwargs)
+ return super()._stream(*args, **kwargs)
async def _astream(
self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
) -> AsyncIterator[ChatGenerationChunk]:
"""Set default stream_options."""
- stream_usage = self._should_stream_usage(stream_usage, **kwargs)
- if stream_usage:
- kwargs["stream_options"] = {"include_usage": stream_usage}
+ if self._use_responses_api(kwargs):
+ async for chunk in super()._astream_responses(*args, **kwargs):
+ yield chunk
+ else:
+ stream_usage = self._should_stream_usage(stream_usage, **kwargs)
+ if stream_usage:
+ kwargs["stream_options"] = {"include_usage": stream_usage}
- async for chunk in super()._astream(*args, **kwargs):
- yield chunk
+ async for chunk in super()._astream(*args, **kwargs):
+ yield chunk
def with_structured_output(
self,
@@ -2594,3 +2779,355 @@ def _create_usage_metadata(oai_token_usage: dict) -> UsageMetadata:
**{k: v for k, v in output_token_details.items() if v is not None}
),
)
+
+
+def _create_usage_metadata_responses(oai_token_usage: dict) -> UsageMetadata:
+ input_tokens = oai_token_usage.get("input_tokens", 0)
+ output_tokens = oai_token_usage.get("output_tokens", 0)
+ total_tokens = oai_token_usage.get("total_tokens", input_tokens + output_tokens)
+
+    output_token_details: dict = {
+        "audio": (oai_token_usage.get("output_tokens_details") or {}).get(
+            "audio_tokens"
+        ),
+        "reasoning": (oai_token_usage.get("output_tokens_details") or {}).get(
+            "reasoning_tokens"
+        ),
+    }
+ return UsageMetadata(
+ input_tokens=input_tokens,
+ output_tokens=output_tokens,
+ total_tokens=total_tokens,
+ output_token_details=OutputTokenDetails(
+ **{k: v for k, v in output_token_details.items() if v is not None}
+ ),
+ )
+
+
+def _is_builtin_tool(tool: dict) -> bool:
+ return "type" in tool and tool["type"] != "function"
+
+
+def _use_responses_api(payload: dict) -> bool:
+ uses_builtin_tools = "tools" in payload and any(
+ _is_builtin_tool(tool) for tool in payload["tools"]
+ )
+ responses_only_args = {"previous_response_id", "text", "truncation", "include"}
+ return bool(uses_builtin_tools or responses_only_args.intersection(payload))
+
+
+def _construct_responses_api_payload(
+ messages: Sequence[BaseMessage], payload: dict
+) -> dict:
+ payload["input"] = _construct_responses_api_input(messages)
+ if tools := payload.pop("tools", None):
+ new_tools: list = []
+ for tool in tools:
+ # chat api: {"type": "function", "function": {"name": "...", "description": "...", "parameters": {...}, "strict": ...}} # noqa: E501
+ # responses api: {"type": "function", "name": "...", "description": "...", "parameters": {...}, "strict": ...} # noqa: E501
+ if tool["type"] == "function" and "function" in tool:
+ new_tools.append({"type": "function", **tool["function"]})
+ else:
+ new_tools.append(tool)
+ payload["tools"] = new_tools
+ if tool_choice := payload.pop("tool_choice", None):
+ # chat api: {"type": "function", "function": {"name": "..."}}
+ # responses api: {"type": "function", "name": "..."}
+ if tool_choice["type"] == "function" and "function" in tool_choice:
+ payload["tool_choice"] = {"type": "function", **tool_choice["function"]}
+ else:
+ payload["tool_choice"] = tool_choice
+ if response_format := payload.pop("response_format", None):
+ if payload.get("text"):
+ text = payload["text"]
+ raise ValueError(
+ "Can specify at most one of 'response_format' or 'text', received both:"
+ f"\n{response_format=}\n{text=}"
+ )
+        # chat api: {"type": "json_schema", "json_schema": {"schema": {...}, "name": "...", "description": "...", "strict": ...}} # noqa: E501
+        # responses api: {"type": "json_schema", "schema": {...}, "name": "...", "description": "...", "strict": ...} # noqa: E501
+ if response_format["type"] == "json_schema":
+ payload["text"] = {"type": "json_schema", **response_format["json_schema"]}
+ else:
+ payload["text"] = response_format
+ return payload
+
+
+def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
+ input_ = []
+ for lc_msg in messages:
+ msg = _convert_message_to_dict(lc_msg)
+ if msg["role"] == "tool":
+ tool_output = msg["content"]
+ if not isinstance(tool_output, str):
+ tool_output = _stringify(tool_output)
+ function_call_output = {
+ "type": "function_call_output",
+ "output": tool_output,
+ "call_id": msg["tool_call_id"],
+ }
+ input_.append(function_call_output)
+ elif msg["role"] == "assistant":
+ function_calls = []
+ if tool_calls := msg.pop("tool_calls", None):
+ # TODO: should you be able to preserve the function call object id on
+ # the langchain tool calls themselves?
+ if not lc_msg.additional_kwargs.get(_FUNCTION_CALL_IDS_MAP_KEY):
+                raise ValueError("AIMessage has tool_calls but is missing function call IDs.")  # noqa: E501
+ function_call_ids = lc_msg.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY]
+ for tool_call in tool_calls:
+ function_call = {
+ "type": "function_call",
+ "name": tool_call["function"]["name"],
+ "arguments": tool_call["function"]["arguments"],
+ "call_id": tool_call["id"],
+ "id": function_call_ids[tool_call["id"]],
+ }
+ function_calls.append(function_call)
+
+ msg["content"] = msg.get("content") or []
+ if lc_msg.additional_kwargs.get("refusal"):
+ if isinstance(msg["content"], str):
+ msg["content"] = [
+ {
+ "type": "output_text",
+ "text": msg["content"],
+ "annotations": [],
+ }
+ ]
+ msg["content"] = msg["content"] + [
+ {"type": "refusal", "refusal": lc_msg.additional_kwargs["refusal"]}
+ ]
+ if isinstance(msg["content"], list):
+ new_blocks = []
+ for block in msg["content"]:
+ # chat api: {"type": "text", "text": "..."}
+ # responses api: {"type": "output_text", "text": "...", "annotations": [...]} # noqa: E501
+ if block["type"] == "text":
+ new_blocks.append(
+ {
+ "type": "output_text",
+ "text": block["text"],
+ "annotations": block.get("annotations") or [],
+ }
+ )
+ elif block["type"] in ("output_text", "refusal"):
+ new_blocks.append(block)
+ else:
+ pass
+ msg["content"] = new_blocks
+ if msg["content"]:
+ input_.append(msg)
+ input_.extend(function_calls)
+ elif msg["role"] == "user":
+ if isinstance(msg["content"], list):
+ new_blocks = []
+ for block in msg["content"]:
+ # chat api: {"type": "text", "text": "..."}
+ # responses api: {"type": "input_text", "text": "..."}
+ if block["type"] == "text":
+ new_blocks.append({"type": "input_text", "text": block["text"]})
+ # chat api: {"type": "image_url", "image_url": {"url": "...", "detail": "..."}} # noqa: E501
+ # responses api: {"type": "image_url", "image_url": "...", "detail": "...", "file_id": "..."} # noqa: E501
+ elif block["type"] == "image_url":
+ new_block = {
+ "type": "input_image",
+ "image_url": block["image_url"]["url"],
+ }
+ if block["image_url"].get("detail"):
+ new_block["detail"] = block["image_url"]["detail"]
+ new_blocks.append(new_block)
+ elif block["type"] in ("input_text", "input_image", "input_file"):
+ new_blocks.append(block)
+ else:
+ pass
+ msg["content"] = new_blocks
+ input_.append(msg)
+ else:
+ input_.append(msg)
+
+ return input_
+
+
+def _construct_lc_result_from_responses_api(response: Response) -> ChatResult:
+ """Construct ChatResponse from OpenAI Response API response."""
+ if response.error:
+ raise ValueError(response.error)
+
+ response_metadata = {
+ k: v
+ for k, v in response.model_dump(exclude_none=True, mode="json").items()
+ if k
+ in (
+ "created_at",
+ "id",
+ "incomplete_details",
+ "metadata",
+ "object",
+ "status",
+ "user",
+ "model",
+ )
+ }
+ # for compatibility with chat completion calls.
+ response_metadata["model_name"] = response_metadata.get("model")
+ if response.usage:
+ usage_metadata = _create_usage_metadata_responses(response.usage.model_dump())
+ else:
+ usage_metadata = None
+
+ content_blocks: list = []
+ tool_calls = []
+ invalid_tool_calls = []
+ additional_kwargs: dict = {}
+ msg_id = None
+ for output in response.output:
+ if output.type == "message":
+ for content in output.content:
+ if content.type == "output_text":
+ block = {
+ "type": "text",
+ "text": content.text,
+ "annotations": [
+ annotation.model_dump()
+ for annotation in content.annotations
+ ],
+ }
+ content_blocks.append(block)
+ if content.type == "refusal":
+ additional_kwargs["refusal"] = content.refusal
+ msg_id = output.id
+ elif output.type == "function_call":
+ try:
+ args = json.loads(output.arguments, strict=False)
+ error = None
+ except JSONDecodeError as e:
+ args = output.arguments
+ error = str(e)
+ if error is None:
+ tool_call = {
+ "type": "tool_call",
+ "name": output.name,
+ "args": args,
+ "id": output.call_id,
+ }
+ tool_calls.append(tool_call)
+ else:
+ tool_call = {
+ "type": "invalid_tool_call",
+ "name": output.name,
+ "args": args,
+ "id": output.call_id,
+ "error": error,
+ }
+ invalid_tool_calls.append(tool_call)
+ if _FUNCTION_CALL_IDS_MAP_KEY not in additional_kwargs:
+ additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {}
+ additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][output.call_id] = output.id
+ elif output.type == "reasoning":
+ additional_kwargs["reasoning"] = output.model_dump(
+ exclude_none=True, mode="json"
+ )
+ else:
+ tool_output = output.model_dump(exclude_none=True, mode="json")
+ if "tool_outputs" in additional_kwargs:
+ additional_kwargs["tool_outputs"].append(tool_output)
+ else:
+ additional_kwargs["tool_outputs"] = [tool_output]
+ message = AIMessage(
+ content=content_blocks,
+ id=msg_id,
+ usage_metadata=usage_metadata,
+ response_metadata=response_metadata,
+ additional_kwargs=additional_kwargs,
+ tool_calls=tool_calls,
+ invalid_tool_calls=invalid_tool_calls,
+ )
+ return ChatResult(generations=[ChatGeneration(message=message)])
+
+
+def _convert_responses_chunk_to_generation_chunk(
+ chunk: Any,
+) -> Optional[ChatGenerationChunk]:
+ content = []
+ tool_call_chunks: list = []
+ additional_kwargs: dict = {}
+ response_metadata = {}
+ usage_metadata = None
+ id = None
+ if chunk.type == "response.output_text.delta":
+ content.append(
+ {"type": "text", "text": chunk.delta, "index": chunk.content_index}
+ )
+ elif chunk.type == "response.output_text.annotation.added":
+ content.append(
+ {
+ "annotations": [
+ chunk.annotation.model_dump(exclude_none=True, mode="json")
+ ],
+ "index": chunk.content_index,
+ }
+ )
+ elif chunk.type == "response.created":
+ response_metadata["id"] = chunk.response.id
+ elif chunk.type == "response.completed":
+ msg = cast(
+ AIMessage,
+ (
+ _construct_lc_result_from_responses_api(chunk.response)
+ .generations[0]
+ .message
+ ),
+ )
+ usage_metadata = msg.usage_metadata
+ response_metadata = {
+ k: v for k, v in msg.response_metadata.items() if k != "id"
+ }
+ elif chunk.type == "response.output_item.added" and chunk.item.type == "message":
+ id = chunk.item.id
+ elif (
+ chunk.type == "response.output_item.added"
+ and chunk.item.type == "function_call"
+ ):
+ tool_call_chunks.append(
+ {
+ "type": "tool_call_chunk",
+ "name": chunk.item.name,
+ "args": chunk.item.arguments,
+ "id": chunk.item.call_id,
+ "index": chunk.output_index,
+ }
+ )
+ additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {
+ chunk.item.call_id: chunk.item.id
+ }
+ elif chunk.type == "response.output_item.done" and chunk.item.type in (
+ "web_search_call",
+ "file_search_call",
+ ):
+ additional_kwargs["tool_outputs"] = [
+ chunk.item.model_dump(exclude_none=True, mode="json")
+ ]
+ elif chunk.type == "response.function_call_arguments.delta":
+ tool_call_chunks.append(
+ {
+ "type": "tool_call_chunk",
+ "args": chunk.delta,
+ "index": chunk.output_index,
+ }
+ )
+ elif chunk.type == "response.refusal.done":
+ additional_kwargs["refusal"] = chunk.refusal
+ else:
+ return None
+
+ return ChatGenerationChunk(
+ message=AIMessageChunk(
+ content=content, # type: ignore[arg-type]
+ tool_call_chunks=tool_call_chunks,
+ usage_metadata=usage_metadata,
+ response_metadata=response_metadata,
+ additional_kwargs=additional_kwargs,
+ id=id,
+ )
+ )
diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml
index 5330ea6705b..4b4939d1032 100644
--- a/libs/partners/openai/pyproject.toml
+++ b/libs/partners/openai/pyproject.toml
@@ -7,12 +7,12 @@ authors = []
license = { text = "MIT" }
requires-python = "<4.0,>=3.9"
dependencies = [
- "langchain-core<1.0.0,>=0.3.39",
- "openai<2.0.0,>=1.58.1",
+ "langchain-core<1.0.0,>=0.3.45-rc.1",
+ "openai<2.0.0,>=1.66.0",
"tiktoken<1,>=0.7",
]
name = "langchain-openai"
-version = "0.3.7"
+version = "0.3.9-rc.1"
description = "An integration package connecting OpenAI and LangChain"
readme = "README.md"
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py
index f131dbaa2bb..53528808c2a 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py
@@ -29,6 +29,10 @@ class TestOpenAIStandard(ChatModelIntegrationTests):
def supports_json_mode(self) -> bool:
return True
+ @property
+ def supports_anthropic_inputs(self) -> bool:
+ return True
+
@property
def supported_usage_metadata_details(
self,
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
new file mode 100644
index 00000000000..c320083e6ef
--- /dev/null
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
@@ -0,0 +1,168 @@
+"""Test Responses API usage."""
+
+import os
+from typing import Any, Optional, cast
+
+import pytest
+from langchain_core.messages import (
+ AIMessage,
+ AIMessageChunk,
+ BaseMessage,
+ BaseMessageChunk,
+)
+
+from langchain_openai import ChatOpenAI
+
+
+def _check_response(response: Optional[BaseMessage]) -> None:
+ assert isinstance(response, AIMessage)
+ assert isinstance(response.content, list)
+ for block in response.content:
+ assert isinstance(block, dict)
+ if block["type"] == "text":
+ assert isinstance(block["text"], str)
+ for annotation in block["annotations"]:
+ if annotation["type"] == "file_citation":
+ assert all(
+ key in annotation
+ for key in ["file_id", "filename", "index", "type"]
+ )
+ elif annotation["type"] == "web_search":
+ assert all(
+ key in annotation
+ for key in ["end_index", "start_index", "title", "type", "url"]
+ )
+
+ text_content = response.text()
+ assert isinstance(text_content, str)
+ assert text_content
+ assert response.usage_metadata
+ assert response.usage_metadata["input_tokens"] > 0
+ assert response.usage_metadata["output_tokens"] > 0
+ assert response.usage_metadata["total_tokens"] > 0
+ assert response.response_metadata["model_name"]
+ for tool_output in response.additional_kwargs["tool_outputs"]:
+ assert tool_output["id"]
+ assert tool_output["status"]
+ assert tool_output["type"]
+
+
+def test_web_search() -> None:
+ llm = ChatOpenAI(model="gpt-4o-mini")
+ first_response = llm.invoke(
+ "What was a positive news story from today?",
+ tools=[{"type": "web_search_preview"}],
+ )
+ _check_response(first_response)
+
+ # Test streaming
+ full: Optional[BaseMessageChunk] = None
+ for chunk in llm.stream(
+ "What was a positive news story from today?",
+ tools=[{"type": "web_search_preview"}],
+ ):
+ assert isinstance(chunk, AIMessageChunk)
+ full = chunk if full is None else full + chunk
+ _check_response(full)
+
+ # Use OpenAI's stateful API
+ response = llm.invoke(
+ "what about a negative one",
+ tools=[{"type": "web_search_preview"}],
+ previous_response_id=first_response.response_metadata["id"],
+ )
+ _check_response(response)
+
+ # Manually pass in chat history
+ response = llm.invoke(
+ [
+ first_response,
+ {
+ "role": "user",
+ "content": [{"type": "text", "text": "what about a negative one"}],
+ },
+ ],
+ tools=[{"type": "web_search_preview"}],
+ )
+ _check_response(response)
+
+ # Bind tool
+ response = llm.bind_tools([{"type": "web_search_preview"}]).invoke(
+ "What was a positive news story from today?"
+ )
+ _check_response(response)
+
+
+async def test_web_search_async() -> None:
+ llm = ChatOpenAI(model="gpt-4o-mini")
+ response = await llm.ainvoke(
+ "What was a positive news story from today?",
+ tools=[{"type": "web_search_preview"}],
+ )
+ _check_response(response)
+ assert response.response_metadata["status"]
+
+ # Test streaming
+ full: Optional[BaseMessageChunk] = None
+ async for chunk in llm.astream(
+ "What was a positive news story from today?",
+ tools=[{"type": "web_search_preview"}],
+ ):
+ assert isinstance(chunk, AIMessageChunk)
+ full = chunk if full is None else full + chunk
+ assert isinstance(full, AIMessageChunk)
+ _check_response(full)
+
+
+def test_function_calling() -> None:
+ def multiply(x: int, y: int) -> int:
+ """return x * y"""
+ return x * y
+
+ llm = ChatOpenAI(model="gpt-4o-mini")
+ bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
+ ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
+ assert len(ai_msg.tool_calls) == 1
+ assert ai_msg.tool_calls[0]["name"] == "multiply"
+ assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"}
+
+ full: Any = None
+ for chunk in bound_llm.stream("whats 5 * 4"):
+ assert isinstance(chunk, AIMessageChunk)
+ full = chunk if full is None else full + chunk
+ assert len(full.tool_calls) == 1
+ assert full.tool_calls[0]["name"] == "multiply"
+ assert set(full.tool_calls[0]["args"]) == {"x", "y"}
+
+ response = bound_llm.invoke("whats some good news from today")
+ _check_response(response)
+
+
+def test_stateful_api() -> None:
+ llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
+ response = llm.invoke("how are you, my name is Bobo")
+ assert "id" in response.response_metadata
+
+ second_response = llm.invoke(
+ "what's my name", previous_response_id=response.response_metadata["id"]
+ )
+ assert isinstance(second_response.content, list)
+ assert "bobo" in second_response.content[0]["text"].lower() # type: ignore
+
+
+def test_file_search() -> None:
+ pytest.skip() # TODO: set up infra
+ llm = ChatOpenAI(model="gpt-4o-mini")
+ tool = {
+ "type": "file_search",
+ "vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]],
+ }
+ response = llm.invoke("What is deep research by OpenAI?", tools=[tool])
+ _check_response(response)
+
+ full: Optional[BaseMessageChunk] = None
+ for chunk in llm.stream("What is deep research by OpenAI?", tools=[tool]):
+ assert isinstance(chunk, AIMessageChunk)
+ full = chunk if full is None else full + chunk
+ assert isinstance(full, AIMessageChunk)
+ _check_response(full)
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index 8f8c6fa0361..e5e89990b78 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -3,7 +3,7 @@
import json
from functools import partial
from types import TracebackType
-from typing import Any, Dict, List, Literal, Optional, Type, Union
+from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
@@ -19,13 +19,30 @@ from langchain_core.messages import (
ToolMessage,
)
from langchain_core.messages.ai import UsageMetadata
-from langchain_core.outputs import ChatGeneration
+from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import RunnableLambda
+from openai.types.responses import ResponseOutputMessage
+from openai.types.responses.response import IncompleteDetails, Response, ResponseUsage
+from openai.types.responses.response_error import ResponseError
+from openai.types.responses.response_file_search_tool_call import (
+ ResponseFileSearchToolCall,
+ Result,
+)
+from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
+from openai.types.responses.response_function_web_search import (
+ ResponseFunctionWebSearch,
+)
+from openai.types.responses.response_output_refusal import ResponseOutputRefusal
+from openai.types.responses.response_output_text import ResponseOutputText
+from openai.types.responses.response_usage import OutputTokensDetails
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
from langchain_openai import ChatOpenAI
from langchain_openai.chat_models.base import (
+ _FUNCTION_CALL_IDS_MAP_KEY,
+ _construct_lc_result_from_responses_api,
+ _construct_responses_api_input,
_convert_dict_to_message,
_convert_message_to_dict,
_convert_to_openai_response_format,
@@ -862,7 +879,7 @@ def test_nested_structured_output_strict() -> None:
setup: str
punchline: str
- self_evaluation: SelfEvaluation
+ _evaluation: SelfEvaluation
llm.with_structured_output(JokeWithEvaluation, method="json_schema")
@@ -936,3 +953,731 @@ def test_structured_outputs_parser() -> None:
assert isinstance(deserialized, ChatGeneration)
result = output_parser.invoke(deserialized.message)
assert result == parsed_response
+
+
+def test__construct_lc_result_from_responses_api_error_handling() -> None:
+ """Test that errors in the response are properly raised."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ error=ResponseError(message="Test error", code="server_error"),
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[],
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ _construct_lc_result_from_responses_api(response)
+
+ assert "Test error" in str(excinfo.value)
+
+
+def test__construct_lc_result_from_responses_api_basic_text_response() -> None:
+ """Test a basic text response with no tools or special features."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseOutputMessage(
+ type="message",
+ id="msg_123",
+ content=[
+ ResponseOutputText(
+ type="output_text", text="Hello, world!", annotations=[]
+ )
+ ],
+ role="assistant",
+ status="completed",
+ )
+ ],
+ usage=ResponseUsage(
+ input_tokens=10,
+ output_tokens=3,
+ total_tokens=13,
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
+ ),
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ assert isinstance(result, ChatResult)
+ assert len(result.generations) == 1
+ assert isinstance(result.generations[0], ChatGeneration)
+ assert isinstance(result.generations[0].message, AIMessage)
+ assert result.generations[0].message.content == [
+ {"type": "text", "text": "Hello, world!", "annotations": []}
+ ]
+ assert result.generations[0].message.id == "msg_123"
+ assert result.generations[0].message.usage_metadata
+ assert result.generations[0].message.usage_metadata["input_tokens"] == 10
+ assert result.generations[0].message.usage_metadata["output_tokens"] == 3
+ assert result.generations[0].message.usage_metadata["total_tokens"] == 13
+ assert result.generations[0].message.response_metadata["id"] == "resp_123"
+ assert result.generations[0].message.response_metadata["model_name"] == "gpt-4o"
+
+
+def test__construct_lc_result_from_responses_api_multiple_text_blocks() -> None:
+ """Test a response with multiple text blocks."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseOutputMessage(
+ type="message",
+ id="msg_123",
+ content=[
+ ResponseOutputText(
+ type="output_text", text="First part", annotations=[]
+ ),
+ ResponseOutputText(
+ type="output_text", text="Second part", annotations=[]
+ ),
+ ],
+ role="assistant",
+ status="completed",
+ )
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ assert len(result.generations[0].message.content) == 2
+ assert result.generations[0].message.content[0]["text"] == "First part" # type: ignore
+ assert result.generations[0].message.content[1]["text"] == "Second part" # type: ignore
+
+
+def test__construct_lc_result_from_responses_api_refusal_response() -> None:
+ """Test a response with a refusal."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseOutputMessage(
+ type="message",
+ id="msg_123",
+ content=[
+ ResponseOutputRefusal(
+ type="refusal", refusal="I cannot assist with that request."
+ )
+ ],
+ role="assistant",
+ status="completed",
+ )
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ assert result.generations[0].message.content == []
+ assert (
+ result.generations[0].message.additional_kwargs["refusal"]
+ == "I cannot assist with that request."
+ )
+
+
+def test__construct_lc_result_from_responses_api_function_call_valid_json() -> None:
+ """Test a response with a valid function call."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseFunctionToolCall(
+ type="function_call",
+ id="func_123",
+ call_id="call_123",
+ name="get_weather",
+ arguments='{"location": "New York", "unit": "celsius"}',
+ )
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ msg: AIMessage = cast(AIMessage, result.generations[0].message)
+ assert len(msg.tool_calls) == 1
+ assert msg.tool_calls[0]["type"] == "tool_call"
+ assert msg.tool_calls[0]["name"] == "get_weather"
+ assert msg.tool_calls[0]["id"] == "call_123"
+ assert msg.tool_calls[0]["args"] == {"location": "New York", "unit": "celsius"}
+ assert _FUNCTION_CALL_IDS_MAP_KEY in result.generations[0].message.additional_kwargs
+ assert (
+ result.generations[0].message.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][
+ "call_123"
+ ]
+ == "func_123"
+ )
+
+
+def test__construct_lc_result_from_responses_api_function_call_invalid_json() -> None:
+ """Test a response with an invalid JSON function call."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseFunctionToolCall(
+ type="function_call",
+ id="func_123",
+ call_id="call_123",
+ name="get_weather",
+ arguments='{"location": "New York", "unit": "celsius"',
+ # Missing closing brace
+ )
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ msg: AIMessage = cast(AIMessage, result.generations[0].message)
+ assert len(msg.invalid_tool_calls) == 1
+ assert msg.invalid_tool_calls[0]["type"] == "invalid_tool_call"
+ assert msg.invalid_tool_calls[0]["name"] == "get_weather"
+ assert msg.invalid_tool_calls[0]["id"] == "call_123"
+ assert (
+ msg.invalid_tool_calls[0]["args"]
+ == '{"location": "New York", "unit": "celsius"'
+ )
+ assert "error" in msg.invalid_tool_calls[0]
+ assert _FUNCTION_CALL_IDS_MAP_KEY in result.generations[0].message.additional_kwargs
+
+
+def test__construct_lc_result_from_responses_api_complex_response() -> None:
+ """Test a complex response with multiple output types."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseOutputMessage(
+ type="message",
+ id="msg_123",
+ content=[
+ ResponseOutputText(
+ type="output_text",
+ text="Here's the information you requested:",
+ annotations=[],
+ )
+ ],
+ role="assistant",
+ status="completed",
+ ),
+ ResponseFunctionToolCall(
+ type="function_call",
+ id="func_123",
+ call_id="call_123",
+ name="get_weather",
+ arguments='{"location": "New York"}',
+ ),
+ ],
+ metadata=dict(key1="value1", key2="value2"),
+ incomplete_details=IncompleteDetails(reason="max_output_tokens"),
+ status="completed",
+ user="user_123",
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ # Check message content
+ assert result.generations[0].message.content == [
+ {
+ "type": "text",
+ "text": "Here's the information you requested:",
+ "annotations": [],
+ }
+ ]
+
+ # Check tool calls
+ msg: AIMessage = cast(AIMessage, result.generations[0].message)
+ assert len(msg.tool_calls) == 1
+ assert msg.tool_calls[0]["name"] == "get_weather"
+
+ # Check metadata
+ assert result.generations[0].message.response_metadata["id"] == "resp_123"
+ assert result.generations[0].message.response_metadata["metadata"] == {
+ "key1": "value1",
+ "key2": "value2",
+ }
+ assert result.generations[0].message.response_metadata["incomplete_details"] == {
+ "reason": "max_output_tokens"
+ }
+ assert result.generations[0].message.response_metadata["status"] == "completed"
+ assert result.generations[0].message.response_metadata["user"] == "user_123"
+
+
+def test__construct_lc_result_from_responses_api_no_usage_metadata() -> None:
+ """Test a response without usage metadata."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseOutputMessage(
+ type="message",
+ id="msg_123",
+ content=[
+ ResponseOutputText(
+ type="output_text", text="Hello, world!", annotations=[]
+ )
+ ],
+ role="assistant",
+ status="completed",
+ )
+ ],
+ # No usage field
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ assert cast(AIMessage, result.generations[0].message).usage_metadata is None
+
+
+def test__construct_lc_result_from_responses_api_web_search_response() -> None:
+ """Test a response with web search output."""
+ from openai.types.responses.response_function_web_search import (
+ ResponseFunctionWebSearch,
+ )
+
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseFunctionWebSearch(
+ id="websearch_123", type="web_search_call", status="completed"
+ )
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ assert "tool_outputs" in result.generations[0].message.additional_kwargs
+ assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 1
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["type"]
+ == "web_search_call"
+ )
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["id"]
+ == "websearch_123"
+ )
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["status"]
+ == "completed"
+ )
+
+
+def test__construct_lc_result_from_responses_api_file_search_response() -> None:
+ """Test a response with file search output."""
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseFileSearchToolCall(
+ id="filesearch_123",
+ type="file_search_call",
+ status="completed",
+ queries=["python code", "langchain"],
+ results=[
+ Result(
+ file_id="file_123",
+ filename="example.py",
+ score=0.95,
+ text="def hello_world() -> None:\n print('Hello, world!')",
+ attributes={"language": "python", "size": 42},
+ )
+ ],
+ )
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ assert "tool_outputs" in result.generations[0].message.additional_kwargs
+ assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 1
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["type"]
+ == "file_search_call"
+ )
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["id"]
+ == "filesearch_123"
+ )
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["status"]
+ == "completed"
+ )
+ assert result.generations[0].message.additional_kwargs["tool_outputs"][0][
+ "queries"
+ ] == ["python code", "langchain"]
+ assert (
+ len(
+ result.generations[0].message.additional_kwargs["tool_outputs"][0][
+ "results"
+ ]
+ )
+ == 1
+ )
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["results"][
+ 0
+ ]["file_id"]
+ == "file_123"
+ )
+ assert (
+ result.generations[0].message.additional_kwargs["tool_outputs"][0]["results"][
+ 0
+ ]["score"]
+ == 0.95
+ )
+
+
+def test__construct_lc_result_from_responses_api_mixed_search_responses() -> None:
+ """Test a response with both web search and file search outputs."""
+
+ response = Response(
+ id="resp_123",
+ created_at=1234567890,
+ model="gpt-4o",
+ object="response",
+ parallel_tool_calls=True,
+ tools=[],
+ tool_choice="auto",
+ output=[
+ ResponseOutputMessage(
+ type="message",
+ id="msg_123",
+ content=[
+ ResponseOutputText(
+ type="output_text", text="Here's what I found:", annotations=[]
+ )
+ ],
+ role="assistant",
+ status="completed",
+ ),
+ ResponseFunctionWebSearch(
+ id="websearch_123", type="web_search_call", status="completed"
+ ),
+ ResponseFileSearchToolCall(
+ id="filesearch_123",
+ type="file_search_call",
+ status="completed",
+ queries=["python code"],
+ results=[
+ Result(
+ file_id="file_123",
+ filename="example.py",
+ score=0.95,
+ text="def hello_world() -> None:\n print('Hello, world!')",
+ )
+ ],
+ ),
+ ],
+ )
+
+ result = _construct_lc_result_from_responses_api(response)
+
+ # Check message content
+ assert result.generations[0].message.content == [
+ {"type": "text", "text": "Here's what I found:", "annotations": []}
+ ]
+
+ # Check tool outputs
+ assert "tool_outputs" in result.generations[0].message.additional_kwargs
+ assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 2
+
+ # Check web search output
+ web_search = next(
+ output
+ for output in result.generations[0].message.additional_kwargs["tool_outputs"]
+ if output["type"] == "web_search_call"
+ )
+ assert web_search["id"] == "websearch_123"
+ assert web_search["status"] == "completed"
+
+ # Check file search output
+ file_search = next(
+ output
+ for output in result.generations[0].message.additional_kwargs["tool_outputs"]
+ if output["type"] == "file_search_call"
+ )
+ assert file_search["id"] == "filesearch_123"
+ assert file_search["queries"] == ["python code"]
+ assert file_search["results"][0]["filename"] == "example.py"
+
+
+def test__construct_responses_api_input_human_message_with_text_blocks_conversion() -> (
+ None
+):
+ """Test that human messages with text blocks are properly converted."""
+ messages: list = [
+ HumanMessage(content=[{"type": "text", "text": "What's in this image?"}])
+ ]
+ result = _construct_responses_api_input(messages)
+
+ assert len(result) == 1
+ assert result[0]["role"] == "user"
+ assert isinstance(result[0]["content"], list)
+ assert len(result[0]["content"]) == 1
+ assert result[0]["content"][0]["type"] == "input_text"
+ assert result[0]["content"][0]["text"] == "What's in this image?"
+
+
+def test__construct_responses_api_input_human_message_with_image_url_conversion() -> (
+ None
+):
+ """Test that human messages with image_url blocks are properly converted."""
+ messages: list = [
+ HumanMessage(
+ content=[
+ {"type": "text", "text": "What's in this image?"},
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://example.com/image.jpg",
+ "detail": "high",
+ },
+ },
+ ]
+ )
+ ]
+ result = _construct_responses_api_input(messages)
+
+ assert len(result) == 1
+ assert result[0]["role"] == "user"
+ assert isinstance(result[0]["content"], list)
+ assert len(result[0]["content"]) == 2
+
+ # Check text block conversion
+ assert result[0]["content"][0]["type"] == "input_text"
+ assert result[0]["content"][0]["text"] == "What's in this image?"
+
+ # Check image block conversion
+ assert result[0]["content"][1]["type"] == "input_image"
+ assert result[0]["content"][1]["image_url"] == "https://example.com/image.jpg"
+ assert result[0]["content"][1]["detail"] == "high"
+
+
+def test__construct_responses_api_input_ai_message_with_tool_calls() -> None:
+ """Test that AI messages with tool calls are properly converted."""
+ tool_calls = [
+ {
+ "id": "call_123",
+ "name": "get_weather",
+ "args": {"location": "San Francisco"},
+ "type": "tool_call",
+ }
+ ]
+
+ # Create a mapping from tool call IDs to function call IDs
+ function_call_ids = {"call_123": "func_456"}
+
+ ai_message = AIMessage(
+ content="",
+ tool_calls=tool_calls,
+ additional_kwargs={_FUNCTION_CALL_IDS_MAP_KEY: function_call_ids},
+ )
+
+ result = _construct_responses_api_input([ai_message])
+
+ assert len(result) == 1
+ assert result[0]["type"] == "function_call"
+ assert result[0]["name"] == "get_weather"
+ assert result[0]["arguments"] == '{"location": "San Francisco"}'
+ assert result[0]["call_id"] == "call_123"
+ assert result[0]["id"] == "func_456"
+
+
+def test__construct_responses_api_input_ai_message_with_tool_calls_and_content() -> (
+ None
+):
+ """Test that AI messages with both tool calls and content are properly converted."""
+ tool_calls = [
+ {
+ "id": "call_123",
+ "name": "get_weather",
+ "args": {"location": "San Francisco"},
+ "type": "tool_call",
+ }
+ ]
+
+ # Create a mapping from tool call IDs to function call IDs
+ function_call_ids = {"call_123": "func_456"}
+
+ ai_message = AIMessage(
+ content="I'll check the weather for you.",
+ tool_calls=tool_calls,
+ additional_kwargs={_FUNCTION_CALL_IDS_MAP_KEY: function_call_ids},
+ )
+
+ result = _construct_responses_api_input([ai_message])
+
+ assert len(result) == 2
+
+ # Check content
+ assert result[0]["role"] == "assistant"
+ assert result[0]["content"] == "I'll check the weather for you."
+
+ # Check function call
+ assert result[1]["type"] == "function_call"
+ assert result[1]["name"] == "get_weather"
+ assert result[1]["arguments"] == '{"location": "San Francisco"}'
+ assert result[1]["call_id"] == "call_123"
+ assert result[1]["id"] == "func_456"
+
+
+def test__construct_responses_api_input_missing_function_call_ids() -> None:
+ """Test AI messages with tool calls but missing function call IDs raise an error."""
+ tool_calls = [
+ {
+ "id": "call_123",
+ "name": "get_weather",
+ "args": {"location": "San Francisco"},
+ "type": "tool_call",
+ }
+ ]
+
+ ai_message = AIMessage(content="", tool_calls=tool_calls)
+
+ with pytest.raises(ValueError):
+ _construct_responses_api_input([ai_message])
+
+
+def test__construct_responses_api_input_tool_message_conversion() -> None:
+ """Test that tool messages are properly converted to function_call_output."""
+ messages = [
+ ToolMessage(
+ content='{"temperature": 72, "conditions": "sunny"}',
+ tool_call_id="call_123",
+ )
+ ]
+
+ result = _construct_responses_api_input(messages)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "function_call_output"
+ assert result[0]["output"] == '{"temperature": 72, "conditions": "sunny"}'
+ assert result[0]["call_id"] == "call_123"
+
+
+def test__construct_responses_api_input_multiple_message_types() -> None:
+ """Test conversion of a conversation with multiple message types."""
+ messages = [
+ SystemMessage(content="You are a helpful assistant."),
+ HumanMessage(content="What's the weather in San Francisco?"),
+ HumanMessage(
+ content=[{"type": "text", "text": "What's the weather in San Francisco?"}]
+ ),
+ AIMessage(
+ content="",
+ tool_calls=[
+ {
+ "type": "tool_call",
+ "id": "call_123",
+ "name": "get_weather",
+ "args": {"location": "San Francisco"},
+ }
+ ],
+ additional_kwargs={_FUNCTION_CALL_IDS_MAP_KEY: {"call_123": "func_456"}},
+ ),
+ ToolMessage(
+ content='{"temperature": 72, "conditions": "sunny"}',
+ tool_call_id="call_123",
+ ),
+ AIMessage(content="The weather in San Francisco is 72°F and sunny."),
+ AIMessage(
+ content=[
+ {
+ "type": "text",
+ "text": "The weather in San Francisco is 72°F and sunny.",
+ }
+ ]
+ ),
+ ]
+ messages_copy = [m.copy(deep=True) for m in messages]
+
+ result = _construct_responses_api_input(messages)
+
+ assert len(result) == len(messages)
+
+ # Check system message
+ assert result[0]["role"] == "system"
+ assert result[0]["content"] == "You are a helpful assistant."
+
+ # Check human message
+ assert result[1]["role"] == "user"
+ assert result[1]["content"] == "What's the weather in San Francisco?"
+ assert result[2]["role"] == "user"
+ assert result[2]["content"] == [
+ {"type": "input_text", "text": "What's the weather in San Francisco?"}
+ ]
+
+ # Check function call
+ assert result[3]["type"] == "function_call"
+ assert result[3]["name"] == "get_weather"
+ assert result[3]["arguments"] == '{"location": "San Francisco"}'
+ assert result[3]["call_id"] == "call_123"
+ assert result[3]["id"] == "func_456"
+
+ # Check function call output
+ assert result[4]["type"] == "function_call_output"
+ assert result[4]["output"] == '{"temperature": 72, "conditions": "sunny"}'
+ assert result[4]["call_id"] == "call_123"
+
+ assert result[5]["role"] == "assistant"
+ assert result[5]["content"] == "The weather in San Francisco is 72°F and sunny."
+
+ assert result[6]["role"] == "assistant"
+ assert result[6]["content"] == [
+ {
+ "type": "output_text",
+ "text": "The weather in San Francisco is 72°F and sunny.",
+ "annotations": [],
+ }
+ ]
+
+ # assert no mutation has occurred
+ assert messages_copy == messages
diff --git a/libs/partners/openai/uv.lock b/libs/partners/openai/uv.lock
index b4c5b7c970b..b79bdca9765 100644
--- a/libs/partners/openai/uv.lock
+++ b/libs/partners/openai/uv.lock
@@ -462,7 +462,7 @@ wheels = [
[[package]]
name = "langchain-core"
-version = "0.3.39"
+version = "0.3.45rc1"
source = { editable = "../../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -520,7 +520,7 @@ typing = [
[[package]]
name = "langchain-openai"
-version = "0.3.7"
+version = "0.3.9rc1"
source = { editable = "." }
dependencies = [
{ name = "langchain-core" },
@@ -566,7 +566,7 @@ typing = [
[package.metadata]
requires-dist = [
{ name = "langchain-core", editable = "../../core" },
- { name = "openai", specifier = ">=1.58.1,<2.0.0" },
+ { name = "openai", specifier = ">=1.66.0,<2.0.0" },
{ name = "tiktoken", specifier = ">=0.7,<1" },
]
@@ -603,7 +603,7 @@ typing = [
[[package]]
name = "langchain-tests"
-version = "0.3.12"
+version = "0.3.14"
source = { editable = "../../standard-tests" }
dependencies = [
{ name = "httpx" },
@@ -619,8 +619,7 @@ dependencies = [
requires-dist = [
{ name = "httpx", specifier = ">=0.25.0,<1" },
{ name = "langchain-core", editable = "../../core" },
- { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.24.0,<2.0.0" },
- { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
+ { name = "numpy", specifier = ">=1.26.2,<3" },
{ name = "pytest", specifier = ">=7,<9" },
{ name = "pytest-asyncio", specifier = ">=0.20,<1" },
{ name = "pytest-socket", specifier = ">=0.6.0,<1" },
@@ -752,7 +751,7 @@ wheels = [
[[package]]
name = "openai"
-version = "1.61.1"
+version = "1.66.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -764,9 +763,9 @@ dependencies = [
{ name = "tqdm" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/d9/cf/61e71ce64cf0a38f029da0f9a5f10c9fa0e69a7a977b537126dac50adfea/openai-1.61.1.tar.gz", hash = "sha256:ce1851507218209961f89f3520e06726c0aa7d0512386f0f977e3ac3e4f2472e", size = 350784 }
+sdist = { url = "https://files.pythonhosted.org/packages/84/c5/3c422ca3ccc81c063955e7c20739d7f8f37fea0af865c4a60c81e6225e14/openai-1.66.0.tar.gz", hash = "sha256:8a9e672bc6eadec60a962f0b40d7d1c09050010179c919ed65322e433e2d1025", size = 396819 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/9a/b6/2e2a011b2dc27a6711376808b4cd8c922c476ea0f1420b39892117fa8563/openai-1.61.1-py3-none-any.whl", hash = "sha256:72b0826240ce26026ac2cd17951691f046e5be82ad122d20a8e1b30ca18bd11e", size = 463126 },
+ { url = "https://files.pythonhosted.org/packages/d7/f1/d52960dac9519c9de64593460826a0fe2e19159389ec97ecf3e931d2e6a3/openai-1.66.0-py3-none-any.whl", hash = "sha256:43e4a3c0c066cc5809be4e6aac456a3ebc4ec1848226ef9d1340859ac130d45a", size = 566389 },
]
[[package]]
diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
index abbe9fdf27a..3474aac5233 100644
--- a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
+++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
@@ -20,7 +20,7 @@ from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import BaseTool, tool
from langchain_core.utils.function_calling import (
- convert_to_openai_tool,
+ convert_to_json_schema,
tool_example_to_messages,
)
from pydantic import BaseModel, Field
@@ -72,21 +72,21 @@ def _get_joke_class(
class _TestCallbackHandler(BaseCallbackHandler):
- metadatas: list[Optional[dict]]
+ options: list[Optional[dict]]
def __init__(self) -> None:
super().__init__()
- self.metadatas = []
+ self.options = []
def on_chat_model_start(
self,
serialized: Any,
messages: Any,
*,
- metadata: Optional[dict[str, Any]] = None,
+ options: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> None:
- self.metadatas.append(metadata)
+ self.options.append(options)
class _MagicFunctionSchema(BaseModel):
@@ -1243,16 +1243,16 @@ class ChatModelIntegrationTests(ChatModelTests):
)
validation_function(result)
- assert len(invoke_callback.metadatas) == 1, (
+ assert len(invoke_callback.options) == 1, (
"Expected on_chat_model_start to be called once"
)
- assert isinstance(invoke_callback.metadatas[0], dict)
+ assert isinstance(invoke_callback.options[0], dict)
assert isinstance(
- invoke_callback.metadatas[0]["structured_output_format"]["schema"], dict
+ invoke_callback.options[0]["ls_structured_output_format"]["schema"], dict
)
- assert invoke_callback.metadatas[0]["structured_output_format"][
+ assert invoke_callback.options[0]["ls_structured_output_format"][
"schema"
- ] == convert_to_openai_tool(schema)
+ ] == convert_to_json_schema(schema)
stream_callback = _TestCallbackHandler()
@@ -1262,16 +1262,16 @@ class ChatModelIntegrationTests(ChatModelTests):
validation_function(chunk)
assert chunk
- assert len(stream_callback.metadatas) == 1, (
+ assert len(stream_callback.options) == 1, (
"Expected on_chat_model_start to be called once"
)
- assert isinstance(stream_callback.metadatas[0], dict)
+ assert isinstance(stream_callback.options[0], dict)
assert isinstance(
- stream_callback.metadatas[0]["structured_output_format"]["schema"], dict
+ stream_callback.options[0]["ls_structured_output_format"]["schema"], dict
)
- assert stream_callback.metadatas[0]["structured_output_format"][
+ assert stream_callback.options[0]["ls_structured_output_format"][
"schema"
- ] == convert_to_openai_tool(schema)
+ ] == convert_to_json_schema(schema)
@pytest.mark.parametrize("schema_type", ["pydantic", "typeddict", "json_schema"])
async def test_structured_output_async(
@@ -1319,16 +1319,16 @@ class ChatModelIntegrationTests(ChatModelTests):
)
validation_function(result)
- assert len(ainvoke_callback.metadatas) == 1, (
+ assert len(ainvoke_callback.options) == 1, (
"Expected on_chat_model_start to be called once"
)
- assert isinstance(ainvoke_callback.metadatas[0], dict)
+ assert isinstance(ainvoke_callback.options[0], dict)
assert isinstance(
- ainvoke_callback.metadatas[0]["structured_output_format"]["schema"], dict
+ ainvoke_callback.options[0]["ls_structured_output_format"]["schema"], dict
)
- assert ainvoke_callback.metadatas[0]["structured_output_format"][
+ assert ainvoke_callback.options[0]["ls_structured_output_format"][
"schema"
- ] == convert_to_openai_tool(schema)
+ ] == convert_to_json_schema(schema)
astream_callback = _TestCallbackHandler()
@@ -1338,17 +1338,17 @@ class ChatModelIntegrationTests(ChatModelTests):
validation_function(chunk)
assert chunk
- assert len(astream_callback.metadatas) == 1, (
+ assert len(astream_callback.options) == 1, (
"Expected on_chat_model_start to be called once"
)
- assert isinstance(astream_callback.metadatas[0], dict)
+ assert isinstance(astream_callback.options[0], dict)
assert isinstance(
- astream_callback.metadatas[0]["structured_output_format"]["schema"], dict
+ astream_callback.options[0]["ls_structured_output_format"]["schema"], dict
)
- assert astream_callback.metadatas[0]["structured_output_format"][
+ assert astream_callback.options[0]["ls_structured_output_format"][
"schema"
- ] == convert_to_openai_tool(schema)
+ ] == convert_to_json_schema(schema)
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Test requires pydantic 2.")
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
@@ -1960,7 +1960,7 @@ class ChatModelIntegrationTests(ChatModelTests):
set the ``supports_anthropic_inputs`` property to False.
""" # noqa: E501
if not self.supports_anthropic_inputs:
- return
+ pytest.skip("Model does not explicitly support Anthropic inputs.")
class color_picker(BaseModelV1):
"""Input your fav color and get a random fact about it."""
@@ -1998,26 +1998,55 @@ class ChatModelIntegrationTests(ChatModelTests):
"id": "foo",
"name": "color_picker",
},
+ ],
+ tool_calls=[
+ {
+ "name": "color_picker",
+ "args": {"fav_color": "green"},
+ "id": "foo",
+ "type": "tool_call",
+ }
+ ],
+ ),
+ ToolMessage("That's a great pick!", tool_call_id="foo"),
+ ]
+ response = model.bind_tools([color_picker]).invoke(messages)
+ assert isinstance(response, AIMessage)
+
+ # Test thinking blocks
+ messages = [
+ HumanMessage(
+ [
+ {
+ "type": "text",
+ "text": "Hello",
+ },
+ ]
+ ),
+ AIMessage(
+ [
+ {
+ "type": "thinking",
+ "thinking": "I'm thinking...",
+ "signature": "abc123",
+ },
+ {
+ "type": "text",
+ "text": "Hello, how are you?",
+ },
]
),
HumanMessage(
[
{
- "type": "tool_result",
- "tool_use_id": "foo",
- "content": [
- {
- "type": "text",
- "text": "green is a great pick! that's my sister's favorite color", # noqa: E501
- }
- ],
- "is_error": False,
+ "type": "text",
+ "text": "Well, thanks.",
},
- {"type": "text", "text": "what's my sister's favorite color"},
]
),
]
- model.bind_tools([color_picker]).invoke(messages)
+ response = model.invoke(messages)
+ assert isinstance(response, AIMessage)
def test_tool_message_error_status(
self, model: BaseChatModel, my_adder_tool: BaseTool
diff --git a/libs/standard-tests/pyproject.toml b/libs/standard-tests/pyproject.toml
index 7ee6dd59c98..45bbe98931c 100644
--- a/libs/standard-tests/pyproject.toml
+++ b/libs/standard-tests/pyproject.toml
@@ -7,7 +7,7 @@ authors = [{ name = "Erick Friis", email = "erick@langchain.dev" }]
license = { text = "MIT" }
requires-python = "<4.0,>=3.9"
dependencies = [
- "langchain-core<1.0.0,>=0.3.41",
+ "langchain-core<1.0.0,>=0.3.43",
"pytest<9,>=7",
"pytest-asyncio<1,>=0.20",
"httpx<1,>=0.25.0",
@@ -16,7 +16,7 @@ dependencies = [
"numpy<3,>=1.26.2",
]
name = "langchain-tests"
-version = "0.3.13"
+version = "0.3.14"
description = "Standard tests for LangChain implementations"
readme = "README.md"
diff --git a/libs/standard-tests/uv.lock b/libs/standard-tests/uv.lock
index 936bd25ac4c..2f9a5d4c765 100644
--- a/libs/standard-tests/uv.lock
+++ b/libs/standard-tests/uv.lock
@@ -288,7 +288,7 @@ wheels = [
[[package]]
name = "langchain-core"
-version = "0.3.41"
+version = "0.3.43"
source = { editable = "../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -346,7 +346,7 @@ typing = [
[[package]]
name = "langchain-tests"
-version = "0.3.13"
+version = "0.3.14"
source = { editable = "." }
dependencies = [
{ name = "httpx" },
diff --git a/pyproject.toml b/pyproject.toml
index e932562aee6..a0dd56c30fd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -80,9 +80,6 @@ ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy
[tool.ruff]
extend-include = ["*.ipynb"]
-extend-exclude = [
- "docs/docs/expression_language/why.ipynb", # TODO: look into why linter errors
-]
[tool.ruff.lint]
select = ["D"]
@@ -96,9 +93,4 @@ pydocstyle = { convention = "google" }
"F841", # allow assignments to variables that are never read -- it's example code
]
-"!libs/langchain/langchain/model_laboratory.py" = ["D"]
-
-# These files were failing the listed rules at the time ruff was adopted for notebooks.
-# Don't require them to change at once, though we should look into them eventually.
-"cookbook/gymnasium_agent_simulation.ipynb" = ["F821"]
-"docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb" = ["F821"]
+"!libs/langchain/langchain/model_laboratory.py" = ["D"]
\ No newline at end of file
diff --git a/uv.lock b/uv.lock
index 35bfac1abc1..a140495983b 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,4 +1,5 @@
version = 1
+revision = 1
requires-python = ">=3.9, <4.0"
resolution-markers = [
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@@ -149,7 +150,7 @@ wheels = [
[[package]]
name = "anthropic"
-version = "0.45.2"
+version = "0.49.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -160,9 +161,9 @@ dependencies = [
{ name = "sniffio" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/15/74/2b2485fc120da834c0c5be07462541ec082e9fa8851d845f2587e480535a/anthropic-0.45.2.tar.gz", hash = "sha256:32a18b9ecd12c91b2be4cae6ca2ab46a06937b5aa01b21308d97a6d29794fb5e", size = 200901 }
+sdist = { url = "https://files.pythonhosted.org/packages/86/e3/a88c8494ce4d1a88252b9e053607e885f9b14d0a32273d47b727cbee4228/anthropic-0.49.0.tar.gz", hash = "sha256:c09e885b0f674b9119b4f296d8508907f6cff0009bc20d5cf6b35936c40b4398", size = 210016 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/74/86/e81814e542d1eaeec84d2312bec93a99b9ef1d78d9bfae1fc5dd74abdf15/anthropic-0.45.2-py3-none-any.whl", hash = "sha256:ecd746f7274451dfcb7e1180571ead624c7e1195d1d46cb7c70143d2aedb4d35", size = 222797 },
+ { url = "https://files.pythonhosted.org/packages/76/74/5d90ad14d55fbe3f9c474fdcb6e34b4bed99e3be8efac98734a5ddce88c1/anthropic-0.49.0-py3-none-any.whl", hash = "sha256:bbc17ad4e7094988d2fa86b87753ded8dce12498f4b85fe5810f208f454a8375", size = 243368 },
]
[[package]]
@@ -2152,25 +2153,21 @@ wheels = [
[[package]]
name = "langchain"
-version = "0.3.18"
+version = "0.3.20"
source = { editable = "libs/langchain" }
dependencies = [
- { name = "aiohttp" },
{ name = "async-timeout", marker = "python_full_version < '3.11'" },
{ name = "langchain-core" },
{ name = "langchain-text-splitters" },
{ name = "langsmith" },
- { name = "numpy" },
{ name = "pydantic" },
{ name = "pyyaml" },
{ name = "requests" },
{ name = "sqlalchemy" },
- { name = "tenacity" },
]
[package.metadata]
requires-dist = [
- { name = "aiohttp", specifier = ">=3.8.3,<4.0.0" },
{ name = "async-timeout", marker = "python_full_version < '3.11'", specifier = ">=4.0.0,<5.0.0" },
{ name = "langchain-anthropic", marker = "extra == 'anthropic'" },
{ name = "langchain-aws", marker = "extra == 'aws'" },
@@ -2188,15 +2185,14 @@ requires-dist = [
{ name = "langchain-openai", marker = "extra == 'openai'", editable = "libs/partners/openai" },
{ name = "langchain-text-splitters", editable = "libs/text-splitters" },
{ name = "langchain-together", marker = "extra == 'together'" },
+ { name = "langchain-xai", marker = "extra == 'xai'" },
{ name = "langsmith", specifier = ">=0.1.17,<0.4" },
- { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.26.4,<2" },
- { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
{ name = "pydantic", specifier = ">=2.7.4,<3.0.0" },
{ name = "pyyaml", specifier = ">=5.3" },
{ name = "requests", specifier = ">=2,<3" },
{ name = "sqlalchemy", specifier = ">=1.4,<3" },
- { name = "tenacity", specifier = ">=8.1.0,!=8.4.0,<10" },
]
+provides-extras = ["community", "anthropic", "openai", "cohere", "google-vertexai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "aws", "deepseek", "xai"]
[package.metadata.requires-dev]
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
@@ -2213,7 +2209,7 @@ lint = [
{ name = "ruff", specifier = ">=0.9.2,<1.0.0" },
]
test = [
- { name = "blockbuster", specifier = ">=1.5.14,<1.6" },
+ { name = "blockbuster", specifier = ">=1.5.18,<1.6" },
{ name = "cffi", marker = "python_full_version < '3.10'", specifier = "<1.17.1" },
{ name = "cffi", marker = "python_full_version >= '3.10'" },
{ name = "duckdb-engine", specifier = ">=0.9.2,<1.0.0" },
@@ -2223,6 +2219,7 @@ test = [
{ name = "langchain-tests", editable = "libs/standard-tests" },
{ name = "langchain-text-splitters", editable = "libs/text-splitters" },
{ name = "lark", specifier = ">=1.1.5,<2.0.0" },
+ { name = "numpy", specifier = ">=1.26.4,<3" },
{ name = "packaging", specifier = ">=24.2" },
{ name = "pandas", specifier = ">=2.0.0,<3.0.0" },
{ name = "pytest", specifier = ">=8,<9" },
@@ -2253,6 +2250,7 @@ typing = [
{ name = "langchain-text-splitters", editable = "libs/text-splitters" },
{ name = "mypy", specifier = ">=1.10,<2.0" },
{ name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
+ { name = "numpy", specifier = ">=1.26.4,<3" },
{ name = "types-chardet", specifier = ">=5.0.4.6,<6.0.0.0" },
{ name = "types-pytz", specifier = ">=2023.3.0.0,<2024.0.0.0" },
{ name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
@@ -2263,7 +2261,7 @@ typing = [
[[package]]
name = "langchain-anthropic"
-version = "0.3.7"
+version = "0.3.9"
source = { editable = "libs/partners/anthropic" }
dependencies = [
{ name = "anthropic" },
@@ -2273,7 +2271,7 @@ dependencies = [
[package.metadata]
requires-dist = [
- { name = "anthropic", specifier = ">=0.45.0,<1" },
+ { name = "anthropic", specifier = ">=0.47.0,<1" },
{ name = "langchain-core", editable = "libs/core" },
{ name = "pydantic", specifier = ">=2.7.4,<3.0.0" },
]
@@ -2364,7 +2362,7 @@ typing = [
[[package]]
name = "langchain-community"
-version = "0.3.17"
+version = "0.3.19"
source = { editable = "libs/community" }
dependencies = [
{ name = "aiohttp" },
@@ -2389,8 +2387,7 @@ requires-dist = [
{ name = "langchain", editable = "libs/langchain" },
{ name = "langchain-core", editable = "libs/core" },
{ name = "langsmith", specifier = ">=0.1.125,<0.4" },
- { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.26.4,<2" },
- { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
+ { name = "numpy", specifier = ">=1.26.2,<3" },
{ name = "pydantic-settings", specifier = ">=2.4.0,<3.0.0" },
{ name = "pyyaml", specifier = ">=5.3" },
{ name = "requests", specifier = ">=2,<3" },
@@ -2411,7 +2408,7 @@ lint = [
{ name = "ruff", specifier = ">=0.9,<0.10" },
]
test = [
- { name = "blockbuster", specifier = ">=1.5.13,<1.6" },
+ { name = "blockbuster", specifier = ">=1.5.18,<1.6" },
{ name = "cffi", marker = "python_full_version < '3.10'", specifier = "<1.17.1" },
{ name = "cffi", marker = "python_full_version >= '3.10'" },
{ name = "duckdb-engine", specifier = ">=0.13.6,<1.0.0" },
@@ -2454,7 +2451,7 @@ typing = [
[[package]]
name = "langchain-core"
-version = "0.3.35"
+version = "0.3.43"
source = { editable = "libs/core" }
dependencies = [
{ name = "jsonpatch" },
@@ -2486,7 +2483,7 @@ dev = [
]
lint = [{ name = "ruff", specifier = ">=0.9.2,<1.0.0" }]
test = [
- { name = "blockbuster", specifier = "~=1.5.11" },
+ { name = "blockbuster", specifier = "~=1.5.18" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
{ name = "grandalf", specifier = ">=0.8,<1.0" },
{ name = "langchain-tests", directory = "libs/standard-tests" },
@@ -2577,7 +2574,7 @@ dependencies = [
[[package]]
name = "langchain-groq"
-version = "0.2.4"
+version = "0.2.5"
source = { editable = "libs/partners/groq" }
dependencies = [
{ name = "groq" },
@@ -2610,7 +2607,7 @@ typing = [
[[package]]
name = "langchain-mistralai"
-version = "0.2.6"
+version = "0.2.7"
source = { editable = "libs/partners/mistralai" }
dependencies = [
{ name = "httpx" },
@@ -2736,7 +2733,7 @@ typing = []
[[package]]
name = "langchain-openai"
-version = "0.3.5"
+version = "0.3.8"
source = { editable = "libs/partners/openai" }
dependencies = [
{ name = "langchain-core" },
@@ -2747,7 +2744,7 @@ dependencies = [
[package.metadata]
requires-dist = [
{ name = "langchain-core", editable = "libs/core" },
- { name = "openai", specifier = ">=1.58.1,<2.0.0" },
+ { name = "openai", specifier = ">=1.66.0,<2.0.0" },
{ name = "tiktoken", specifier = ">=0.7,<1" },
]
@@ -3634,7 +3631,7 @@ wheels = [
[[package]]
name = "openai"
-version = "1.61.1"
+version = "1.66.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -3646,9 +3643,9 @@ dependencies = [
{ name = "tqdm" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/d9/cf/61e71ce64cf0a38f029da0f9a5f10c9fa0e69a7a977b537126dac50adfea/openai-1.61.1.tar.gz", hash = "sha256:ce1851507218209961f89f3520e06726c0aa7d0512386f0f977e3ac3e4f2472e", size = 350784 }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/e1/b3e1fda1aa32d4f40d4de744e91de4de65c854c3e53c63342e4b5f9c5995/openai-1.66.2.tar.gz", hash = "sha256:9b3a843c25f81ee09b6469d483d9fba779d5c6ea41861180772f043481b0598d", size = 397041 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/9a/b6/2e2a011b2dc27a6711376808b4cd8c922c476ea0f1420b39892117fa8563/openai-1.61.1-py3-none-any.whl", hash = "sha256:72b0826240ce26026ac2cd17951691f046e5be82ad122d20a8e1b30ca18bd11e", size = 463126 },
+ { url = "https://files.pythonhosted.org/packages/2c/6f/3315b3583ffe3e31c55b446cb22d2a7c235e65ca191674fffae62deb3c11/openai-1.66.2-py3-none-any.whl", hash = "sha256:75194057ee6bb8b732526387b6041327a05656d976fc21c064e21c8ac6b07999", size = 567268 },
]
[[package]]