Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-04 08:10:25 +00:00

Compare commits: nh/subagen...langchain- (151 commits)
SHA1:

832036ef0f, f1742954ab, 6ab0476676, d36413c821, 99097f799c, 0666571519, ef85161525, 079eb808f8, 39fb2d1a3b, db7f2db1ae,
df46c82ae2, f8adbbc461, 17f0716d6c, 5acd34ae92, 84dbebac4f, eddfcd2c88, 9f470d297f, 2222470f69, 78175fcb96, d9e659ca4f,
e731ba1e47, 557fc9a817, 965dac74e5, 7d7a50d4cc, 9319eecaba, a47386f6dc, aaf88c157f, 3dcf4ae1e9, 3391168777, 28728dca9f,
1ae7fb7694, 7aef3388d9, 1d056487c7, 64e6798a39, 4a65e827f7, 35b89b8b10, 8efa75d04c, 8fd54f13b5, 952fa8aa99, 3948273350,
a16307fe84, af6f2cf366, 6997867f0e, de791bc3ef, 69c6e7de59, 10cee59f2e, 58f521ea4f, a194ae6959, 4d623133a5, 8fbf192c2a,
241a382fba, c194ee2046, 85567f1dc3, 6f4978041e, f1fca4f46f, 2b899fe961, 3152d25811, 3b8cb3d4b6, 15047ae28a, 888fa3a2fb,
90346b8a35, 2d5efd7b29, 1d2273597a, 9dd494ddcd, 2fa07b19f6, a022e3c14d, e0e11423d9, 34de8ec1f3, 3d288fd610, 055cccde28,
361514d11d, 90b68059f5, 87ad5276e4, 5489df75d7, c6b3f5b888, 15db024811, 6d73003b17, 13259a109a, aa78be574a, d0dd1b30d1,
0338a15192, e10d99b728, c9018f81ec, 31718492c7, 2209878f48, dd77dbe3ab, eb19e12527, 551e86a517, 8734c05f64, 0c8cbfb7de,
89c3428d85, 707e96c541, 26e0a00c4c, d0f8f00e7e, a39132787c, 296994ebf0, b5b31eec88, 8f6851c349, 0788461abd, 3bfd1f6d8a,
d83c3a12bf, 79200cf3c2, bcb6789888, 89b7933ef1, 4da5a8081f, 53e9f00804, 6e25e185f6, 68ceeb64f6, edae976b81, 9f4366bc9d,
99e0a60aab, d38729fbac, ff0d21cfd5, 9140a7cb86, 41fe18bc80, 9105573cb3, fff87e95d1, 9beb29a34c, ca00f5aed9, 637777b8e7,
1cf851e054, 961f965f0c, 760fc3bc12, e3fc7d8aa6, 2b3b209e40, 78903ac285, f361acc11c, ed185c0026, 6dc34beb71, c2205f88e6,
abdbe185c5, c1b816cb7e, 0559558715, 75965474fc, 5dc014fdf4, 291a9fcea1, dd994b9d7f, 83901b30e3, bcfa21a6e7, af1da28459,
ed2ee4e8cc, f293c8ffd6, 714c370191, a29d4e9c3a, 74983f8a96, 11c5b86981, 383f4c0ee9, 045e7ad4a1, 0e80291804, c99773b652,
5f9e3e33cd
.github/scripts/check_diff.py (vendored): 23 changed lines
```diff
@@ -130,29 +130,20 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
         return _get_pydantic_test_configs(dir_)
 
     if job == "codspeed":
-        py_versions = ["3.12"]  # 3.13 is not yet supported
+        py_versions = ["3.13"]
     elif dir_ == "libs/core":
-        py_versions = ["3.10", "3.11", "3.12", "3.13"]
+        py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
-    # custom logic for specific directories
-    elif dir_ == "libs/langchain" and job == "extended-tests":
+    elif dir_ in {"libs/partners/chroma", "libs/partners/nomic"}:
         py_versions = ["3.10", "3.13"]
     elif dir_ == "libs/langchain_v1":
         py_versions = ["3.10", "3.13"]
-    elif dir_ in {"libs/cli"}:
-        py_versions = ["3.10", "3.13"]
-    elif dir_ == ".":
-        # unable to install with 3.13 because tokenizers doesn't support 3.13 yet
-        py_versions = ["3.10", "3.12"]
     else:
-        py_versions = ["3.10", "3.13"]
+        py_versions = ["3.10", "3.14"]
 
     return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]
 
 
 def _get_pydantic_test_configs(
-    dir_: str, *, python_version: str = "3.11"
+    dir_: str, *, python_version: str = "3.12"
 ) -> List[Dict[str, str]]:
     with open("./libs/core/uv.lock", "rb") as f:
         core_uv_lock_data = tomllib.load(f)
@@ -306,7 +297,9 @@ if __name__ == "__main__":
                     if not filename.startswith(".")
                 ] != ["README.md"]:
                     dirs_to_run["test"].add(f"libs/partners/{partner_dir}")
-                    dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
+                    # Skip codspeed for partners without benchmarks or in IGNORED_PARTNERS
+                    if partner_dir not in IGNORED_PARTNERS:
+                        dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
                 # Skip if the directory was deleted or is just a tombstone readme
             elif file.startswith("libs/"):
                 # Check if this is a root-level file in libs/ (e.g., libs/README.md)
```
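The version-selection logic above ultimately feeds a GitHub Actions matrix. As a rough sketch, here is a simplified extract of the post-change logic (not the full script) showing how each directory/job pair expands into one matrix entry per Python version:

```python
# Simplified extract of the post-change selection logic in check_diff.py;
# the real script handles more jobs and directories than shown here.
from typing import Dict, List


def get_configs(job: str, dir_: str) -> List[Dict[str, str]]:
    if job == "codspeed":
        py_versions = ["3.13"]
    elif dir_ == "libs/core":
        py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
    else:
        py_versions = ["3.10", "3.14"]
    # Each dict becomes one job in the GitHub Actions matrix.
    return [{"working-directory": dir_, "python-version": v} for v in py_versions]


print(get_configs("test", "libs/core"))
# [{'working-directory': 'libs/core', 'python-version': '3.10'}, ...]
```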
.github/workflows/_release.yml (vendored): 2 changed lines
```diff
@@ -395,7 +395,7 @@ jobs:
       contents: read
     strategy:
       matrix:
-        partner: [openai, anthropic]
+        partner: [anthropic]
      fail-fast: false  # Continue testing other partners if one fails
     env:
       ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
```
.github/workflows/_test_pydantic.yml (vendored): 6 changed lines
```diff
@@ -13,7 +13,7 @@ on:
       required: false
       type: string
       description: "Python version to use"
-      default: "3.11"
+      default: "3.12"
     pydantic-version:
       required: true
       type: string
@@ -51,7 +51,9 @@ jobs:
 
       - name: "🔄 Install Specific Pydantic Version"
         shell: bash
-        run: VIRTUAL_ENV=.venv uv pip install pydantic~=${{ inputs.pydantic-version }}
+        env:
+          PYDANTIC_VERSION: ${{ inputs.pydantic-version }}
+        run: VIRTUAL_ENV=.venv uv pip install "pydantic~=$PYDANTIC_VERSION"
 
       - name: "🧪 Run Core Tests"
         shell: bash
```
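The second hunk stops interpolating `${{ inputs.pydantic-version }}` directly into the shell command and reads it from an environment variable instead, so the workflow input is never spliced into shell source. A hedged sketch of the same pattern outside Actions, with `PYDANTIC_VERSION` standing in for the value the workflow's `env:` block sets:

```python
# The untrusted value is read from the environment and passed as a discrete
# argv element, never interpolated into a shell script string.
import os
import subprocess

pydantic_version = os.environ.get("PYDANTIC_VERSION", "2")
subprocess.run(
    ["uv", "pip", "install", f"pydantic~={pydantic_version}"],
    check=True,
)
```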
.github/workflows/check_diffs.yml (vendored): 7 changed lines
```diff
@@ -184,15 +184,14 @@ jobs:
     steps:
       - uses: actions/checkout@v5
 
-      # We have to use 3.12 as 3.13 is not yet supported
       - name: "📦 Install UV Package Manager"
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
         with:
-          python-version: "3.12"
+          python-version: "3.13"
 
       - uses: actions/setup-python@v6
         with:
-          python-version: "3.12"
+          python-version: "3.13"
 
       - name: "📦 Install Test Dependencies"
         run: uv sync --group test
```
.github/workflows/integration_tests.yml (vendored): 23 changed lines
```diff
@@ -23,10 +23,8 @@ permissions:
   contents: read
 
 env:
-  POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
   DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
-  POETRY_LIBS: ("libs/partners/aws")
 
 jobs:
   # Generate dynamic test matrix based on input parameters or defaults
@@ -60,7 +58,6 @@ jobs:
       echo $matrix
       echo "matrix=$matrix" >> $GITHUB_OUTPUT
   # Run integration tests against partner libraries with live API credentials
-  # Tests are run with Poetry or UV depending on the library's setup
   build:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"
@@ -95,17 +92,7 @@ jobs:
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
           mv langchain-aws/libs/aws langchain/libs/partners/aws
 
-      - name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        uses: "./langchain/.github/actions/poetry_setup"
-        with:
-          python-version: ${{ matrix.python-version }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: langchain/${{ matrix.working-directory }}
-          cache-key: scheduled
-
       - name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
         uses: "./langchain/.github/actions/uv_setup"
         with:
           python-version: ${{ matrix.python-version }}
@@ -123,15 +110,7 @@ jobs:
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           aws-region: ${{ secrets.AWS_REGION }}
 
-      - name: "📦 Install Dependencies (Poetry)"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        run: |
-          echo "Running scheduled tests, installing dependencies with poetry..."
-          cd langchain/${{ matrix.working-directory }}
-          poetry install --with=test_integration,test
-
-      - name: "📦 Install Dependencies (UV)"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
+      - name: "📦 Install Dependencies"
         run: |
           echo "Running scheduled tests, installing dependencies with uv..."
           cd langchain/${{ matrix.working-directory }}
```
.github/workflows/pr_lint.yml (vendored): 4 changed lines
```diff
@@ -27,7 +27,7 @@
 # * release — prepare a new release
 #
 # Allowed Scopes (optional):
-#   core, cli, langchain, langchain_v1, langchain_legacy, standard-tests,
+#   core, cli, langchain, langchain_v1, langchain-classic, standard-tests,
 #   text-splitters, docs, anthropic, chroma, deepseek, exa, fireworks, groq,
 #   huggingface, mistralai, nomic, ollama, openai, perplexity, prompty, qdrant,
 #   xai, infra
@@ -80,7 +80,7 @@ jobs:
           cli
           langchain
           langchain_v1
-          langchain_legacy
+          langchain-classic
           standard-tests
           text-splitters
           docs
```
.mcp.json (new file): 8 lines
```diff
@@ -0,0 +1,8 @@
+{
+  "mcpServers": {
+    "docs-langchain": {
+      "type": "http",
+      "url": "https://docs.langchain.com/mcp"
+    }
+  }
+}
```
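The new file registers a single HTTP MCP server. A minimal, illustrative sketch of reading that config: the key names match the file above, while the reader itself is only an assumption about how a client might consume it:

```python
# Illustrative reader for the .mcp.json added above; any MCP-aware client
# would consume the same keys.
import json

with open(".mcp.json") as f:
    config = json.load(f)

server = config["mcpServers"]["docs-langchain"]
print(server["type"], server["url"])  # http https://docs.langchain.com/mcp
```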
````diff
@@ -152,20 +152,22 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
         priority: Email priority level (`'low'`, `'normal'`, `'high'`).
 
     Returns:
-        True if email was sent successfully, False otherwise.
+        `True` if email was sent successfully, `False` otherwise.
 
     Raises:
-        InvalidEmailError: If the email address format is invalid.
-        SMTPConnectionError: If unable to connect to email server.
+        `InvalidEmailError`: If the email address format is invalid.
+        `SMTPConnectionError`: If unable to connect to email server.
     """
 ```
 
 **Documentation Guidelines:**
 
 - Types go in function signatures, NOT in docstrings
 - If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
 - Focus on "why" rather than "what" in descriptions
 - Document all parameters, return values, and exceptions
 - Keep descriptions concise but clear
 - Ensure American English spelling (e.g., "behavior", not "behaviour")
+
+📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
````
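Taken together, the guidelines and the new tip produce docstrings like this sketch; `fetch_user` and `UserNotFoundError` are hypothetical names invented for illustration:

```python
class UserNotFoundError(Exception):
    """Raised when a user lookup fails."""  # hypothetical, for illustration


def fetch_user(user_id: str, *, timeout: float = 5.0) -> dict:
    """Fetch a user profile from the backing store.

    Args:
        user_id: Unique identifier of the user.
        timeout: Request timeout in seconds.

    Returns:
        `dict` mapping profile field names to values.

    Raises:
        `UserNotFoundError`: If no user matches `user_id`.
    """
    # Types live in the signature, and the default for `timeout` is not
    # repeated in the docstring, per the guidelines above.
    if user_id != "u-123":
        raise UserNotFoundError(user_id)
    return {"id": user_id, "name": "Ada"}
```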
```diff
@@ -2,6 +2,7 @@
 
 Please see the following guides for migrating LangChain code:
 
+* Migrate to [LangChain v1.0](https://docs.langchain.com/oss/python/migrate/langchain-v1)
* Migrate to [LangChain v0.3](https://python.langchain.com/docs/versions/v0_3/)
 * Migrate to [LangChain v0.2](https://python.langchain.com/docs/versions/v0_2/)
 * Migrating from [LangChain 0.0.x Chains](https://python.langchain.com/docs/versions/migrating_chains/)
```
README.md: 31 changed lines
````diff
@@ -12,13 +12,16 @@
 <p align="center">
   <a href="https://opensource.org/licenses/MIT" target="_blank">
-    <img src="https://img.shields.io/pypi/l/langchain-core?style=flat-square" alt="PyPI - License">
+    <img src="https://img.shields.io/pypi/l/langchain" alt="PyPI - License">
   </a>
-  <a href="https://pypistats.org/packages/langchain-core" target="_blank">
+  <a href="https://pypistats.org/packages/langchain" target="_blank">
     <img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads">
   </a>
+  <a href="https://pypi.org/project/langchain/#history" target="_blank">
+    <img src="https://img.shields.io/pypi/v/langchain?label=%20" alt="Version">
+  </a>
   <a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank">
-    <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square" alt="Open in Dev Containers">
+    <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode" alt="Open in Dev Containers">
   </a>
   <a href="https://codespaces.new/langchain-ai/langchain" target="_blank">
     <img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20">
@@ -34,14 +37,14 @@
 LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.
 
 ```bash
-pip install -U langchain
+pip install langchain
 ```
 
 ---
 
-**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
+**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/oss/python/langchain/overview).
 
-If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
+If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.
 
 > [!NOTE]
 > Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -62,16 +65,14 @@ While the LangChain framework can be used standalone, it also integrates seamles
 To improve your LLM application development, pair LangChain with:
 
 - [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
-- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
-- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
+- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
+- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio).
 
 ## Additional resources
 
-- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key
-concepts behind the LangChain framework.
-- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with
-guided examples on getting started with LangChain.
-- [API Reference](https://reference.langchain.com/python/): Detailed reference on
+- [Learn](https://docs.langchain.com/oss/python/learn): Use cases, conceptual overviews, and more.
+- [API Reference](https://reference.langchain.com/python): Detailed reference on
 navigating base packages and integrations for LangChain.
+- [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview): Learn how to contribute to LangChain and find good first issues.
-- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
-- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
+- [LangChain Forum](https://forum.langchain.com): Connect with the community and share all of your technical questions, ideas, and feedback.
+- [Chat LangChain](https://chat.langchain.com): Ask questions & chat with our documentation.
````
````diff
@@ -1,6 +1,30 @@
 # langchain-cli
 
-This package implements the official CLI for LangChain. Right now, it is most useful
-for getting started with LangChain Templates!
+[](https://pypi.org/project/langchain-cli/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-cli)
+[](https://twitter.com/langchainai)
+
+## Quick Install
+
+```bash
+pip install langchain-cli
+```
+
+## 🤔 What is this?
+
+This package implements the official CLI for LangChain. Right now, it is most useful for getting started with LangChain Templates!
+
+## 📖 Documentation
+
+[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md)
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
````
````diff
@@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab
 ```python
 from __module_name__ import Chat__ModuleName__
 
-llm = Chat__ModuleName__()
-llm.invoke("Sing a ballad of LangChain.")
+model = Chat__ModuleName__()
+model.invoke("Sing a ballad of LangChain.")
 ```
 
 ## Embeddings
@@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from __module_name__ import __ModuleName__LLM
 
-llm = __ModuleName__LLM()
-llm.invoke("The meaning of life is")
+model = __ModuleName__LLM()
+model.invoke("The meaning of life is")
 ```
````
```diff
@@ -1,262 +1,264 @@
 {
  "cells": [
   {
    "cell_type": "raw",
    "id": "afaf8039",
    "metadata": {},
    "source": [
     "---\n",
     "sidebar_label: __ModuleName__\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e49f1e0d",
    "metadata": {},
    "source": [
     "# Chat__ModuleName__\n",
     "\n",
     "- TODO: Make sure API reference link is correct.\n",
     "\n",
     "This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
     "\n",
     "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
     "\n",
     "## Overview\n",
     "### Integration details\n",
     "\n",
     "- TODO: Fill in table features.\n",
     "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
     "- TODO: Make sure API reference links are correct.\n",
     "\n",
     "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
     "| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
     "\n",
     "### Model features\n",
     "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
     "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
     "| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
     "\n",
     "## Setup\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
     "\n",
     "### Credentials\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
    "metadata": {},
    "outputs": [],
    "source": [
     "import getpass\n",
     "import os\n",
     "\n",
     "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
     "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
     "        \"Enter your __ModuleName__ API key: \"\n",
     "    )"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
    "metadata": {},
    "outputs": [],
    "source": [
     "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
     "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
    "metadata": {},
    "source": [
     "### Installation\n",
     "\n",
     "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
    "metadata": {},
    "outputs": [],
    "source": [
     "%pip install -qU __package_name__"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "a38cde65-254d-4219-a441-068766c0d4b5",
    "metadata": {},
    "source": [
     "## Instantiation\n",
     "\n",
     "Now we can instantiate our model object and generate chat completions:\n",
     "\n",
     "- TODO: Update model instantiation with relevant params."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
    "metadata": {},
    "outputs": [],
    "source": [
     "from __module_name__ import Chat__ModuleName__\n",
     "\n",
-    "llm = Chat__ModuleName__(\n",
+    "model = Chat__ModuleName__(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
     "    timeout=None,\n",
     "    max_retries=2,\n",
     "    # other params...\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "2b4f3e15",
    "metadata": {},
    "source": [
     "## Invocation\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "62e0dbc3",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
     "messages = [\n",
     "    (\n",
     "        \"system\",\n",
     "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
     "    ),\n",
     "    (\"human\", \"I love programming.\"),\n",
     "]\n",
-    "ai_msg = llm.invoke(messages)\n",
+    "ai_msg = model.invoke(messages)\n",
     "ai_msg"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
    "metadata": {},
    "outputs": [],
    "source": [
     "print(ai_msg.content)"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
    "metadata": {},
    "source": [
     "## Chaining\n",
     "\n",
     "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "\n",
     "prompt = ChatPromptTemplate(\n",
     "    [\n",
     "        (\n",
     "            \"system\",\n",
     "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
     "        ),\n",
     "        (\"human\", \"{input}\"),\n",
     "    ]\n",
     ")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"input_language\": \"English\",\n",
     "        \"output_language\": \"German\",\n",
     "        \"input\": \"I love programming.\",\n",
     "    }\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
    "metadata": {},
    "source": [
     "## TODO: Any functionality specific to this model provider\n",
     "\n",
     "E.g. creating/using finetuned models via this provider. Delete if not relevant."
    ]
   },
   {
    "cell_type": "markdown",
    "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
    "metadata": {},
    "source": [
     "## API reference\n",
     "\n",
     "For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.11.9"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 5
 }
```
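For a concrete sense of what the template renders to once the `__ModuleName__` placeholders are filled in, here is a hedged sketch using `langchain-openai` as one example provider (the model name is illustrative):

```python
# Rendered form of the template's instantiation and invocation cells;
# langchain-openai is just one provider that follows this template.
from langchain_openai import ChatOpenAI

model = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0,
    max_retries=2,
)
messages = [
    ("system", "You are a helpful assistant that translates English to French."),
    ("human", "I love programming."),
]
ai_msg = model.invoke(messages)
print(ai_msg.content)
```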
```diff
@@ -1,236 +1,238 @@
 {
  "cells": [
   {
    "cell_type": "raw",
    "id": "67db2992",
    "metadata": {},
    "source": [
     "---\n",
     "sidebar_label: __ModuleName__\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "9597802c",
    "metadata": {},
    "source": [
     "# __ModuleName__LLM\n",
     "\n",
     "- [ ] TODO: Make sure API reference link is correct\n",
     "\n",
     "This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
     "\n",
     "## Overview\n",
     "### Integration details\n",
     "\n",
     "- TODO: Fill in table features.\n",
     "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
     "- TODO: Make sure API reference links are correct.\n",
     "\n",
     "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
     "| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
     "\n",
     "## Setup\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
     "\n",
     "### Credentials\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "bc51e756",
    "metadata": {},
    "outputs": [],
    "source": [
     "import getpass\n",
     "import os\n",
     "\n",
     "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
     "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
     "        \"Enter your __ModuleName__ API key: \"\n",
     "    )"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "4b6e1ca6",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "196c2b41",
    "metadata": {},
    "outputs": [],
    "source": [
     "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
     "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "809c6577",
    "metadata": {},
    "source": [
     "### Installation\n",
     "\n",
     "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "59c710c4",
    "metadata": {},
    "outputs": [],
    "source": [
     "%pip install -qU __package_name__"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0a760037",
    "metadata": {},
    "source": [
     "## Instantiation\n",
     "\n",
     "Now we can instantiate our model object and generate chat completions:\n",
     "\n",
     "- TODO: Update model instantiation with relevant params."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "a0562a13",
    "metadata": {},
    "outputs": [],
    "source": [
     "from __module_name__ import __ModuleName__LLM\n",
     "\n",
-    "llm = __ModuleName__LLM(\n",
+    "model = __ModuleName__LLM(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
     "    timeout=None,\n",
     "    max_retries=2,\n",
     "    # other params...\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0ee90032",
    "metadata": {},
    "source": [
     "## Invocation\n",
     "\n",
     "- [ ] TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "035dea0f",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
     "input_text = \"__ModuleName__ is an AI company that \"\n",
     "\n",
-    "completion = llm.invoke(input_text)\n",
+    "completion = model.invoke(input_text)\n",
     "completion"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "add38532",
    "metadata": {},
    "source": [
     "## Chaining\n",
     "\n",
     "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "078e9db2",
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"output_language\": \"German\",\n",
     "        \"input\": \"I love programming.\",\n",
     "    }\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e99eef30",
    "metadata": {},
    "source": [
     "## TODO: Any functionality specific to this model provider\n",
     "\n",
     "E.g. creating/using finetuned models via this provider. Delete if not relevant"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e9bdfcef",
    "metadata": {},
    "source": [
     "## API reference\n",
     "\n",
     "For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
    "display_name": "Python 3.11.1 64-bit",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.9.7"
   },
   "vscode": {
    "interpreter": {
     "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
    }
   }
  },
  "nbformat": 4,
  "nbformat_minor": 5
 }
```
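The completion-model template renders the same way; a hedged sketch with the OpenAI completion endpoint standing in for the provider placeholder:

```python
# Rendered form of the template's instantiation and chaining cells; the
# provider and model name are illustrative, not prescribed by the template.
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

model = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0)
prompt = PromptTemplate.from_template("How to say {input} in {output_language}:\n")

chain = prompt | model
print(chain.invoke({"output_language": "German", "input": "I love programming."}))
```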
```diff
@@ -155,7 +155,7 @@
    "\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
-   "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+   "model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
   ]
  },
  {
@@ -185,7 +185,7 @@
    "chain = (\n",
    "    {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
    "    | prompt\n",
-   "    | llm\n",
+   "    | model\n",
    "    | StrOutputParser()\n",
    ")"
   ]
```
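The chain in this hunk leans on `retriever`, `format_docs`, and `prompt` defined elsewhere in the notebook. A self-contained sketch of the same pipeline, with stand-ins for those objects:

```python
# Self-contained version of the pipeline above; the retriever and prompt are
# stand-ins, not the notebook's actual objects.
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI

# Stand-in retriever: any Runnable that maps a question to a list of Documents.
retriever = RunnableLambda(
    lambda q: [Document(page_content="LangChain is a framework for LLM apps.")]
)


def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)


prompt = ChatPromptTemplate(
    [("human", "Context:\n{context}\n\nQuestion: {question}")]
)
model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)

chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
print(chain.invoke("What is LangChain?"))
```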
@@ -1,204 +1,204 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {
    "vscode": {
     "languageId": "raw"
    }
   },
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__ByteStore\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# __ModuleName__ByteStore\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
    "\n",
    "## Overview\n",
    "\n",
    "- TODO: (Optional) A short introduction to the underlying technology/API.\n",
    "\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: |\n",
    "| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ |  |  |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
    "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
    "        \"Enter your __ModuleName__ API key: \"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU __package_name__"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our byte store:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __module_name__ import __ModuleName__ByteStore\n",
    "\n",
    "kv_store = __ModuleName__ByteStore(\n",
    "    # params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Usage\n",
    "\n",
    "- TODO: Run cells so output can be seen.\n",
    "\n",
    "You can set data under keys like this using the `mset` method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "kv_store.mset(\n",
    "    [\n",
    "        [\"key1\", b\"value1\"],\n",
    "        [\"key2\", b\"value2\"],\n",
    "    ]\n",
    ")\n",
    "\n",
    "kv_store.mget(\n",
    "    [\n",
    "        \"key1\",\n",
    "        \"key2\",\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And you can delete data using the `mdelete` method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "kv_store.mdelete(\n",
    "    [\n",
    "        \"key1\",\n",
    "        \"key2\",\n",
    "    ]\n",
    ")\n",
    "\n",
    "kv_store.mget(\n",
    "    [\n",
    "        \"key1\",\n",
    "        \"key2\",\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this key-value store provider\n",
    "\n",
    "E.g. extra initialization. Delete if not relevant."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
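The template's usage cells document the `mset`/`mget`/`mdelete` batch interface that all LangChain byte stores share. A minimal runnable sketch of the same calls, using the in-memory implementation from `langchain_core` in place of the templated provider class:

```python
from langchain_core.stores import InMemoryByteStore

# Any BaseStore implementation exposes the same batch methods the template documents.
kv_store = InMemoryByteStore()

# mset takes (key, value) pairs; a ByteStore's values are bytes.
kv_store.mset([("key1", b"value1"), ("key2", b"value2")])
print(kv_store.mget(["key1", "key2"]))  # [b'value1', b'value2']

# mdelete removes keys; missing keys come back as None from mget.
kv_store.mdelete(["key1"])
print(kv_store.mget(["key1", "key2"]))  # [None, b'value2']
```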
@@ -1,271 +1,271 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "10238e62-3465-4973-9279-606cbb7ccf16",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a6f91f20",
   "metadata": {},
   "source": [
    "# __ModuleName__\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about underlying API, etc.\n",
    "\n",
    "## Overview\n",
    "\n",
    "### Integration details\n",
    "\n",
    "- TODO: Make sure links and features are correct\n",
    "\n",
    "| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ |  |\n",
    "\n",
    "### Tool features\n",
    "\n",
    "- TODO: Add feature table if it makes sense\n",
    "\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Add any additional deps\n",
    "\n",
    "The integration lives in the `langchain-community` package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f85b4089",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install --quiet -U langchain-community"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b15e9266",
   "metadata": {},
   "source": [
    "### Credentials\n",
    "\n",
    "- TODO: Add any credentials that are needed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
    "#     os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bc5ab717-fd27-4c59-b912-bdd099541478",
   "metadata": {},
   "source": [
    "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "- TODO: Fill in instantiation params\n",
    "\n",
    "Here we show how to instantiate an instance of the __ModuleName__ tool, with "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.tools import __ModuleName__\n",
    "\n",
    "\n",
    "tool = __ModuleName__(...)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "74147a1a",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
    "\n",
    "- TODO: Describe what the tool args are, fill them in, run cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "tool.invoke({...})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d6e73897",
   "metadata": {},
   "source": [
    "### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
    "\n",
    "We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
    "\n",
    "- TODO: Fill in tool args and run cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f90e33a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
    "model_generated_tool_call = {\n",
    "    \"args\": {...},  # TODO: FILL IN\n",
    "    \"id\": \"1\",\n",
    "    \"name\": tool.name,\n",
    "    \"type\": \"tool_call\",\n",
    "}\n",
    "tool.invoke(model_generated_tool_call)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
   "metadata": {},
   "source": [
    "## Use within an agent\n",
    "\n",
    "- TODO: Add user question and run cells\n",
    "\n",
    "We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
    "\n",
    "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
    "\n",
    "<ChatModelTabs customVarName=\"llm\" />\n"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
   "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | output: false\n",
    "# | echo: false\n",
    "\n",
    "# !pip install -qU langchain langchain-openai\n",
    "from langchain.chat_models import init_chat_model\n",
    "\n",
-    "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
+    "model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bea35fa1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langgraph.prebuilt import create_react_agent\n",
    "\n",
    "tools = [tool]\n",
-    "agent = create_react_agent(llm, tools)"
+    "agent = create_react_agent(model, tools)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
   "metadata": {},
   "outputs": [],
   "source": [
    "example_query = \"...\"\n",
    "\n",
    "events = agent.stream(\n",
    "    {\"messages\": [(\"user\", example_query)]},\n",
    "    stream_mode=\"values\",\n",
    ")\n",
    "for event in events:\n",
    "    event[\"messages\"][-1].pretty_print()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4ac8146c",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "poetry-venv-311",
   "language": "python",
   "name": "poetry-venv-311"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
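The tool template documents two invocation styles: direct `invoke` with an args dict, and `invoke` with a model-style `ToolCall`, which returns a `ToolMessage`. A small runnable sketch of both, with a hypothetical `multiply` tool standing in for the templated class:

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

# Direct invocation with args returns the raw tool output.
print(multiply.invoke({"a": 6, "b": 7}))  # 42

# Invoking with a ToolCall dict (as a model would emit) returns a ToolMessage.
tool_call = {
    "args": {"a": 6, "b": 7},
    "id": "1",
    "name": multiply.name,
    "type": "tool_call",
}
print(multiply.invoke(tool_call))
```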
@@ -36,20 +36,20 @@ class Chat__ModuleName__(BaseChatModel):

    # TODO: Populate with relevant params.
    Key init args — completion params:
-        model: str
+        model:
            Name of __ModuleName__ model to use.
-        temperature: float
+        temperature:
            Sampling temperature.
-        max_tokens: int | None
+        max_tokens:
            Max number of tokens to generate.

    # TODO: Populate with relevant params.
    Key init args — client params:
-        timeout: float | None
+        timeout:
            Timeout for requests.
-        max_retries: int
+        max_retries:
            Max number of retries.
-        api_key: str | None
+        api_key:
            __ModuleName__ API key. If not passed in will be read from env var
            __MODULE_NAME___API_KEY.

@@ -60,7 +60,7 @@ class Chat__ModuleName__(BaseChatModel):
        ```python
        from __module_name__ import Chat__ModuleName__

-        llm = Chat__ModuleName__(
+        model = Chat__ModuleName__(
            model="...",
            temperature=0,
            max_tokens=None,
@@ -77,7 +77,7 @@ class Chat__ModuleName__(BaseChatModel):
            ("system", "You are a helpful translator. Translate the user sentence to French."),
            ("human", "I love programming."),
        ]
-        llm.invoke(messages)
+        model.invoke(messages)
        ```

        ```python
@@ -87,7 +87,7 @@ class Chat__ModuleName__(BaseChatModel):
    # TODO: Delete if token-level streaming isn't supported.
    Stream:
        ```python
-        for chunk in llm.stream(messages):
+        for chunk in model.stream(messages):
            print(chunk.text, end="")
        ```

@@ -96,7 +96,7 @@ class Chat__ModuleName__(BaseChatModel):
        ```

        ```python
-        stream = llm.stream(messages)
+        stream = model.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
@@ -110,13 +110,13 @@ class Chat__ModuleName__(BaseChatModel):
    # TODO: Delete if native async isn't supported.
    Async:
        ```python
-        await llm.ainvoke(messages)
+        await model.ainvoke(messages)

        # stream:
-        # async for chunk in (await llm.astream(messages))
+        # async for chunk in (await model.astream(messages))

        # batch:
-        # await llm.abatch([messages])
+        # await model.abatch([messages])
        ```

        ```python
@@ -137,8 +137,8 @@ class Chat__ModuleName__(BaseChatModel):

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

-        llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
-        ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
+        model_with_tools = model.bind_tools([GetWeather, GetPopulation])
+        ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
        ai_msg.tool_calls
        ```

@@ -162,8 +162,8 @@ class Chat__ModuleName__(BaseChatModel):
            punchline: str = Field(description="The punchline to the joke")
            rating: int | None = Field(description="How funny the joke is, from 1 to 10")

-        structured_llm = llm.with_structured_output(Joke)
-        structured_llm.invoke("Tell me a joke about cats")
+        structured_model = model.with_structured_output(Joke)
+        structured_model.invoke("Tell me a joke about cats")
        ```

        ```python
@@ -176,8 +176,8 @@ class Chat__ModuleName__(BaseChatModel):
    JSON mode:
        ```python
        # TODO: Replace with appropriate bind arg.
-        json_llm = llm.bind(response_format={"type": "json_object"})
-        ai_msg = json_llm.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
+        json_model = model.bind(response_format={"type": "json_object"})
+        ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
        ai_msg.content
        ```

@@ -204,7 +204,7 @@ class Chat__ModuleName__(BaseChatModel):
            },
        ],
        )
-        ai_msg = llm.invoke([message])
+        ai_msg = model.invoke([message])
        ai_msg.content
        ```

@@ -235,7 +235,7 @@ class Chat__ModuleName__(BaseChatModel):
    # TODO: Delete if token usage metadata isn't supported.
    Token usage:
        ```python
-        ai_msg = llm.invoke(messages)
+        ai_msg = model.invoke(messages)
        ai_msg.usage_metadata
        ```

@@ -247,8 +247,8 @@ class Chat__ModuleName__(BaseChatModel):
    Logprobs:
        ```python
        # TODO: Replace with appropriate bind arg.
-        logprobs_llm = llm.bind(logprobs=True)
-        ai_msg = logprobs_llm.invoke(messages)
+        logprobs_model = model.bind(logprobs=True)
+        ai_msg = logprobs_model.invoke(messages)
        ai_msg.response_metadata["logprobs"]
        ```

@@ -257,7 +257,7 @@ class Chat__ModuleName__(BaseChatModel):
        ```
    Response metadata
        ```python
-        ai_msg = llm.invoke(messages)
+        ai_msg = model.invoke(messages)
        ai_msg.response_metadata
        ```
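The structured-output example in this docstring relies on a Pydantic schema. A sketch of that schema that runs with Pydantic alone; the commented lines assume some concrete provider chat model, which the template leaves abstract:

```python
from pydantic import BaseModel, Field

class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: int | None = Field(default=None, description="How funny the joke is, from 1 to 10")

# With any concrete chat model (hypothetical, e.g. model = Chat__ModuleName__(...)):
# structured_model = model.with_structured_output(Joke)
# structured_model.invoke("Tell me a joke about cats")  # -> Joke instance
print(Joke(setup="Why was the cat on the computer?", punchline="To keep an eye on the mouse.").model_dump())
```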
@@ -65,7 +65,7 @@ class __ModuleName__Retriever(BaseRetriever):
        Question: {question}\"\"\"
        )

-        llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+        model = ChatOpenAI(model="gpt-3.5-turbo-0125")

        def format_docs(docs):
            return "\\n\\n".join(doc.page_content for doc in docs)
@@ -73,7 +73,7 @@ class __ModuleName__Retriever(BaseRetriever):
        chain = (
            {"context": retriever | format_docs, "question": RunnablePassthrough()}
            | prompt
-            | llm
+            | model
            | StrOutputParser()
        )
@@ -37,16 +37,16 @@ class __ModuleName__VectorStore(VectorStore):

    # TODO: Populate with relevant params.
    Key init args — indexing params:
-        collection_name: str
+        collection_name:
            Name of the collection.
-        embedding_function: Embeddings
+        embedding_function:
            Embedding function to use.

    # TODO: Populate with relevant params.
    Key init args — client params:
-        client: Client | None
+        client:
            Client to use.
-        connection_args: dict | None
+        connection_args:
            Connection arguments.

    # TODO: Replace with relevant init params.
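For context on the init-and-search flow this vector-store docstring documents, a minimal runnable sketch using `langchain_core`'s in-memory store and deterministic fake embeddings in place of the templated provider class (assumptions: both classes ship with recent `langchain-core`):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# Fake embeddings keep the example runnable without a provider account.
store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=256))
store.add_texts(["LangChain vector stores index embedded documents."])

docs = store.similarity_search("vector stores", k=1)
print(docs[0].page_content)
```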
@@ -65,7 +65,7 @@ def is_subclass(class_obj: type, classes_: list[type]) -> bool:
        classes_: A list of classes to check against.

    Returns:
-        True if `class_obj` is a subclass of any class in `classes_`, False otherwise.
+        True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
    """
    return any(
        issubclass(class_obj, kls)
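The helper whose docstring is touched here is short enough to sketch in full; the diff truncates the generator expression, so its tail below is an assumption:

```python
def is_subclass(class_obj: type, classes_: list[type]) -> bool:
    """Check whether a class is a subclass of any class in a list.

    Returns:
        True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
    """
    return any(
        issubclass(class_obj, kls)
        for kls in classes_  # assumed continuation; the hunk cuts off here
    )

# bool subclasses int, so the first check passes; float matches nothing in the second.
assert is_subclass(bool, [int, str])
assert not is_subclass(float, [str])
```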
@@ -182,7 +182,7 @@ def parse_dependencies(
    inner_branches = _list_arg_to_length(branch, num_deps)

    return list(
-        map(  # type: ignore[call-overload]
+        map(  # type: ignore[call-overload, unused-ignore]
            parse_dependency_string,
            inner_deps,
            inner_repos,
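Appending `unused-ignore` lets the same line pass under mypy versions where the `call-overload` ignore is no longer needed; without it, an unneeded ignore errors when `warn_unused_ignores` is on. A tiny illustration of the mechanism, unrelated to the langchain-cli code itself:

```python
from typing import Any

def untyped() -> Any:
    return 1

def f() -> int:
    # "unused-ignore" tolerates mypy versions where the no-any-return
    # ignore is unnecessary and warn_unused_ignores is enabled.
    return untyped()  # type: ignore[no-any-return, unused-ignore]
```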
@@ -20,12 +20,13 @@ description = "CLI for interacting with LangChain"
readme = "README.md"

[project.urls]
-homepage = "https://docs.langchain.com/"
-repository = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
-changelog = "https://github.com/langchain-ai/langchain/releases?q=%22langchain-cli%3D%3D1%22"
-twitter = "https://x.com/LangChainAI"
-slack = "https://www.langchain.com/join-community"
-reddit = "https://www.reddit.com/r/LangChain/"
+Homepage = "https://docs.langchain.com/"
+Documentation = "https://docs.langchain.com/"
+Source = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
+Changelog = "https://github.com/langchain-ai/langchain/releases?q=%22langchain-cli%3D%3D1%22"
+Twitter = "https://x.com/LangChainAI"
+Slack = "https://www.langchain.com/join-community"
+Reddit = "https://www.reddit.com/r/LangChain/"

[project.scripts]
langchain = "langchain_cli.cli:app"
@@ -42,14 +43,14 @@ lint = [
]
test = [
    "langchain-core",
-    "langchain"
+    "langchain-classic"
]
-typing = ["langchain"]
+typing = ["langchain-classic"]
test_integration = []

[tool.uv.sources]
langchain-core = { path = "../core", editable = true }
-langchain = { path = "../langchain", editable = true }
+langchain-classic = { path = "../langchain", editable = true }

[tool.ruff.format]
docstring-code-format = true
@@ -1,5 +1,5 @@
import pytest
-from langchain._api import suppress_langchain_deprecation_warning as sup2
+from langchain_classic._api import suppress_langchain_deprecation_warning as sup2
from langchain_core._api import suppress_langchain_deprecation_warning as sup1

from langchain_cli.namespaces.migrate.generate.generic import (
466
libs/cli/uv.lock
generated
@@ -327,7 +327,21 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain"
|
||||
version = "0.3.27"
|
||||
version = "1.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7d/b8/36078257ba52351608129ee983079a4d77ee69eb1470ee248cd8f5728a31/langchain-1.0.0.tar.gz", hash = "sha256:56bf90d935ac1dda864519372d195ca58757b755dd4c44b87840b67d069085b7", size = 466932, upload-time = "2025-10-17T20:53:20.319Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/4d/2758a16ad01716c0fb3fe9ec205fd530eae4528b35a27ff44837c399e032/langchain-1.0.0-py3-none-any.whl", hash = "sha256:8c95e41250fc86d09a978fbdf999f86c18d50a28a2addc5da88546af00a1ad15", size = 106202, upload-time = "2025-10-17T20:53:18.685Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-classic"
|
||||
version = "1.0.0"
|
||||
source = { editable = "../langchain" }
|
||||
dependencies = [
|
||||
{ name = "async-timeout", marker = "python_full_version < '3.11'" },
|
||||
@@ -344,20 +358,28 @@ dependencies = [
|
||||
requires-dist = [
|
||||
{ name = "async-timeout", marker = "python_full_version < '3.11'", specifier = ">=4.0.0,<5.0.0" },
|
||||
{ name = "langchain-anthropic", marker = "extra == 'anthropic'" },
|
||||
{ name = "langchain-community", marker = "extra == 'community'" },
|
||||
{ name = "langchain-aws", marker = "extra == 'aws'" },
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "langchain-deepseek", marker = "extra == 'deepseek'" },
|
||||
{ name = "langchain-fireworks", marker = "extra == 'fireworks'" },
|
||||
{ name = "langchain-google-genai", marker = "extra == 'google-genai'" },
|
||||
{ name = "langchain-google-vertexai", marker = "extra == 'google-vertexai'" },
|
||||
{ name = "langchain-groq", marker = "extra == 'groq'" },
|
||||
{ name = "langchain-huggingface", marker = "extra == 'huggingface'" },
|
||||
{ name = "langchain-mistralai", marker = "extra == 'mistralai'" },
|
||||
{ name = "langchain-ollama", marker = "extra == 'ollama'" },
|
||||
{ name = "langchain-openai", marker = "extra == 'openai'", editable = "../partners/openai" },
|
||||
{ name = "langchain-perplexity", marker = "extra == 'perplexity'" },
|
||||
{ name = "langchain-text-splitters", editable = "../text-splitters" },
|
||||
{ name = "langchain-together", marker = "extra == 'together'" },
|
||||
{ name = "langchain-xai", marker = "extra == 'xai'" },
|
||||
{ name = "langsmith", specifier = ">=0.1.17,<1.0.0" },
|
||||
{ name = "pydantic", specifier = ">=2.7.4,<3.0.0" },
|
||||
{ name = "pyyaml", specifier = ">=5.3.0,<7.0.0" },
|
||||
{ name = "requests", specifier = ">=2.0.0,<3.0.0" },
|
||||
{ name = "sqlalchemy", specifier = ">=1.4.0,<3.0.0" },
|
||||
]
|
||||
provides-extras = ["community", "anthropic", "openai", "google-vertexai", "google-genai", "together"]
|
||||
provides-extras = ["anthropic", "openai", "google-vertexai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "aws", "deepseek", "xai", "perplexity"]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
dev = [
|
||||
@@ -376,7 +398,6 @@ test = [
|
||||
{ name = "blockbuster", specifier = ">=1.5.18,<1.6.0" },
|
||||
{ name = "cffi", marker = "python_full_version < '3.10'", specifier = "<1.17.1" },
|
||||
{ name = "cffi", marker = "python_full_version >= '3.10'" },
|
||||
{ name = "duckdb-engine", specifier = ">=0.9.2,<1.0.0" },
|
||||
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "langchain-openai", editable = "../partners/openai" },
|
||||
@@ -411,9 +432,10 @@ test-integration = [
|
||||
{ name = "wrapt", specifier = ">=1.15.0,<2.0.0" },
|
||||
]
|
||||
typing = [
|
||||
{ name = "fastapi", specifier = ">=0.116.1,<1.0.0" },
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "langchain-text-splitters", editable = "../text-splitters" },
|
||||
{ name = "mypy", specifier = ">=1.15.0,<1.16.0" },
|
||||
{ name = "mypy", specifier = ">=1.18.2,<1.19.0" },
|
||||
{ name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
|
||||
{ name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.4" },
|
||||
{ name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
|
||||
@@ -448,11 +470,11 @@ lint = [
|
||||
{ name = "ruff" },
|
||||
]
|
||||
test = [
|
||||
{ name = "langchain" },
|
||||
{ name = "langchain-classic" },
|
||||
{ name = "langchain-core" },
|
||||
]
|
||||
typing = [
|
||||
{ name = "langchain" },
|
||||
{ name = "langchain-classic" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
@@ -475,15 +497,15 @@ lint = [
|
||||
{ name = "ruff", specifier = ">=0.13.1,<0.14" },
|
||||
]
|
||||
test = [
|
||||
{ name = "langchain", editable = "../langchain" },
|
||||
{ name = "langchain-classic", editable = "../langchain" },
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
]
|
||||
test-integration = []
|
||||
typing = [{ name = "langchain", editable = "../langchain" }]
|
||||
typing = [{ name = "langchain-classic", editable = "../langchain" }]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "1.0.0a6"
|
||||
version = "1.0.0"
|
||||
source = { editable = "../core" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -541,7 +563,7 @@ typing = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-text-splitters"
|
||||
version = "1.0.0a1"
|
||||
version = "1.0.0"
|
||||
source = { editable = "../text-splitters" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
@@ -574,8 +596,8 @@ test-integration = [
|
||||
{ name = "nltk", specifier = ">=3.9.1,<4.0.0" },
|
||||
{ name = "scipy", marker = "python_full_version == '3.12.*'", specifier = ">=1.7.0,<2.0.0" },
|
||||
{ name = "scipy", marker = "python_full_version >= '3.13'", specifier = ">=1.14.1,<2.0.0" },
|
||||
{ name = "sentence-transformers", specifier = ">=3.0.1,<4.0.0" },
|
||||
{ name = "spacy", specifier = ">=3.8.7,<4.0.0" },
|
||||
{ name = "sentence-transformers", marker = "python_full_version < '3.14'", specifier = ">=3.0.1,<4.0.0" },
|
||||
{ name = "spacy", marker = "python_full_version < '3.14'", specifier = ">=3.8.7,<4.0.0" },
|
||||
{ name = "thinc", specifier = ">=8.3.6,<9.0.0" },
|
||||
{ name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
|
||||
{ name = "transformers", specifier = ">=4.51.3,<5.0.0" },
|
||||
@@ -588,6 +610,62 @@ typing = [
|
||||
{ name = "types-requests", specifier = ">=2.31.0.20240218,<3.0.0.0" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph"
|
||||
version = "1.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph-checkpoint" },
|
||||
{ name = "langgraph-prebuilt" },
|
||||
{ name = "langgraph-sdk" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "xxhash" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/57/f7/7ae10f1832ab1a6a402f451e54d6dab277e28e7d4e4204e070c7897ca71c/langgraph-1.0.0.tar.gz", hash = "sha256:5f83ed0e9bbcc37635bc49cbc9b3d9306605fa07504f955b7a871ed715f9964c", size = 472835, upload-time = "2025-10-17T20:23:38.263Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/07/42/6f6d0fe4eb661b06da8e6c59e58044e9e4221fdbffdcacae864557de961e/langgraph-1.0.0-py3-none-any.whl", hash = "sha256:4d478781832a1bc67e06c3eb571412ec47d7c57a5467d1f3775adf0e9dd4042c", size = 155416, upload-time = "2025-10-17T20:23:36.978Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-checkpoint"
|
||||
version = "2.1.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "ormsgpack" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/29/83/6404f6ed23a91d7bc63d7df902d144548434237d017820ceaa8d014035f2/langgraph_checkpoint-2.1.2.tar.gz", hash = "sha256:112e9d067a6eff8937caf198421b1ffba8d9207193f14ac6f89930c1260c06f9", size = 142420, upload-time = "2025-10-07T17:45:17.129Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/f2/06bf5addf8ee664291e1b9ffa1f28fc9d97e59806dc7de5aea9844cbf335/langgraph_checkpoint-2.1.2-py3-none-any.whl", hash = "sha256:911ebffb069fd01775d4b5184c04aaafc2962fcdf50cf49d524cd4367c4d0c60", size = 45763, upload-time = "2025-10-07T17:45:16.19Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-prebuilt"
|
||||
version = "1.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph-checkpoint" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/02/2d/934b1129e217216a0dfaf0f7df0a10cedf2dfafe6cc8e1ee238cafaaa4a7/langgraph_prebuilt-1.0.0.tar.gz", hash = "sha256:eb75dad9aca0137451ca0395aa8541a665b3f60979480b0431d626fd195dcda2", size = 119927, upload-time = "2025-10-17T20:15:21.429Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/33/2e/ffa698eedc4c355168a9207ee598b2cc74ede92ce2b55c3469ea06978b6e/langgraph_prebuilt-1.0.0-py3-none-any.whl", hash = "sha256:ceaae4c5cee8c1f9b6468f76c114cafebb748aed0c93483b7c450e5a89de9c61", size = 28455, upload-time = "2025-10-17T20:15:20.043Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-sdk"
|
||||
version = "0.2.9"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpx" },
|
||||
{ name = "orjson" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/23/d8/40e01190a73c564a4744e29a6c902f78d34d43dad9b652a363a92a67059c/langgraph_sdk-0.2.9.tar.gz", hash = "sha256:b3bd04c6be4fa382996cd2be8fbc1e7cc94857d2bc6b6f4599a7f2a245975303", size = 99802, upload-time = "2025-09-20T18:49:14.734Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/66/05/b2d34e16638241e6f27a6946d28160d4b8b641383787646d41a3727e0896/langgraph_sdk-0.2.9-py3-none-any.whl", hash = "sha256:fbf302edadbf0fb343596f91c597794e936ef68eebc0d3e1d358b6f9f72a1429", size = 56752, upload-time = "2025-09-20T18:49:13.346Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langserve"
|
||||
version = "0.0.51"
|
||||
@@ -780,6 +858,61 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/28/01/d6b274a0635be0468d4dbd9cafe80c47105937a0d42434e805e67cd2ed8b/orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc", size = 125985, upload-time = "2025-08-26T17:46:16.67Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ormsgpack"
|
||||
version = "1.11.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/65/f8/224c342c0e03e131aaa1a1f19aa2244e167001783a433f4eed10eedd834b/ormsgpack-1.11.0.tar.gz", hash = "sha256:7c9988e78fedba3292541eb3bb274fa63044ef4da2ddb47259ea70c05dee4206", size = 49357, upload-time = "2025-10-08T17:29:15.621Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/3d/6996193cb2babc47fc92456223bef7d141065357ad4204eccf313f47a7b3/ormsgpack-1.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:03d4e658dd6e1882a552ce1d13cc7b49157414e7d56a4091fbe7823225b08cba", size = 367965, upload-time = "2025-10-08T17:28:06.736Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/89/c83b805dd9caebb046f4ceeed3706d0902ed2dbbcf08b8464e89f2c52e05/ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bb67eb913c2b703f0ed39607fc56e50724dd41f92ce080a586b4d6149eb3fe4", size = 195209, upload-time = "2025-10-08T17:28:08.395Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/17/427d9c4f77b120f0af01d7a71d8144771c9388c2a81f712048320e31353b/ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e54175b92411f73a238e5653a998627f6660de3def37d9dd7213e0fd264ca56", size = 205868, upload-time = "2025-10-08T17:28:09.688Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/32/a9ce218478bdbf3fee954159900e24b314ab3064f7b6a217ccb1e3464324/ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca2b197f4556e1823d1319869d4c5dc278be335286d2308b0ed88b59a5afcc25", size = 207391, upload-time = "2025-10-08T17:28:11.031Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/d3/4413fe7454711596fdf08adabdfa686580e4656702015108e4975f00a022/ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bc62388262f58c792fe1e450e1d9dbcc174ed2fb0b43db1675dd7c5ff2319d6a", size = 377078, upload-time = "2025-10-08T17:28:12.39Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/ad/13fae555a45e35ca1ca929a27c9ee0a3ecada931b9d44454658c543f9b9c/ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c48bc10af74adfbc9113f3fb160dc07c61ad9239ef264c17e449eba3de343dc2", size = 470776, upload-time = "2025-10-08T17:28:13.484Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/60/51178b093ffc4e2ef3381013a67223e7d56224434fba80047249f4a84b26/ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a608d3a1d4fa4acdc5082168a54513cff91f47764cef435e81a483452f5f7647", size = 380862, upload-time = "2025-10-08T17:28:14.747Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/e3/1cb6c161335e2ae7d711ecfb007a31a3936603626e347c13e5e53b7c7cf8/ormsgpack-1.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:97217b4f7f599ba45916b9c4c4b1d5656e8e2a4d91e2e191d72a7569d3c30923", size = 112058, upload-time = "2025-10-08T17:28:15.777Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/7c/90164d00e8e94b48eff8a17bc2f4be6b71ae356a00904bc69d5e8afe80fb/ormsgpack-1.11.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:c7be823f47d8e36648d4bc90634b93f02b7d7cc7480081195f34767e86f181fb", size = 367964, upload-time = "2025-10-08T17:28:16.778Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/c2/fb6331e880a3446c1341e72c77bd5a46da3e92a8e2edf7ea84a4c6c14fff/ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68accf15d1b013812755c0eb7a30e1fc2f81eb603a1a143bf0cda1b301cfa797", size = 195209, upload-time = "2025-10-08T17:28:17.796Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/50/4943fb5df8cc02da6b7b1ee2c2a7fb13aebc9f963d69280b1bb02b1fb178/ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:805d06fb277d9a4e503c0c707545b49cde66cbb2f84e5cf7c58d81dfc20d8658", size = 205869, upload-time = "2025-10-08T17:28:19.01Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/fa/e7e06835bfea9adeef43915143ce818098aecab0cbd3df584815adf3e399/ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1e57cdf003e77acc43643bda151dc01f97147a64b11cdee1380bb9698a7601c", size = 207391, upload-time = "2025-10-08T17:28:20.352Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/f0/f28a19e938a14ec223396e94f4782fbcc023f8c91f2ab6881839d3550f32/ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:37fc05bdaabd994097c62e2f3e08f66b03f856a640ede6dc5ea340bd15b77f4d", size = 377081, upload-time = "2025-10-08T17:28:21.926Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/e3/73d1d7287637401b0b6637e30ba9121e1aa1d9f5ea185ed9834ca15d512c/ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a6e9db6c73eb46b2e4d97bdffd1368a66f54e6806b563a997b19c004ef165e1d", size = 470779, upload-time = "2025-10-08T17:28:22.993Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/46/7ba7f9721e766dd0dfe4cedf444439447212abffe2d2f4538edeeec8ccbd/ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e9c44eae5ac0196ffc8b5ed497c75511056508f2303fa4d36b208eb820cf209e", size = 380865, upload-time = "2025-10-08T17:28:24.012Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/7d/bb92a0782bbe0626c072c0320001410cf3f6743ede7dc18f034b1a18edef/ormsgpack-1.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:11d0dfaf40ae7c6de4f7dbd1e4892e2e6a55d911ab1774357c481158d17371e4", size = 112058, upload-time = "2025-10-08T17:28:25.015Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/28/1a/f07c6f74142815d67e1d9d98c5b2960007100408ade8242edac96d5d1c73/ormsgpack-1.11.0-cp311-cp311-win_arm64.whl", hash = "sha256:0c63a3f7199a3099c90398a1bdf0cb577b06651a442dc5efe67f2882665e5b02", size = 105894, upload-time = "2025-10-08T17:28:25.93Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/16/2805ebfb3d2cbb6c661b5fae053960fc90a2611d0d93e2207e753e836117/ormsgpack-1.11.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3434d0c8d67de27d9010222de07fb6810fb9af3bb7372354ffa19257ac0eb83b", size = 368474, upload-time = "2025-10-08T17:28:27.532Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/39/6afae47822dca0ce4465d894c0bbb860a850ce29c157882dbdf77a5dd26e/ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2da5bd097e8dbfa4eb0d4ccfe79acd6f538dee4493579e2debfe4fc8f4ca89b", size = 195321, upload-time = "2025-10-08T17:28:28.573Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/54/11eda6b59f696d2f16de469bfbe539c9f469c4b9eef5a513996b5879c6e9/ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fdbaa0a5a8606a486960b60c24f2d5235d30ac7a8b98eeaea9854bffef14dc3d", size = 206036, upload-time = "2025-10-08T17:28:29.785Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/86/890430f704f84c4699ddad61c595d171ea2fd77a51fbc106f83981e83939/ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3682f24f800c1837017ee90ce321086b2cbaef88db7d4cdbbda1582aa6508159", size = 207615, upload-time = "2025-10-08T17:28:31.076Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b6/b9/77383e16c991c0ecb772205b966fc68d9c519e0b5f9c3913283cbed30ffe/ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcca21202bb05ccbf3e0e92f560ee59b9331182e4c09c965a28155efbb134993", size = 377195, upload-time = "2025-10-08T17:28:32.436Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/e2/15f9f045d4947f3c8a5e0535259fddf027b17b1215367488b3565c573b9d/ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c30e5c4655ba46152d722ec7468e8302195e6db362ec1ae2c206bc64f6030e43", size = 470960, upload-time = "2025-10-08T17:28:33.556Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/61/403ce188c4c495bc99dff921a0ad3d9d352dd6d3c4b629f3638b7f0cf79b/ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7138a341f9e2c08c59368f03d3be25e8b87b3baaf10d30fb1f6f6b52f3d47944", size = 381174, upload-time = "2025-10-08T17:28:34.781Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/a8/94c94bc48c68da4374870a851eea03fc5a45eb041182ad4c5ed9acfc05a4/ormsgpack-1.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:d4bd8589b78a11026d47f4edf13c1ceab9088bb12451f34396afe6497db28a27", size = 112314, upload-time = "2025-10-08T17:28:36.259Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/d0/aa4cf04f04e4cc180ce7a8d8ddb5a7f3af883329cbc59645d94d3ba157a5/ormsgpack-1.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:e5e746a1223e70f111d4001dab9585ac8639eee8979ca0c8db37f646bf2961da", size = 106072, upload-time = "2025-10-08T17:28:37.518Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/35/e34722edb701d053cf2240f55974f17b7dbfd11fdef72bd2f1835bcebf26/ormsgpack-1.11.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e7b36ab7b45cb95217ae1f05f1318b14a3e5ef73cb00804c0f06233f81a14e8", size = 368502, upload-time = "2025-10-08T17:28:38.547Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/6a/c2fc369a79d6aba2aa28c8763856c95337ac7fcc0b2742185cd19397212a/ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43402d67e03a9a35cc147c8c03f0c377cad016624479e1ee5b879b8425551484", size = 195344, upload-time = "2025-10-08T17:28:39.554Z" },
{ url = "https://files.pythonhosted.org/packages/8b/6a/0f8e24b7489885534c1a93bdba7c7c434b9b8638713a68098867db9f254c/ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:64fd992f932764d6306b70ddc755c1bc3405c4c6a69f77a36acf7af1c8f5ada4", size = 206045, upload-time = "2025-10-08T17:28:40.561Z" },
{ url = "https://files.pythonhosted.org/packages/99/71/8b460ba264f3c6f82ef5b1920335720094e2bd943057964ce5287d6df83a/ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0362fb7fe4a29c046c8ea799303079a09372653a1ce5a5a588f3bbb8088368d0", size = 207641, upload-time = "2025-10-08T17:28:41.736Z" },
{ url = "https://files.pythonhosted.org/packages/50/cf/f369446abaf65972424ed2651f2df2b7b5c3b735c93fc7fa6cfb81e34419/ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:de2f7a65a9d178ed57be49eba3d0fc9b833c32beaa19dbd4ba56014d3c20b152", size = 377211, upload-time = "2025-10-08T17:28:43.12Z" },
{ url = "https://files.pythonhosted.org/packages/2f/3f/948bb0047ce0f37c2efc3b9bb2bcfdccc61c63e0b9ce8088d4903ba39dcf/ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:f38cfae95461466055af966fc922d06db4e1654966385cda2828653096db34da", size = 470973, upload-time = "2025-10-08T17:28:44.465Z" },
{ url = "https://files.pythonhosted.org/packages/31/a4/92a8114d1d017c14aaa403445060f345df9130ca532d538094f38e535988/ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c88396189d238f183cea7831b07a305ab5c90d6d29b53288ae11200bd956357b", size = 381161, upload-time = "2025-10-08T17:28:46.063Z" },
{ url = "https://files.pythonhosted.org/packages/d0/64/5b76447da654798bfcfdfd64ea29447ff2b7f33fe19d0e911a83ad5107fc/ormsgpack-1.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:5403d1a945dd7c81044cebeca3f00a28a0f4248b33242a5d2d82111628043725", size = 112321, upload-time = "2025-10-08T17:28:47.393Z" },
{ url = "https://files.pythonhosted.org/packages/46/5e/89900d06db9ab81e7ec1fd56a07c62dfbdcda398c435718f4252e1dc52a0/ormsgpack-1.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:c57357b8d43b49722b876edf317bdad9e6d52071b523fdd7394c30cd1c67d5a0", size = 106084, upload-time = "2025-10-08T17:28:48.305Z" },
{ url = "https://files.pythonhosted.org/packages/4c/0b/c659e8657085c8c13f6a0224789f422620cef506e26573b5434defe68483/ormsgpack-1.11.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d390907d90fd0c908211592c485054d7a80990697ef4dff4e436ac18e1aab98a", size = 368497, upload-time = "2025-10-08T17:28:49.297Z" },
{ url = "https://files.pythonhosted.org/packages/1b/0e/451e5848c7ed56bd287e8a2b5cb5926e54466f60936e05aec6cb299f9143/ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6153c2e92e789509098e04c9aa116b16673bd88ec78fbe0031deeb34ab642d10", size = 195385, upload-time = "2025-10-08T17:28:50.314Z" },
{ url = "https://files.pythonhosted.org/packages/4c/28/90f78cbbe494959f2439c2ec571f08cd3464c05a6a380b0d621c622122a9/ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2b2c2a065a94d742212b2018e1fecd8f8d72f3c50b53a97d1f407418093446d", size = 206114, upload-time = "2025-10-08T17:28:51.336Z" },
{ url = "https://files.pythonhosted.org/packages/fb/db/34163f4c0923bea32dafe42cd878dcc66795a3e85669bc4b01c1e2b92a7b/ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:110e65b5340f3d7ef8b0009deae3c6b169437e6b43ad5a57fd1748085d29d2ac", size = 207679, upload-time = "2025-10-08T17:28:53.627Z" },
{ url = "https://files.pythonhosted.org/packages/b6/14/04ee741249b16f380a9b4a0cc19d4134d0b7c74bab27a2117da09e525eb9/ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c27e186fca96ab34662723e65b420919910acbbc50fc8e1a44e08f26268cb0e0", size = 377237, upload-time = "2025-10-08T17:28:56.12Z" },
{ url = "https://files.pythonhosted.org/packages/89/ff/53e588a6aaa833237471caec679582c2950f0e7e1a8ba28c1511b465c1f4/ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d56b1f877c13d499052d37a3db2378a97d5e1588d264f5040b3412aee23d742c", size = 471021, upload-time = "2025-10-08T17:28:57.299Z" },
{ url = "https://files.pythonhosted.org/packages/a6/f9/f20a6d9ef2be04da3aad05e8f5699957e9a30c6d5c043a10a296afa7e890/ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c88e28cd567c0a3269f624b4ade28142d5e502c8e826115093c572007af5be0a", size = 381205, upload-time = "2025-10-08T17:28:58.872Z" },
{ url = "https://files.pythonhosted.org/packages/f8/64/96c07d084b479ac8b7821a77ffc8d3f29d8b5c95ebfdf8db1c03dff02762/ormsgpack-1.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:8811160573dc0a65f62f7e0792c4ca6b7108dfa50771edb93f9b84e2d45a08ae", size = 112374, upload-time = "2025-10-08T17:29:00Z" },
{ url = "https://files.pythonhosted.org/packages/88/a5/5dcc18b818d50213a3cadfe336bb6163a102677d9ce87f3d2f1a1bee0f8c/ormsgpack-1.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:23e30a8d3c17484cf74e75e6134322255bd08bc2b5b295cc9c442f4bae5f3c2d", size = 106056, upload-time = "2025-10-08T17:29:01.29Z" },
{ url = "https://files.pythonhosted.org/packages/19/2b/776d1b411d2be50f77a6e6e94a25825cca55dcacfe7415fd691a144db71b/ormsgpack-1.11.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2905816502adfaf8386a01dd85f936cd378d243f4f5ee2ff46f67f6298dc90d5", size = 368661, upload-time = "2025-10-08T17:29:02.382Z" },
{ url = "https://files.pythonhosted.org/packages/a9/0c/81a19e6115b15764db3d241788f9fac093122878aaabf872cc545b0c4650/ormsgpack-1.11.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c04402fb9a0a9b9f18fbafd6d5f8398ee99b3ec619fb63952d3a954bc9d47daa", size = 195539, upload-time = "2025-10-08T17:29:03.472Z" },
{ url = "https://files.pythonhosted.org/packages/97/86/e5b50247a61caec5718122feb2719ea9d451d30ac0516c288c1dbc6408e8/ormsgpack-1.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a025ec07ac52056ecfd9e57b5cbc6fff163f62cb9805012b56cda599157f8ef2", size = 207718, upload-time = "2025-10-08T17:29:04.545Z" },
]

[[package]]
name = "packaging"
version = "25.0"
@@ -809,7 +942,7 @@ wheels = [

[[package]]
name = "pydantic"
version = "2.11.9"
version = "2.12.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
@@ -817,96 +950,123 @@ dependencies = [
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ff/5d/09a551ba512d7ca404d785072700d3f6727a02f6f3c24ecfd081c7cf0aa8/pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2", size = 788495, upload-time = "2025-09-13T11:26:39.325Z" }
sdist = { url = "https://files.pythonhosted.org/packages/f3/1e/4f0a3233767010308f2fd6bd0814597e3f63f1dc98304a9112b8759df4ff/pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74", size = 819383, upload-time = "2025-10-17T15:04:21.222Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/3e/d3/108f2006987c58e76691d5ae5d200dd3e0f532cb4e5fa3560751c3a1feba/pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2", size = 444855, upload-time = "2025-09-13T11:26:36.909Z" },
{ url = "https://files.pythonhosted.org/packages/a1/6b/83661fa77dcefa195ad5f8cd9af3d1a7450fd57cc883ad04d65446ac2029/pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf", size = 462431, upload-time = "2025-10-17T15:04:19.346Z" },
]

[[package]]
name = "pydantic-core"
version = "2.33.2"
version = "2.41.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" }
sdist = { url = "https://files.pythonhosted.org/packages/df/18/d0944e8eaaa3efd0a91b0f1fc537d3be55ad35091b6a87638211ba691964/pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5", size = 457557, upload-time = "2025-10-14T10:23:47.909Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" },
{ url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" },
{ url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" },
{ url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" },
{ url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" },
{ url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" },
{ url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" },
{ url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" },
{ url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" },
{ url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" },
{ url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" },
{ url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" },
{ url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" },
{ url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" },
{ url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" },
{ url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" },
{ url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" },
{ url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" },
{ url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" },
{ url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" },
{ url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" },
{ url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" },
{ url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" },
{ url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" },
{ url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" },
{ url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" },
{ url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" },
{ url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" },
{ url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" },
{ url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" },
{ url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" },
{ url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" },
{ url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" },
{ url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" },
{ url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" },
{ url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" },
{ url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" },
{ url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" },
{ url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" },
{ url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" },
{ url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" },
{ url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" },
{ url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" },
{ url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" },
{ url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" },
{ url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" },
{ url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" },
{ url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" },
{ url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" },
{ url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" },
{ url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" },
{ url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" },
{ url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" },
{ url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" },
{ url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" },
{ url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" },
{ url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" },
{ url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
{ url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" },
{ url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" },
{ url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" },
{ url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" },
{ url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" },
{ url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" },
{ url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" },
{ url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" },
{ url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" },
{ url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" },
{ url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" },
{ url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" },
{ url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" },
{ url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" },
{ url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" },
{ url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" },
{ url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" },
{ url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" },
{ url = "https://files.pythonhosted.org/packages/a7/3d/9b8ca77b0f76fcdbf8bc6b72474e264283f461284ca84ac3fde570c6c49a/pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e", size = 2111197, upload-time = "2025-10-14T10:19:43.303Z" },
{ url = "https://files.pythonhosted.org/packages/59/92/b7b0fe6ed4781642232755cb7e56a86e2041e1292f16d9ae410a0ccee5ac/pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b", size = 1917909, upload-time = "2025-10-14T10:19:45.194Z" },
{ url = "https://files.pythonhosted.org/packages/52/8c/3eb872009274ffa4fb6a9585114e161aa1a0915af2896e2d441642929fe4/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd", size = 1969905, upload-time = "2025-10-14T10:19:46.567Z" },
{ url = "https://files.pythonhosted.org/packages/f4/21/35adf4a753bcfaea22d925214a0c5b880792e3244731b3f3e6fec0d124f7/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945", size = 2051938, upload-time = "2025-10-14T10:19:48.237Z" },
{ url = "https://files.pythonhosted.org/packages/7d/d0/cdf7d126825e36d6e3f1eccf257da8954452934ede275a8f390eac775e89/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706", size = 2250710, upload-time = "2025-10-14T10:19:49.619Z" },
{ url = "https://files.pythonhosted.org/packages/2e/1c/af1e6fd5ea596327308f9c8d1654e1285cc3d8de0d584a3c9d7705bf8a7c/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba", size = 2367445, upload-time = "2025-10-14T10:19:51.269Z" },
{ url = "https://files.pythonhosted.org/packages/d3/81/8cece29a6ef1b3a92f956ea6da6250d5b2d2e7e4d513dd3b4f0c7a83dfea/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b", size = 2072875, upload-time = "2025-10-14T10:19:52.671Z" },
{ url = "https://files.pythonhosted.org/packages/e3/37/a6a579f5fc2cd4d5521284a0ab6a426cc6463a7b3897aeb95b12f1ba607b/pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d", size = 2191329, upload-time = "2025-10-14T10:19:54.214Z" },
{ url = "https://files.pythonhosted.org/packages/ae/03/505020dc5c54ec75ecba9f41119fd1e48f9e41e4629942494c4a8734ded1/pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700", size = 2151658, upload-time = "2025-10-14T10:19:55.843Z" },
{ url = "https://files.pythonhosted.org/packages/cb/5d/2c0d09fb53aa03bbd2a214d89ebfa6304be7df9ed86ee3dc7770257f41ee/pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6", size = 2316777, upload-time = "2025-10-14T10:19:57.607Z" },
{ url = "https://files.pythonhosted.org/packages/ea/4b/c2c9c8f5e1f9c864b57d08539d9d3db160e00491c9f5ee90e1bfd905e644/pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9", size = 2320705, upload-time = "2025-10-14T10:19:59.016Z" },
{ url = "https://files.pythonhosted.org/packages/28/c3/a74c1c37f49c0a02c89c7340fafc0ba816b29bd495d1a31ce1bdeacc6085/pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57", size = 1975464, upload-time = "2025-10-14T10:20:00.581Z" },
{ url = "https://files.pythonhosted.org/packages/d6/23/5dd5c1324ba80303368f7569e2e2e1a721c7d9eb16acb7eb7b7f85cb1be2/pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc", size = 2024497, upload-time = "2025-10-14T10:20:03.018Z" },
{ url = "https://files.pythonhosted.org/packages/62/4c/f6cbfa1e8efacd00b846764e8484fe173d25b8dab881e277a619177f3384/pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80", size = 2109062, upload-time = "2025-10-14T10:20:04.486Z" },
{ url = "https://files.pythonhosted.org/packages/21/f8/40b72d3868896bfcd410e1bd7e516e762d326201c48e5b4a06446f6cf9e8/pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae", size = 1916301, upload-time = "2025-10-14T10:20:06.857Z" },
{ url = "https://files.pythonhosted.org/packages/94/4d/d203dce8bee7faeca791671c88519969d98d3b4e8f225da5b96dad226fc8/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827", size = 1968728, upload-time = "2025-10-14T10:20:08.353Z" },
{ url = "https://files.pythonhosted.org/packages/65/f5/6a66187775df87c24d526985b3a5d78d861580ca466fbd9d4d0e792fcf6c/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f", size = 2050238, upload-time = "2025-10-14T10:20:09.766Z" },
{ url = "https://files.pythonhosted.org/packages/5e/b9/78336345de97298cf53236b2f271912ce11f32c1e59de25a374ce12f9cce/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def", size = 2249424, upload-time = "2025-10-14T10:20:11.732Z" },
{ url = "https://files.pythonhosted.org/packages/99/bb/a4584888b70ee594c3d374a71af5075a68654d6c780369df269118af7402/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2", size = 2366047, upload-time = "2025-10-14T10:20:13.647Z" },
{ url = "https://files.pythonhosted.org/packages/5f/8d/17fc5de9d6418e4d2ae8c675f905cdafdc59d3bf3bf9c946b7ab796a992a/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8", size = 2071163, upload-time = "2025-10-14T10:20:15.307Z" },
{ url = "https://files.pythonhosted.org/packages/54/e7/03d2c5c0b8ed37a4617430db68ec5e7dbba66358b629cd69e11b4d564367/pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265", size = 2190585, upload-time = "2025-10-14T10:20:17.3Z" },
{ url = "https://files.pythonhosted.org/packages/be/fc/15d1c9fe5ad9266a5897d9b932b7f53d7e5cfc800573917a2c5d6eea56ec/pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c", size = 2150109, upload-time = "2025-10-14T10:20:19.143Z" },
{ url = "https://files.pythonhosted.org/packages/26/ef/e735dd008808226c83ba56972566138665b71477ad580fa5a21f0851df48/pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a", size = 2315078, upload-time = "2025-10-14T10:20:20.742Z" },
{ url = "https://files.pythonhosted.org/packages/90/00/806efdcf35ff2ac0f938362350cd9827b8afb116cc814b6b75cf23738c7c/pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e", size = 2318737, upload-time = "2025-10-14T10:20:22.306Z" },
{ url = "https://files.pythonhosted.org/packages/41/7e/6ac90673fe6cb36621a2283552897838c020db343fa86e513d3f563b196f/pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03", size = 1974160, upload-time = "2025-10-14T10:20:23.817Z" },
{ url = "https://files.pythonhosted.org/packages/e0/9d/7c5e24ee585c1f8b6356e1d11d40ab807ffde44d2db3b7dfd6d20b09720e/pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e", size = 2021883, upload-time = "2025-10-14T10:20:25.48Z" },
{ url = "https://files.pythonhosted.org/packages/33/90/5c172357460fc28b2871eb4a0fb3843b136b429c6fa827e4b588877bf115/pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db", size = 1968026, upload-time = "2025-10-14T10:20:27.039Z" },
{ url = "https://files.pythonhosted.org/packages/e9/81/d3b3e95929c4369d30b2a66a91db63c8ed0a98381ae55a45da2cd1cc1288/pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887", size = 2099043, upload-time = "2025-10-14T10:20:28.561Z" },
{ url = "https://files.pythonhosted.org/packages/58/da/46fdac49e6717e3a94fc9201403e08d9d61aa7a770fab6190b8740749047/pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2", size = 1910699, upload-time = "2025-10-14T10:20:30.217Z" },
{ url = "https://files.pythonhosted.org/packages/1e/63/4d948f1b9dd8e991a5a98b77dd66c74641f5f2e5225fee37994b2e07d391/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999", size = 1952121, upload-time = "2025-10-14T10:20:32.246Z" },
{ url = "https://files.pythonhosted.org/packages/b2/a7/e5fc60a6f781fc634ecaa9ecc3c20171d238794cef69ae0af79ac11b89d7/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4", size = 2041590, upload-time = "2025-10-14T10:20:34.332Z" },
{ url = "https://files.pythonhosted.org/packages/70/69/dce747b1d21d59e85af433428978a1893c6f8a7068fa2bb4a927fba7a5ff/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f", size = 2219869, upload-time = "2025-10-14T10:20:35.965Z" },
{ url = "https://files.pythonhosted.org/packages/83/6a/c070e30e295403bf29c4df1cb781317b6a9bac7cd07b8d3acc94d501a63c/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b", size = 2345169, upload-time = "2025-10-14T10:20:37.627Z" },
{ url = "https://files.pythonhosted.org/packages/f0/83/06d001f8043c336baea7fd202a9ac7ad71f87e1c55d8112c50b745c40324/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47", size = 2070165, upload-time = "2025-10-14T10:20:39.246Z" },
{ url = "https://files.pythonhosted.org/packages/14/0a/e567c2883588dd12bcbc110232d892cf385356f7c8a9910311ac997ab715/pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970", size = 2189067, upload-time = "2025-10-14T10:20:41.015Z" },
{ url = "https://files.pythonhosted.org/packages/f4/1d/3d9fca34273ba03c9b1c5289f7618bc4bd09c3ad2289b5420481aa051a99/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed", size = 2132997, upload-time = "2025-10-14T10:20:43.106Z" },
{ url = "https://files.pythonhosted.org/packages/52/70/d702ef7a6cd41a8afc61f3554922b3ed8d19dd54c3bd4bdbfe332e610827/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8", size = 2307187, upload-time = "2025-10-14T10:20:44.849Z" },
{ url = "https://files.pythonhosted.org/packages/68/4c/c06be6e27545d08b802127914156f38d10ca287a9e8489342793de8aae3c/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431", size = 2305204, upload-time = "2025-10-14T10:20:46.781Z" },
{ url = "https://files.pythonhosted.org/packages/b0/e5/35ae4919bcd9f18603419e23c5eaf32750224a89d41a8df1a3704b69f77e/pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd", size = 1972536, upload-time = "2025-10-14T10:20:48.39Z" },
{ url = "https://files.pythonhosted.org/packages/1e/c2/49c5bb6d2a49eb2ee3647a93e3dae7080c6409a8a7558b075027644e879c/pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff", size = 2031132, upload-time = "2025-10-14T10:20:50.421Z" },
{ url = "https://files.pythonhosted.org/packages/06/23/936343dbcba6eec93f73e95eb346810fc732f71ba27967b287b66f7b7097/pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8", size = 1969483, upload-time = "2025-10-14T10:20:52.35Z" },
{ url = "https://files.pythonhosted.org/packages/13/d0/c20adabd181a029a970738dfe23710b52a31f1258f591874fcdec7359845/pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746", size = 2105688, upload-time = "2025-10-14T10:20:54.448Z" },
{ url = "https://files.pythonhosted.org/packages/00/b6/0ce5c03cec5ae94cca220dfecddc453c077d71363b98a4bbdb3c0b22c783/pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced", size = 1910807, upload-time = "2025-10-14T10:20:56.115Z" },
{ url = "https://files.pythonhosted.org/packages/68/3e/800d3d02c8beb0b5c069c870cbb83799d085debf43499c897bb4b4aaff0d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a", size = 1956669, upload-time = "2025-10-14T10:20:57.874Z" },
{ url = "https://files.pythonhosted.org/packages/60/a4/24271cc71a17f64589be49ab8bd0751f6a0a03046c690df60989f2f95c2c/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02", size = 2051629, upload-time = "2025-10-14T10:21:00.006Z" },
{ url = "https://files.pythonhosted.org/packages/68/de/45af3ca2f175d91b96bfb62e1f2d2f1f9f3b14a734afe0bfeff079f78181/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1", size = 2224049, upload-time = "2025-10-14T10:21:01.801Z" },
{ url = "https://files.pythonhosted.org/packages/af/8f/ae4e1ff84672bf869d0a77af24fd78387850e9497753c432875066b5d622/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2", size = 2342409, upload-time = "2025-10-14T10:21:03.556Z" },
{ url = "https://files.pythonhosted.org/packages/18/62/273dd70b0026a085c7b74b000394e1ef95719ea579c76ea2f0cc8893736d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84", size = 2069635, upload-time = "2025-10-14T10:21:05.385Z" },
{ url = "https://files.pythonhosted.org/packages/30/03/cf485fff699b4cdaea469bc481719d3e49f023241b4abb656f8d422189fc/pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d", size = 2194284, upload-time = "2025-10-14T10:21:07.122Z" },
{ url = "https://files.pythonhosted.org/packages/f9/7e/c8e713db32405dfd97211f2fc0a15d6bf8adb7640f3d18544c1f39526619/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d", size = 2137566, upload-time = "2025-10-14T10:21:08.981Z" },
{ url = "https://files.pythonhosted.org/packages/04/f7/db71fd4cdccc8b75990f79ccafbbd66757e19f6d5ee724a6252414483fb4/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2", size = 2316809, upload-time = "2025-10-14T10:21:10.805Z" },
{ url = "https://files.pythonhosted.org/packages/76/63/a54973ddb945f1bca56742b48b144d85c9fc22f819ddeb9f861c249d5464/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab", size = 2311119, upload-time = "2025-10-14T10:21:12.583Z" },
{ url = "https://files.pythonhosted.org/packages/f8/03/5d12891e93c19218af74843a27e32b94922195ded2386f7b55382f904d2f/pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c", size = 1981398, upload-time = "2025-10-14T10:21:14.584Z" },
{ url = "https://files.pythonhosted.org/packages/be/d8/fd0de71f39db91135b7a26996160de71c073d8635edfce8b3c3681be0d6d/pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4", size = 2030735, upload-time = "2025-10-14T10:21:16.432Z" },
{ url = "https://files.pythonhosted.org/packages/72/86/c99921c1cf6650023c08bfab6fe2d7057a5142628ef7ccfa9921f2dda1d5/pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564", size = 1973209, upload-time = "2025-10-14T10:21:18.213Z" },
{ url = "https://files.pythonhosted.org/packages/36/0d/b5706cacb70a8414396efdda3d72ae0542e050b591119e458e2490baf035/pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4", size = 1877324, upload-time = "2025-10-14T10:21:20.363Z" },
{ url = "https://files.pythonhosted.org/packages/de/2d/cba1fa02cfdea72dfb3a9babb067c83b9dff0bbcb198368e000a6b756ea7/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2", size = 1884515, upload-time = "2025-10-14T10:21:22.339Z" },
{ url = "https://files.pythonhosted.org/packages/07/ea/3df927c4384ed9b503c9cc2d076cf983b4f2adb0c754578dfb1245c51e46/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf", size = 2042819, upload-time = "2025-10-14T10:21:26.683Z" },
{ url = "https://files.pythonhosted.org/packages/6a/ee/df8e871f07074250270a3b1b82aad4cd0026b588acd5d7d3eb2fcb1471a3/pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2", size = 1995866, upload-time = "2025-10-14T10:21:28.951Z" },
{ url = "https://files.pythonhosted.org/packages/fc/de/b20f4ab954d6d399499c33ec4fafc46d9551e11dc1858fb7f5dca0748ceb/pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89", size = 1970034, upload-time = "2025-10-14T10:21:30.869Z" },
{ url = "https://files.pythonhosted.org/packages/54/28/d3325da57d413b9819365546eb9a6e8b7cbd9373d9380efd5f74326143e6/pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1", size = 2102022, upload-time = "2025-10-14T10:21:32.809Z" },
{ url = "https://files.pythonhosted.org/packages/9e/24/b58a1bc0d834bf1acc4361e61233ee217169a42efbdc15a60296e13ce438/pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac", size = 1905495, upload-time = "2025-10-14T10:21:34.812Z" },
{ url = "https://files.pythonhosted.org/packages/fb/a4/71f759cc41b7043e8ecdaab81b985a9b6cad7cec077e0b92cff8b71ecf6b/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554", size = 1956131, upload-time = "2025-10-14T10:21:36.924Z" },
{ url = "https://files.pythonhosted.org/packages/b0/64/1e79ac7aa51f1eec7c4cda8cbe456d5d09f05fdd68b32776d72168d54275/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e", size = 2052236, upload-time = "2025-10-14T10:21:38.927Z" },
{ url = "https://files.pythonhosted.org/packages/e9/e3/a3ffc363bd4287b80f1d43dc1c28ba64831f8dfc237d6fec8f2661138d48/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616", size = 2223573, upload-time = "2025-10-14T10:21:41.574Z" },
{ url = "https://files.pythonhosted.org/packages/28/27/78814089b4d2e684a9088ede3790763c64693c3d1408ddc0a248bc789126/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af", size = 2342467, upload-time = "2025-10-14T10:21:44.018Z" },
{ url = "https://files.pythonhosted.org/packages/92/97/4de0e2a1159cb85ad737e03306717637842c88c7fd6d97973172fb183149/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12", size = 2063754, upload-time = "2025-10-14T10:21:46.466Z" },
{ url = "https://files.pythonhosted.org/packages/0f/50/8cb90ce4b9efcf7ae78130afeb99fd1c86125ccdf9906ef64b9d42f37c25/pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d", size = 2196754, upload-time = "2025-10-14T10:21:48.486Z" },
{ url = "https://files.pythonhosted.org/packages/34/3b/ccdc77af9cd5082723574a1cc1bcae7a6acacc829d7c0a06201f7886a109/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad", size = 2137115, upload-time = "2025-10-14T10:21:50.63Z" },
{ url = "https://files.pythonhosted.org/packages/ca/ba/e7c7a02651a8f7c52dc2cff2b64a30c313e3b57c7d93703cecea76c09b71/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a", size = 2317400, upload-time = "2025-10-14T10:21:52.959Z" },
{ url = "https://files.pythonhosted.org/packages/2c/ba/6c533a4ee8aec6b812c643c49bb3bd88d3f01e3cebe451bb85512d37f00f/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025", size = 2312070, upload-time = "2025-10-14T10:21:55.419Z" },
{ url = "https://files.pythonhosted.org/packages/22/ae/f10524fcc0ab8d7f96cf9a74c880243576fd3e72bd8ce4f81e43d22bcab7/pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e", size = 1982277, upload-time = "2025-10-14T10:21:57.474Z" },
{ url = "https://files.pythonhosted.org/packages/b4/dc/e5aa27aea1ad4638f0c3fb41132f7eb583bd7420ee63204e2d4333a3bbf9/pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894", size = 2024608, upload-time = "2025-10-14T10:21:59.557Z" },
{ url = "https://files.pythonhosted.org/packages/3e/61/51d89cc2612bd147198e120a13f150afbf0bcb4615cddb049ab10b81b79e/pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d", size = 1967614, upload-time = "2025-10-14T10:22:01.847Z" },
{ url = "https://files.pythonhosted.org/packages/0d/c2/472f2e31b95eff099961fa050c376ab7156a81da194f9edb9f710f68787b/pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da", size = 1876904, upload-time = "2025-10-14T10:22:04.062Z" },
{ url = "https://files.pythonhosted.org/packages/4a/07/ea8eeb91173807ecdae4f4a5f4b150a520085b35454350fc219ba79e66a3/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e", size = 1882538, upload-time = "2025-10-14T10:22:06.39Z" },
{ url = "https://files.pythonhosted.org/packages/1e/29/b53a9ca6cd366bfc928823679c6a76c7a4c69f8201c0ba7903ad18ebae2f/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa", size = 2041183, upload-time = "2025-10-14T10:22:08.812Z" },
{ url = "https://files.pythonhosted.org/packages/c7/3d/f8c1a371ceebcaf94d6dd2d77c6cf4b1c078e13a5837aee83f760b4f7cfd/pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d", size = 1993542, upload-time = "2025-10-14T10:22:11.332Z" },
{ url = "https://files.pythonhosted.org/packages/8a/ac/9fc61b4f9d079482a290afe8d206b8f490e9fd32d4fc03ed4fc698214e01/pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0", size = 1973897, upload-time = "2025-10-14T10:22:13.444Z" },
{ url = "https://files.pythonhosted.org/packages/b0/12/5ba58daa7f453454464f92b3ca7b9d7c657d8641c48e370c3ebc9a82dd78/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b", size = 2122139, upload-time = "2025-10-14T10:22:47.288Z" },
{ url = "https://files.pythonhosted.org/packages/21/fb/6860126a77725c3108baecd10fd3d75fec25191d6381b6eb2ac660228eac/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42", size = 1936674, upload-time = "2025-10-14T10:22:49.555Z" },
{ url = "https://files.pythonhosted.org/packages/de/be/57dcaa3ed595d81f8757e2b44a38240ac5d37628bce25fb20d02c7018776/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee", size = 1956398, upload-time = "2025-10-14T10:22:52.19Z" },
{ url = "https://files.pythonhosted.org/packages/2f/1d/679a344fadb9695f1a6a294d739fbd21d71fa023286daeea8c0ed49e7c2b/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c", size = 2138674, upload-time = "2025-10-14T10:22:54.499Z" },
{ url = "https://files.pythonhosted.org/packages/c4/48/ae937e5a831b7c0dc646b2ef788c27cd003894882415300ed21927c21efa/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537", size = 2112087, upload-time = "2025-10-14T10:22:56.818Z" },
{ url = "https://files.pythonhosted.org/packages/5e/db/6db8073e3d32dae017da7e0d16a9ecb897d0a4d92e00634916e486097961/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94", size = 1920387, upload-time = "2025-10-14T10:22:59.342Z" },
{ url = "https://files.pythonhosted.org/packages/0d/c1/dd3542d072fcc336030d66834872f0328727e3b8de289c662faa04aa270e/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c", size = 1951495, upload-time = "2025-10-14T10:23:02.089Z" },
{ url = "https://files.pythonhosted.org/packages/2b/c6/db8d13a1f8ab3f1eb08c88bd00fd62d44311e3456d1e85c0e59e0a0376e7/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335", size = 2139008, upload-time = "2025-10-14T10:23:04.539Z" },
{ url = "https://files.pythonhosted.org/packages/5d/d4/912e976a2dd0b49f31c98a060ca90b353f3b73ee3ea2fd0030412f6ac5ec/pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00", size = 2106739, upload-time = "2025-10-14T10:23:06.934Z" },
{ url = "https://files.pythonhosted.org/packages/71/f0/66ec5a626c81eba326072d6ee2b127f8c139543f1bf609b4842978d37833/pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9", size = 1932549, upload-time = "2025-10-14T10:23:09.24Z" },
{ url = "https://files.pythonhosted.org/packages/c4/af/625626278ca801ea0a658c2dcf290dc9f21bb383098e99e7c6a029fccfc0/pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2", size = 2135093, upload-time = "2025-10-14T10:23:11.626Z" },
{ url = "https://files.pythonhosted.org/packages/20/f6/2fba049f54e0f4975fef66be654c597a1d005320fa141863699180c7697d/pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258", size = 2187971, upload-time = "2025-10-14T10:23:14.437Z" },
{ url = "https://files.pythonhosted.org/packages/0e/80/65ab839a2dfcd3b949202f9d920c34f9de5a537c3646662bdf2f7d999680/pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347", size = 2147939, upload-time = "2025-10-14T10:23:16.831Z" },
{ url = "https://files.pythonhosted.org/packages/44/58/627565d3d182ce6dfda18b8e1c841eede3629d59c9d7cbc1e12a03aeb328/pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa", size = 2311400, upload-time = "2025-10-14T10:23:19.234Z" },
{ url = "https://files.pythonhosted.org/packages/24/06/8a84711162ad5a5f19a88cead37cca81b4b1f294f46260ef7334ae4f24d3/pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a", size = 2316840, upload-time = "2025-10-14T10:23:21.738Z" },
{ url = "https://files.pythonhosted.org/packages/aa/8b/b7bb512a4682a2f7fbfae152a755d37351743900226d29bd953aaf870eaa/pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d", size = 2149135, upload-time = "2025-10-14T10:23:24.379Z" },
{ url = "https://files.pythonhosted.org/packages/7e/7d/138e902ed6399b866f7cfe4435d22445e16fff888a1c00560d9dc79a780f/pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5", size = 2104721, upload-time = "2025-10-14T10:23:26.906Z" },
{ url = "https://files.pythonhosted.org/packages/47/13/0525623cf94627f7b53b4c2034c81edc8491cbfc7c28d5447fa318791479/pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2", size = 1931608, upload-time = "2025-10-14T10:23:29.306Z" },
{ url = "https://files.pythonhosted.org/packages/d6/f9/744bc98137d6ef0a233f808bfc9b18cf94624bf30836a18d3b05d08bf418/pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd", size = 2132986, upload-time = "2025-10-14T10:23:32.057Z" },
{ url = "https://files.pythonhosted.org/packages/17/c8/629e88920171173f6049386cc71f893dff03209a9ef32b4d2f7e7c264bcf/pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c", size = 2187516, upload-time = "2025-10-14T10:23:34.871Z" },
{ url = "https://files.pythonhosted.org/packages/2e/0f/4f2734688d98488782218ca61bcc118329bf5de05bb7fe3adc7dd79b0b86/pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405", size = 2146146, upload-time = "2025-10-14T10:23:37.342Z" },
{ url = "https://files.pythonhosted.org/packages/ed/f2/ab385dbd94a052c62224b99cf99002eee99dbec40e10006c78575aead256/pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8", size = 2311296, upload-time = "2025-10-14T10:23:40.145Z" },
{ url = "https://files.pythonhosted.org/packages/fc/8e/e4f12afe1beeb9823bba5375f8f258df0cc61b056b0195fb1cf9f62a1a58/pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308", size = 2315386, upload-time = "2025-10-14T10:23:42.624Z" },
{ url = "https://files.pythonhosted.org/packages/48/f7/925f65d930802e3ea2eb4d5afa4cb8730c8dc0d2cb89a59dc4ed2fcb2d74/pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f", size = 2147775, upload-time = "2025-10-14T10:23:45.406Z" },
]
[[package]]
@@ -1327,6 +1487,124 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
]
[[package]]
name = "xxhash"
version = "3.6.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/02/84/30869e01909fb37a6cc7e18688ee8bf1e42d57e7e0777636bd47524c43c7/xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6", size = 85160, upload-time = "2025-10-02T14:37:08.097Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/ee/f9f1d656ad168681bb0f6b092372c1e533c4416b8069b1896a175c46e484/xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71", size = 32845, upload-time = "2025-10-02T14:33:51.573Z" },
{ url = "https://files.pythonhosted.org/packages/a3/b1/93508d9460b292c74a09b83d16750c52a0ead89c51eea9951cb97a60d959/xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d", size = 30807, upload-time = "2025-10-02T14:33:52.964Z" },
{ url = "https://files.pythonhosted.org/packages/07/55/28c93a3662f2d200c70704efe74aab9640e824f8ce330d8d3943bf7c9b3c/xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8", size = 193786, upload-time = "2025-10-02T14:33:54.272Z" },
{ url = "https://files.pythonhosted.org/packages/c1/96/fec0be9bb4b8f5d9c57d76380a366f31a1781fb802f76fc7cda6c84893c7/xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058", size = 212830, upload-time = "2025-10-02T14:33:55.706Z" },
{ url = "https://files.pythonhosted.org/packages/c4/a0/c706845ba77b9611f81fd2e93fad9859346b026e8445e76f8c6fd057cc6d/xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2", size = 211606, upload-time = "2025-10-02T14:33:57.133Z" },
{ url = "https://files.pythonhosted.org/packages/67/1e/164126a2999e5045f04a69257eea946c0dc3e86541b400d4385d646b53d7/xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc", size = 444872, upload-time = "2025-10-02T14:33:58.446Z" },
{ url = "https://files.pythonhosted.org/packages/2d/4b/55ab404c56cd70a2cf5ecfe484838865d0fea5627365c6c8ca156bd09c8f/xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc", size = 193217, upload-time = "2025-10-02T14:33:59.724Z" },
{ url = "https://files.pythonhosted.org/packages/45/e6/52abf06bac316db33aa269091ae7311bd53cfc6f4b120ae77bac1b348091/xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07", size = 210139, upload-time = "2025-10-02T14:34:02.041Z" },
{ url = "https://files.pythonhosted.org/packages/34/37/db94d490b8691236d356bc249c08819cbcef9273a1a30acf1254ff9ce157/xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4", size = 197669, upload-time = "2025-10-02T14:34:03.664Z" },
{ url = "https://files.pythonhosted.org/packages/b7/36/c4f219ef4a17a4f7a64ed3569bc2b5a9c8311abdb22249ac96093625b1a4/xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06", size = 210018, upload-time = "2025-10-02T14:34:05.325Z" },
{ url = "https://files.pythonhosted.org/packages/fd/06/bfac889a374fc2fc439a69223d1750eed2e18a7db8514737ab630534fa08/xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4", size = 413058, upload-time = "2025-10-02T14:34:06.925Z" },
{ url = "https://files.pythonhosted.org/packages/c9/d1/555d8447e0dd32ad0930a249a522bb2e289f0d08b6b16204cfa42c1f5a0c/xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b", size = 190628, upload-time = "2025-10-02T14:34:08.669Z" },
{ url = "https://files.pythonhosted.org/packages/d1/15/8751330b5186cedc4ed4b597989882ea05e0408b53fa47bcb46a6125bfc6/xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b", size = 30577, upload-time = "2025-10-02T14:34:10.234Z" },
{ url = "https://files.pythonhosted.org/packages/bb/cc/53f87e8b5871a6eb2ff7e89c48c66093bda2be52315a8161ddc54ea550c4/xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb", size = 31487, upload-time = "2025-10-02T14:34:11.618Z" },
{ url = "https://files.pythonhosted.org/packages/9f/00/60f9ea3bb697667a14314d7269956f58bf56bb73864f8f8d52a3c2535e9a/xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d", size = 27863, upload-time = "2025-10-02T14:34:12.619Z" },
{ url = "https://files.pythonhosted.org/packages/17/d4/cc2f0400e9154df4b9964249da78ebd72f318e35ccc425e9f403c392f22a/xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a", size = 32844, upload-time = "2025-10-02T14:34:14.037Z" },
{ url = "https://files.pythonhosted.org/packages/5e/ec/1cc11cd13e26ea8bc3cb4af4eaadd8d46d5014aebb67be3f71fb0b68802a/xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa", size = 30809, upload-time = "2025-10-02T14:34:15.484Z" },
{ url = "https://files.pythonhosted.org/packages/04/5f/19fe357ea348d98ca22f456f75a30ac0916b51c753e1f8b2e0e6fb884cce/xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248", size = 194665, upload-time = "2025-10-02T14:34:16.541Z" },
{ url = "https://files.pythonhosted.org/packages/90/3b/d1f1a8f5442a5fd8beedae110c5af7604dc37349a8e16519c13c19a9a2de/xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62", size = 213550, upload-time = "2025-10-02T14:34:17.878Z" },
{ url = "https://files.pythonhosted.org/packages/c4/ef/3a9b05eb527457d5db13a135a2ae1a26c80fecd624d20f3e8dcc4cb170f3/xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f", size = 212384, upload-time = "2025-10-02T14:34:19.182Z" },
{ url = "https://files.pythonhosted.org/packages/0f/18/ccc194ee698c6c623acbf0f8c2969811a8a4b6185af5e824cd27b9e4fd3e/xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e", size = 445749, upload-time = "2025-10-02T14:34:20.659Z" },
{ url = "https://files.pythonhosted.org/packages/a5/86/cf2c0321dc3940a7aa73076f4fd677a0fb3e405cb297ead7d864fd90847e/xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8", size = 193880, upload-time = "2025-10-02T14:34:22.431Z" },
{ url = "https://files.pythonhosted.org/packages/82/fb/96213c8560e6f948a1ecc9a7613f8032b19ee45f747f4fca4eb31bb6d6ed/xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0", size = 210912, upload-time = "2025-10-02T14:34:23.937Z" },
{ url = "https://files.pythonhosted.org/packages/40/aa/4395e669b0606a096d6788f40dbdf2b819d6773aa290c19e6e83cbfc312f/xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77", size = 198654, upload-time = "2025-10-02T14:34:25.644Z" },
{ url = "https://files.pythonhosted.org/packages/67/74/b044fcd6b3d89e9b1b665924d85d3f400636c23590226feb1eb09e1176ce/xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c", size = 210867, upload-time = "2025-10-02T14:34:27.203Z" },
{ url = "https://files.pythonhosted.org/packages/bc/fd/3ce73bf753b08cb19daee1eb14aa0d7fe331f8da9c02dd95316ddfe5275e/xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b", size = 414012, upload-time = "2025-10-02T14:34:28.409Z" },
{ url = "https://files.pythonhosted.org/packages/ba/b3/5a4241309217c5c876f156b10778f3ab3af7ba7e3259e6d5f5c7d0129eb2/xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3", size = 191409, upload-time = "2025-10-02T14:34:29.696Z" },
{ url = "https://files.pythonhosted.org/packages/c0/01/99bfbc15fb9abb9a72b088c1d95219fc4782b7d01fc835bd5744d66dd0b8/xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd", size = 30574, upload-time = "2025-10-02T14:34:31.028Z" },
{ url = "https://files.pythonhosted.org/packages/65/79/9d24d7f53819fe301b231044ea362ce64e86c74f6e8c8e51320de248b3e5/xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef", size = 31481, upload-time = "2025-10-02T14:34:32.062Z" },
{ url = "https://files.pythonhosted.org/packages/30/4e/15cd0e3e8772071344eab2961ce83f6e485111fed8beb491a3f1ce100270/xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7", size = 27861, upload-time = "2025-10-02T14:34:33.555Z" },
{ url = "https://files.pythonhosted.org/packages/9a/07/d9412f3d7d462347e4511181dea65e47e0d0e16e26fbee2ea86a2aefb657/xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c", size = 32744, upload-time = "2025-10-02T14:34:34.622Z" },
{ url = "https://files.pythonhosted.org/packages/79/35/0429ee11d035fc33abe32dca1b2b69e8c18d236547b9a9b72c1929189b9a/xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204", size = 30816, upload-time = "2025-10-02T14:34:36.043Z" },
{ url = "https://files.pythonhosted.org/packages/b7/f2/57eb99aa0f7d98624c0932c5b9a170e1806406cdbcdb510546634a1359e0/xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490", size = 194035, upload-time = "2025-10-02T14:34:37.354Z" },
{ url = "https://files.pythonhosted.org/packages/4c/ed/6224ba353690d73af7a3f1c7cdb1fc1b002e38f783cb991ae338e1eb3d79/xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2", size = 212914, upload-time = "2025-10-02T14:34:38.6Z" },
{ url = "https://files.pythonhosted.org/packages/38/86/fb6b6130d8dd6b8942cc17ab4d90e223653a89aa32ad2776f8af7064ed13/xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa", size = 212163, upload-time = "2025-10-02T14:34:39.872Z" },
{ url = "https://files.pythonhosted.org/packages/ee/dc/e84875682b0593e884ad73b2d40767b5790d417bde603cceb6878901d647/xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0", size = 445411, upload-time = "2025-10-02T14:34:41.569Z" },
{ url = "https://files.pythonhosted.org/packages/11/4f/426f91b96701ec2f37bb2b8cec664eff4f658a11f3fa9d94f0a887ea6d2b/xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2", size = 193883, upload-time = "2025-10-02T14:34:43.249Z" },
{ url = "https://files.pythonhosted.org/packages/53/5a/ddbb83eee8e28b778eacfc5a85c969673e4023cdeedcfcef61f36731610b/xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9", size = 210392, upload-time = "2025-10-02T14:34:45.042Z" },
{ url = "https://files.pythonhosted.org/packages/1e/c2/ff69efd07c8c074ccdf0a4f36fcdd3d27363665bcdf4ba399abebe643465/xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e", size = 197898, upload-time = "2025-10-02T14:34:46.302Z" },
{ url = "https://files.pythonhosted.org/packages/58/ca/faa05ac19b3b622c7c9317ac3e23954187516298a091eb02c976d0d3dd45/xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374", size = 210655, upload-time = "2025-10-02T14:34:47.571Z" },
{ url = "https://files.pythonhosted.org/packages/d4/7a/06aa7482345480cc0cb597f5c875b11a82c3953f534394f620b0be2f700c/xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d", size = 414001, upload-time = "2025-10-02T14:34:49.273Z" },
{ url = "https://files.pythonhosted.org/packages/23/07/63ffb386cd47029aa2916b3d2f454e6cc5b9f5c5ada3790377d5430084e7/xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae", size = 191431, upload-time = "2025-10-02T14:34:50.798Z" },
{ url = "https://files.pythonhosted.org/packages/0f/93/14fde614cadb4ddf5e7cebf8918b7e8fac5ae7861c1875964f17e678205c/xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb", size = 30617, upload-time = "2025-10-02T14:34:51.954Z" },
{ url = "https://files.pythonhosted.org/packages/13/5d/0d125536cbe7565a83d06e43783389ecae0c0f2ed037b48ede185de477c0/xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c", size = 31534, upload-time = "2025-10-02T14:34:53.276Z" },
{ url = "https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" },
{ url = "https://files.pythonhosted.org/packages/33/76/35d05267ac82f53ae9b0e554da7c5e281ee61f3cad44c743f0fcd354f211/xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec", size = 32738, upload-time = "2025-10-02T14:34:55.839Z" },
{ url = "https://files.pythonhosted.org/packages/31/a8/3fbce1cd96534a95e35d5120637bf29b0d7f5d8fa2f6374e31b4156dd419/xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1", size = 30821, upload-time = "2025-10-02T14:34:57.219Z" },
{ url = "https://files.pythonhosted.org/packages/0c/ea/d387530ca7ecfa183cb358027f1833297c6ac6098223fd14f9782cd0015c/xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6", size = 194127, upload-time = "2025-10-02T14:34:59.21Z" },
{ url = "https://files.pythonhosted.org/packages/ba/0c/71435dcb99874b09a43b8d7c54071e600a7481e42b3e3ce1eb5226a5711a/xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263", size = 212975, upload-time = "2025-10-02T14:35:00.816Z" },
{ url = "https://files.pythonhosted.org/packages/84/7a/c2b3d071e4bb4a90b7057228a99b10d51744878f4a8a6dd643c8bd897620/xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546", size = 212241, upload-time = "2025-10-02T14:35:02.207Z" },
{ url = "https://files.pythonhosted.org/packages/81/5f/640b6eac0128e215f177df99eadcd0f1b7c42c274ab6a394a05059694c5a/xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89", size = 445471, upload-time = "2025-10-02T14:35:03.61Z" },
{ url = "https://files.pythonhosted.org/packages/5e/1e/3c3d3ef071b051cc3abbe3721ffb8365033a172613c04af2da89d5548a87/xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d", size = 193936, upload-time = "2025-10-02T14:35:05.013Z" },
{ url = "https://files.pythonhosted.org/packages/2c/bd/4a5f68381939219abfe1c22a9e3a5854a4f6f6f3c4983a87d255f21f2e5d/xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7", size = 210440, upload-time = "2025-10-02T14:35:06.239Z" },
{ url = "https://files.pythonhosted.org/packages/eb/37/b80fe3d5cfb9faff01a02121a0f4d565eb7237e9e5fc66e73017e74dcd36/xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db", size = 197990, upload-time = "2025-10-02T14:35:07.735Z" },
{ url = "https://files.pythonhosted.org/packages/d7/fd/2c0a00c97b9e18f72e1f240ad4e8f8a90fd9d408289ba9c7c495ed7dc05c/xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42", size = 210689, upload-time = "2025-10-02T14:35:09.438Z" },
{ url = "https://files.pythonhosted.org/packages/93/86/5dd8076a926b9a95db3206aba20d89a7fc14dd5aac16e5c4de4b56033140/xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11", size = 414068, upload-time = "2025-10-02T14:35:11.162Z" },
{ url = "https://files.pythonhosted.org/packages/af/3c/0bb129170ee8f3650f08e993baee550a09593462a5cddd8e44d0011102b1/xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd", size = 191495, upload-time = "2025-10-02T14:35:12.971Z" },
{ url = "https://files.pythonhosted.org/packages/e9/3a/6797e0114c21d1725e2577508e24006fd7ff1d8c0c502d3b52e45c1771d8/xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799", size = 30620, upload-time = "2025-10-02T14:35:14.129Z" },
{ url = "https://files.pythonhosted.org/packages/86/15/9bc32671e9a38b413a76d24722a2bf8784a132c043063a8f5152d390b0f9/xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392", size = 31542, upload-time = "2025-10-02T14:35:15.21Z" },
{ url = "https://files.pythonhosted.org/packages/39/c5/cc01e4f6188656e56112d6a8e0dfe298a16934b8c47a247236549a3f7695/xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6", size = 27880, upload-time = "2025-10-02T14:35:16.315Z" },
{ url = "https://files.pythonhosted.org/packages/f3/30/25e5321c8732759e930c555176d37e24ab84365482d257c3b16362235212/xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702", size = 32956, upload-time = "2025-10-02T14:35:17.413Z" },
{ url = "https://files.pythonhosted.org/packages/9f/3c/0573299560d7d9f8ab1838f1efc021a280b5ae5ae2e849034ef3dee18810/xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db", size = 31072, upload-time = "2025-10-02T14:35:18.844Z" },
{ url = "https://files.pythonhosted.org/packages/7a/1c/52d83a06e417cd9d4137722693424885cc9878249beb3a7c829e74bf7ce9/xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54", size = 196409, upload-time = "2025-10-02T14:35:20.31Z" },
{ url = "https://files.pythonhosted.org/packages/e3/8e/c6d158d12a79bbd0b878f8355432075fc82759e356ab5a111463422a239b/xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f", size = 215736, upload-time = "2025-10-02T14:35:21.616Z" },
{ url = "https://files.pythonhosted.org/packages/bc/68/c4c80614716345d55071a396cf03d06e34b5f4917a467faf43083c995155/xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5", size = 214833, upload-time = "2025-10-02T14:35:23.32Z" },
{ url = "https://files.pythonhosted.org/packages/7e/e9/ae27c8ffec8b953efa84c7c4a6c6802c263d587b9fc0d6e7cea64e08c3af/xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1", size = 448348, upload-time = "2025-10-02T14:35:25.111Z" },
{ url = "https://files.pythonhosted.org/packages/d7/6b/33e21afb1b5b3f46b74b6bd1913639066af218d704cc0941404ca717fc57/xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee", size = 196070, upload-time = "2025-10-02T14:35:26.586Z" },
{ url = "https://files.pythonhosted.org/packages/96/b6/fcabd337bc5fa624e7203aa0fa7d0c49eed22f72e93229431752bddc83d9/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd", size = 212907, upload-time = "2025-10-02T14:35:28.087Z" },
{ url = "https://files.pythonhosted.org/packages/4b/d3/9ee6160e644d660fcf176c5825e61411c7f62648728f69c79ba237250143/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729", size = 200839, upload-time = "2025-10-02T14:35:29.857Z" },
{ url = "https://files.pythonhosted.org/packages/0d/98/e8de5baa5109394baf5118f5e72ab21a86387c4f89b0e77ef3e2f6b0327b/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292", size = 213304, upload-time = "2025-10-02T14:35:31.222Z" },
{ url = "https://files.pythonhosted.org/packages/7b/1d/71056535dec5c3177eeb53e38e3d367dd1d16e024e63b1cee208d572a033/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf", size = 416930, upload-time = "2025-10-02T14:35:32.517Z" },
{ url = "https://files.pythonhosted.org/packages/dc/6c/5cbde9de2cd967c322e651c65c543700b19e7ae3e0aae8ece3469bf9683d/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033", size = 193787, upload-time = "2025-10-02T14:35:33.827Z" },
{ url = "https://files.pythonhosted.org/packages/19/fa/0172e350361d61febcea941b0cc541d6e6c8d65d153e85f850a7b256ff8a/xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec", size = 30916, upload-time = "2025-10-02T14:35:35.107Z" },
{ url = "https://files.pythonhosted.org/packages/ad/e6/e8cf858a2b19d6d45820f072eff1bea413910592ff17157cabc5f1227a16/xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8", size = 31799, upload-time = "2025-10-02T14:35:36.165Z" },
{ url = "https://files.pythonhosted.org/packages/56/15/064b197e855bfb7b343210e82490ae672f8bc7cdf3ddb02e92f64304ee8a/xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746", size = 28044, upload-time = "2025-10-02T14:35:37.195Z" },
{ url = "https://files.pythonhosted.org/packages/7e/5e/0138bc4484ea9b897864d59fce9be9086030825bc778b76cb5a33a906d37/xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e", size = 32754, upload-time = "2025-10-02T14:35:38.245Z" },
{ url = "https://files.pythonhosted.org/packages/18/d7/5dac2eb2ec75fd771957a13e5dda560efb2176d5203f39502a5fc571f899/xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405", size = 30846, upload-time = "2025-10-02T14:35:39.6Z" },
{ url = "https://files.pythonhosted.org/packages/fe/71/8bc5be2bb00deb5682e92e8da955ebe5fa982da13a69da5a40a4c8db12fb/xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3", size = 194343, upload-time = "2025-10-02T14:35:40.69Z" },
{ url = "https://files.pythonhosted.org/packages/e7/3b/52badfb2aecec2c377ddf1ae75f55db3ba2d321c5e164f14461c90837ef3/xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6", size = 213074, upload-time = "2025-10-02T14:35:42.29Z" },
{ url = "https://files.pythonhosted.org/packages/a2/2b/ae46b4e9b92e537fa30d03dbc19cdae57ed407e9c26d163895e968e3de85/xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063", size = 212388, upload-time = "2025-10-02T14:35:43.929Z" },
{ url = "https://files.pythonhosted.org/packages/f5/80/49f88d3afc724b4ac7fbd664c8452d6db51b49915be48c6982659e0e7942/xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7", size = 445614, upload-time = "2025-10-02T14:35:45.216Z" },
{ url = "https://files.pythonhosted.org/packages/ed/ba/603ce3961e339413543d8cd44f21f2c80e2a7c5cfe692a7b1f2cccf58f3c/xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b", size = 194024, upload-time = "2025-10-02T14:35:46.959Z" },
{ url = "https://files.pythonhosted.org/packages/78/d1/8e225ff7113bf81545cfdcd79eef124a7b7064a0bba53605ff39590b95c2/xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd", size = 210541, upload-time = "2025-10-02T14:35:48.301Z" },
{ url = "https://files.pythonhosted.org/packages/6f/58/0f89d149f0bad89def1a8dd38feb50ccdeb643d9797ec84707091d4cb494/xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0", size = 198305, upload-time = "2025-10-02T14:35:49.584Z" },
{ url = "https://files.pythonhosted.org/packages/11/38/5eab81580703c4df93feb5f32ff8fa7fe1e2c51c1f183ee4e48d4bb9d3d7/xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152", size = 210848, upload-time = "2025-10-02T14:35:50.877Z" },
{ url = "https://files.pythonhosted.org/packages/5e/6b/953dc4b05c3ce678abca756416e4c130d2382f877a9c30a20d08ee6a77c0/xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11", size = 414142, upload-time = "2025-10-02T14:35:52.15Z" },
{ url = "https://files.pythonhosted.org/packages/08/a9/238ec0d4e81a10eb5026d4a6972677cbc898ba6c8b9dbaec12ae001b1b35/xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5", size = 191547, upload-time = "2025-10-02T14:35:53.547Z" },
{ url = "https://files.pythonhosted.org/packages/f1/ee/3cf8589e06c2164ac77c3bf0aa127012801128f1feebf2a079272da5737c/xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f", size = 31214, upload-time = "2025-10-02T14:35:54.746Z" },
{ url = "https://files.pythonhosted.org/packages/02/5d/a19552fbc6ad4cb54ff953c3908bbc095f4a921bc569433d791f755186f1/xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad", size = 32290, upload-time = "2025-10-02T14:35:55.791Z" },
{ url = "https://files.pythonhosted.org/packages/b1/11/dafa0643bc30442c887b55baf8e73353a344ee89c1901b5a5c54a6c17d39/xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679", size = 28795, upload-time = "2025-10-02T14:35:57.162Z" },
{ url = "https://files.pythonhosted.org/packages/2c/db/0e99732ed7f64182aef4a6fb145e1a295558deec2a746265dcdec12d191e/xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4", size = 32955, upload-time = "2025-10-02T14:35:58.267Z" },
{ url = "https://files.pythonhosted.org/packages/55/f4/2a7c3c68e564a099becfa44bb3d398810cc0ff6749b0d3cb8ccb93f23c14/xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67", size = 31072, upload-time = "2025-10-02T14:35:59.382Z" },
{ url = "https://files.pythonhosted.org/packages/c6/d9/72a29cddc7250e8a5819dad5d466facb5dc4c802ce120645630149127e73/xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad", size = 196579, upload-time = "2025-10-02T14:36:00.838Z" },
{ url = "https://files.pythonhosted.org/packages/63/93/b21590e1e381040e2ca305a884d89e1c345b347404f7780f07f2cdd47ef4/xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b", size = 215854, upload-time = "2025-10-02T14:36:02.207Z" },
{ url = "https://files.pythonhosted.org/packages/ce/b8/edab8a7d4fa14e924b29be877d54155dcbd8b80be85ea00d2be3413a9ed4/xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b", size = 214965, upload-time = "2025-10-02T14:36:03.507Z" },
{ url = "https://files.pythonhosted.org/packages/27/67/dfa980ac7f0d509d54ea0d5a486d2bb4b80c3f1bb22b66e6a05d3efaf6c0/xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca", size = 448484, upload-time = "2025-10-02T14:36:04.828Z" },
{ url = "https://files.pythonhosted.org/packages/8c/63/8ffc2cc97e811c0ca5d00ab36604b3ea6f4254f20b7bc658ca825ce6c954/xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a", size = 196162, upload-time = "2025-10-02T14:36:06.182Z" },
{ url = "https://files.pythonhosted.org/packages/4b/77/07f0e7a3edd11a6097e990f6e5b815b6592459cb16dae990d967693e6ea9/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99", size = 213007, upload-time = "2025-10-02T14:36:07.733Z" },
{ url = "https://files.pythonhosted.org/packages/ae/d8/bc5fa0d152837117eb0bef6f83f956c509332ce133c91c63ce07ee7c4873/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3", size = 200956, upload-time = "2025-10-02T14:36:09.106Z" },
{ url = "https://files.pythonhosted.org/packages/26/a5/d749334130de9411783873e9b98ecc46688dad5db64ca6e04b02acc8b473/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6", size = 213401, upload-time = "2025-10-02T14:36:10.585Z" },
{ url = "https://files.pythonhosted.org/packages/89/72/abed959c956a4bfc72b58c0384bb7940663c678127538634d896b1195c10/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93", size = 417083, upload-time = "2025-10-02T14:36:12.276Z" },
{ url = "https://files.pythonhosted.org/packages/0c/b3/62fd2b586283b7d7d665fb98e266decadf31f058f1cf6c478741f68af0cb/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518", size = 193913, upload-time = "2025-10-02T14:36:14.025Z" },
{ url = "https://files.pythonhosted.org/packages/9a/9a/c19c42c5b3f5a4aad748a6d5b4f23df3bed7ee5445accc65a0fb3ff03953/xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119", size = 31586, upload-time = "2025-10-02T14:36:15.603Z" },
{ url = "https://files.pythonhosted.org/packages/03/d6/4cc450345be9924fd5dc8c590ceda1db5b43a0a889587b0ae81a95511360/xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f", size = 32526, upload-time = "2025-10-02T14:36:16.708Z" },
{ url = "https://files.pythonhosted.org/packages/0f/c9/7243eb3f9eaabd1a88a5a5acadf06df2d83b100c62684b7425c6a11bcaa8/xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95", size = 28898, upload-time = "2025-10-02T14:36:17.843Z" },
{ url = "https://files.pythonhosted.org/packages/93/1e/8aec23647a34a249f62e2398c42955acd9b4c6ed5cf08cbea94dc46f78d2/xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0", size = 30662, upload-time = "2025-10-02T14:37:01.743Z" },
{ url = "https://files.pythonhosted.org/packages/b8/0b/b14510b38ba91caf43006209db846a696ceea6a847a0c9ba0a5b1adc53d6/xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296", size = 41056, upload-time = "2025-10-02T14:37:02.879Z" },
{ url = "https://files.pythonhosted.org/packages/50/55/15a7b8a56590e66ccd374bbfa3f9ffc45b810886c8c3b614e3f90bd2367c/xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13", size = 36251, upload-time = "2025-10-02T14:37:04.44Z" },
{ url = "https://files.pythonhosted.org/packages/62/b2/5ac99a041a29e58e95f907876b04f7067a0242cb85b5f39e726153981503/xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd", size = 32481, upload-time = "2025-10-02T14:37:05.869Z" },
{ url = "https://files.pythonhosted.org/packages/7b/d9/8d95e906764a386a3d3b596f3c68bb63687dfca806373509f51ce8eea81f/xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d", size = 31565, upload-time = "2025-10-02T14:37:06.966Z" },
]
[[package]]
name = "zstandard"
version = "0.25.0"
@@ -1,7 +1,14 @@
# 🦜🍎️ LangChain Core

[](https://opensource.org/licenses/MIT)
[](https://pypi.org/project/langchain-core/#history)
[](https://opensource.org/licenses/MIT)
[](https://pypistats.org/packages/langchain-core)
[](https://twitter.com/langchainai)

Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.

## Quick Install

@@ -9,16 +16,14 @@
pip install langchain-core
```

## What is it?
## 🤔 What is this?

LangChain Core contains the base abstractions that power the the LangChain ecosystem.
LangChain Core contains the base abstractions that power the LangChain ecosystem.

These abstractions are designed to be as modular and simple as possible.

The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
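To make that concrete, here is a minimal sketch using only `langchain-core` (the question text is an invented placeholder):

```python
from langchain_core.prompts import ChatPromptTemplate

# A prompt template is one of the core abstractions: it is provider-agnostic
# and renders to message objects that any chat model integration can consume.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{question}"),
    ]
)

messages = prompt.format_messages(question="What does langchain-core provide?")
```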

For full documentation see the [API reference](https://reference.langchain.com/python/).

## ⛰️ Why build on top of LangChain Core?

The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
@@ -27,12 +32,16 @@ The LangChain ecosystem is built on top of `langchain-core`. Some of the benefit
- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
- **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.

## 📖 Documentation

For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).

## 📕 Releases & Versioning

See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning Policy](https://docs.langchain.com/oss/python/versioning).
See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.

## 💁 Contributing

As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.

For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing).
For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).

@@ -84,7 +84,7 @@ class AgentAction(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.
        """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "agent"]`
@@ -112,7 +112,7 @@ class AgentActionMessageLog(AgentAction):
    if (tool, tool_input) cannot be used to fully recreate the LLM
    prediction, and you need that LLM prediction (for future agent iteration).
    Compared to `log`, this is useful when the underlying LLM is a
    ChatModel (and therefore returns messages rather than a string)."""
    chat model (and therefore returns messages rather than a string)."""
    # Ignoring type because we're overriding the type from AgentAction.
    # And this is the correct thing to do in this case.
    # The type literal is used for serialization purposes.
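As a concrete illustration of that distinction, a hypothetical construction might look like this (all field values are invented):

```python
from langchain_core.agents import AgentActionMessageLog
from langchain_core.messages import AIMessage

# Keeping the raw chat-model message alongside the parsed tool call lets an
# agent replay the exact prediction on a later iteration.
action = AgentActionMessageLog(
    tool="search",
    tool_input={"query": "weather in SF"},
    log="Invoking search with {'query': 'weather in SF'}",
    message_log=[AIMessage(content="I should look up the weather in SF.")],
)
```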
@@ -161,7 +161,7 @@ class AgentFinish(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.
        """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "agent"]`

@@ -1,18 +1,17 @@
"""Cache classes.
"""Optional caching layer for language models.

Distinct from provider-based [prompt caching](https://docs.langchain.com/oss/python/langchain/models#prompt-caching).

!!! warning
    Beta Feature!
    This is a beta feature! Please be wary of deploying experimental code to production
    unless you've taken appropriate precautions.

**Cache** provides an optional caching layer for LLMs.
A cache is useful for two reasons:

Cache is useful for two reasons:

- It can save you money by reducing the number of API calls you make to the LLM
1. It can save you money by reducing the number of API calls you make to the LLM
   provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
  to the LLM provider.

Cache directly competes with Memory. See documentation for Pros and Cons.
2. It can speed up your application by reducing the number of API calls you make to the
   LLM provider.
"""

from __future__ import annotations
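For orientation, this layer is typically switched on process-wide through the global cache hook; a minimal sketch using the standard `set_llm_cache` entry point from `langchain_core.globals`:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Install a process-wide cache: repeated model calls with an identical
# (prompt, llm_string) pair are then answered from memory instead of
# hitting the provider again.
set_llm_cache(InMemoryCache())
```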
@@ -34,8 +33,8 @@ class BaseCache(ABC):

    The cache interface consists of the following methods:

    - lookup: Look up a value based on a prompt and llm_string.
    - update: Update the cache based on a prompt and llm_string.
    - lookup: Look up a value based on a prompt and `llm_string`.
    - update: Update the cache based on a prompt and `llm_string`.
    - clear: Clear the cache.

    In addition, the cache interface provides an async version of each method.
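A minimal conforming implementation might look like the following sketch; the plain-dict backing store is an illustrative choice, keyed on the `(prompt, llm_string)` 2-tuple the docstrings below describe:

```python
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache


class DictCache(BaseCache):
    """Illustrative cache keyed on the (prompt, llm_string) 2-tuple."""

    def __init__(self) -> None:
        self._store: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        # A hit returns the cached list of Generation objects; a miss returns None.
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: object) -> None:
        self._store.clear()
```

The async methods are inherited: as shown below, the base class delegates them to these sync implementations.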
@@ -47,14 +46,14 @@ class BaseCache(ABC):

    @abstractmethod
    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Look up based on prompt and llm_string.
        """Look up based on `prompt` and `llm_string`.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
@@ -63,27 +62,27 @@ class BaseCache(ABC):
                representation.

        Returns:
            On a cache miss, return None. On a cache hit, return the cached value.
            The cached value is a list of Generations (or subclasses).
            On a cache miss, return `None`. On a cache hit, return the cached value.
            The cached value is a list of `Generation` (or subclasses).
        """

    @abstractmethod
    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string.
        """Update cache based on `prompt` and `llm_string`.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the lookup method.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
                These invocation parameters are serialized into a string
                representation.
            return_val: The value to be cached. The value is a list of Generations
            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """

@@ -92,14 +91,14 @@ class BaseCache(ABC):
        """Clear cache that can take additional keyword arguments."""

    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Async look up based on prompt and llm_string.
        """Async look up based on `prompt` and `llm_string`.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
@@ -108,29 +107,29 @@ class BaseCache(ABC):
                representation.

        Returns:
            On a cache miss, return None. On a cache hit, return the cached value.
            The cached value is a list of Generations (or subclasses).
            On a cache miss, return `None`. On a cache hit, return the cached value.
            The cached value is a list of `Generation` (or subclasses).
        """
        return await run_in_executor(None, self.lookup, prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
        """Async update cache based on prompt and llm_string.
        """Async update cache based on `prompt` and `llm_string`.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the look up method.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
                These invocation parameters are serialized into a string
                representation.
            return_val: The value to be cached. The value is a list of Generations
            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        return await run_in_executor(None, self.update, prompt, llm_string, return_val)
|
||||
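The interface above is small enough to implement directly; a minimal sketch of a dict-backed cache against it (the delimiter-based key is an illustrative choice, not part of the contract):

```python
# Minimal sketch of a custom cache against the BaseCache interface above.
from typing import Any

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache


class DictCache(BaseCache):
    def __init__(self) -> None:
        self._store: dict[str, RETURN_VAL_TYPE] = {}

    def _key(self, prompt: str, llm_string: str) -> str:
        # 2-tuple joined with a delimiter; any scheme works as long as
        # lookup and update agree on it.
        return f"{prompt}\x1e{llm_string}"

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        # Cache miss -> None; cache hit -> the cached list of Generations.
        return self._store.get(self._key(prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[self._key(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store.clear()
```

The async `alookup`/`aupdate` variants are inherited; per the diff above they delegate to the sync methods via `run_in_executor`.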
@@ -150,10 +149,9 @@ class InMemoryCache(BaseCache):

maxsize: The maximum number of items to store in the cache.
If `None`, the cache has no maximum size.
If the cache exceeds the maximum size, the oldest items are removed.
Default is None.

Raises:
ValueError: If maxsize is less than or equal to 0.
ValueError: If `maxsize` is less than or equal to `0`.
"""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
if maxsize is not None and maxsize <= 0:

@@ -162,28 +160,28 @@ class InMemoryCache(BaseCache):

self._maxsize = maxsize

def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Look up based on prompt and llm_string.
"""Look up based on `prompt` and `llm_string`.

Args:
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.

Returns:
On a cache miss, return None. On a cache hit, return the cached value.
On a cache miss, return `None`. On a cache hit, return the cached value.
"""
return self._cache.get((prompt, llm_string), None)

def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string.
"""Update cache based on `prompt` and `llm_string`.

Args:
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
return_val: The value to be cached. The value is a list of Generations
return_val: The value to be cached. The value is a list of `Generation`
(or subclasses).
"""
if self._maxsize is not None and len(self._cache) == self._maxsize:

@@ -196,30 +194,30 @@ class InMemoryCache(BaseCache):

self._cache = {}

async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Async look up based on prompt and llm_string.
"""Async look up based on `prompt` and `llm_string`.

Args:
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.

Returns:
On a cache miss, return None. On a cache hit, return the cached value.
On a cache miss, return `None`. On a cache hit, return the cached value.
"""
return self.lookup(prompt, llm_string)

async def aupdate(
self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
) -> None:
"""Async update cache based on prompt and llm_string.
"""Async update cache based on `prompt` and `llm_string`.

Args:
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
return_val: The value to be cached. The value is a list of Generations
return_val: The value to be cached. The value is a list of `Generation`
(or subclasses).
"""
self.update(prompt, llm_string, return_val)
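A quick illustration of the bounded-size behaviour documented above; the `Generation` payloads are stand-ins, and the oldest-entry eviction order is as described in the docstring:

```python
# Sketch of the bounded InMemoryCache behaviour documented above.
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache(maxsize=2)
cache.update("p1", "llm-config", [Generation(text="a")])
cache.update("p2", "llm-config", [Generation(text="b")])
cache.update("p3", "llm-config", [Generation(text="c")])  # at capacity: oldest is removed

assert cache.lookup("p1", "llm-config") is None  # "p1" was the oldest entry
assert cache.lookup("p3", "llm-config") is not None
```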
@@ -420,8 +420,6 @@ class RunManagerMixin:

(includes inherited tags).
metadata: The metadata associated with the custom event
(includes inherited metadata).

!!! version-added "Added in version 0.2.15"
"""

@@ -882,8 +880,6 @@ class AsyncCallbackHandler(BaseCallbackHandler):

(includes inherited tags).
metadata: The metadata associated with the custom event
(includes inherited metadata).

!!! version-added "Added in version 0.2.15"
"""

@@ -1001,7 +997,7 @@ class BaseCallbackManager(CallbackManagerMixin):

Args:
handler: The handler to add.
inherit: Whether to inherit the handler. Default is True.
inherit: Whether to inherit the handler.
"""
if handler not in self.handlers:
self.handlers.append(handler)

@@ -1028,7 +1024,7 @@ class BaseCallbackManager(CallbackManagerMixin):

Args:
handlers: The handlers to set.
inherit: Whether to inherit the handlers. Default is True.
inherit: Whether to inherit the handlers.
"""
self.handlers = []
self.inheritable_handlers = []

@@ -1044,7 +1040,7 @@ class BaseCallbackManager(CallbackManagerMixin):

Args:
handler: The handler to set.
inherit: Whether to inherit the handler. Default is True.
inherit: Whether to inherit the handler.
"""
self.set_handlers([handler], inherit=inherit)

@@ -1057,7 +1053,7 @@ class BaseCallbackManager(CallbackManagerMixin):

Args:
tags: The tags to add.
inherit: Whether to inherit the tags. Default is True.
inherit: Whether to inherit the tags.
"""
for tag in tags:
if tag in self.tags:

@@ -1087,7 +1083,7 @@ class BaseCallbackManager(CallbackManagerMixin):

Args:
metadata: The metadata to add.
inherit: Whether to inherit the metadata. Default is True.
inherit: Whether to inherit the metadata.
"""
self.metadata.update(metadata)
if inherit:
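A short sketch of the manager mutators touched in these hunks; handler, tags, and metadata all take the same `inherit` flag:

```python
# Sketch of the BaseCallbackManager mutators documented above.
from langchain_core.callbacks import CallbackManager, StdOutCallbackHandler

manager = CallbackManager(handlers=[])
manager.add_handler(StdOutCallbackHandler(), inherit=True)
manager.add_tags(["experiment-42"], inherit=True)
manager.add_metadata({"run": "demo"}, inherit=False)  # local to this manager only
```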
@@ -47,7 +47,7 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
filename: The file path to write to.
mode: The file open mode. Defaults to `'a'` (append).
color: Default color for text output. Defaults to `None`.
color: Default color for text output.

!!! note
When not used as a context manager, a deprecation warning will be issued

@@ -64,7 +64,7 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
filename: Path to the output file.
mode: File open mode (e.g., `'w'`, `'a'`, `'x'`). Defaults to `'a'`.
color: Default text color for output. Defaults to `None`.
color: Default text color for output.

"""
self.filename = filename

@@ -132,7 +132,7 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
text: The text to write to the file.
color: Optional color for the text. Defaults to `self.color`.
end: String appended after the text. Defaults to `""`.
end: String appended after the text.
file: Optional file to write to. Defaults to `self.file`.

Raises:

@@ -239,7 +239,7 @@ class FileCallbackHandler(BaseCallbackHandler):

text: The text to write.
color: Color override for this specific output. If `None`, uses
`self.color`.
end: String appended after the text. Defaults to `""`.
end: String appended after the text.
**kwargs: Additional keyword arguments.

"""
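Given the deprecation note above, the context-manager form is the one to reach for; a minimal sketch (the chain body is a placeholder):

```python
# Sketch of the context-manager usage the note above recommends.
from langchain_core.callbacks import FileCallbackHandler
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1)
with FileCallbackHandler("run_log.txt", mode="a") as handler:
    # The handler writes run output to the file and is closed on exit.
    chain.invoke(1, config={"callbacks": [handler]})
```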
@@ -79,13 +79,13 @@ def trace_as_chain_group(

Args:
group_name: The name of the chain group.
callback_manager: The callback manager to use. Defaults to `None`.
inputs: The inputs to the chain group. Defaults to `None`.
project_name: The name of the project. Defaults to `None`.
example_id: The ID of the example. Defaults to `None`.
callback_manager: The callback manager to use.
inputs: The inputs to the chain group.
project_name: The name of the project.
example_id: The ID of the example.
run_id: The ID of the run.
tags: The inheritable tags to apply to all runs. Defaults to `None`.
metadata: The metadata to apply to all runs. Defaults to `None`.
tags: The inheritable tags to apply to all runs.
metadata: The metadata to apply to all runs.

!!! note
Must have `LANGCHAIN_TRACING_V2` env var set to true to see the trace in

@@ -155,13 +155,13 @@ async def atrace_as_chain_group(

Args:
group_name: The name of the chain group.
callback_manager: The async callback manager to use,
which manages tracing and other callback behavior. Defaults to `None`.
inputs: The inputs to the chain group. Defaults to `None`.
project_name: The name of the project. Defaults to `None`.
example_id: The ID of the example. Defaults to `None`.
which manages tracing and other callback behavior.
inputs: The inputs to the chain group.
project_name: The name of the project.
example_id: The ID of the example.
run_id: The ID of the run.
tags: The inheritable tags to apply to all runs. Defaults to `None`.
metadata: The metadata to apply to all runs. Defaults to `None`.
tags: The inheritable tags to apply to all runs.
metadata: The metadata to apply to all runs.

Yields:
The async callback manager for the chain group.
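For orientation, the sync grouping context manager is used roughly like this sketch (group name, inputs, and chain body are illustrative):

```python
# Sketch: group several runs under one traced parent run.
from langchain_core.callbacks import trace_as_chain_group
from langchain_core.runnables import RunnableLambda

double = RunnableLambda(lambda x: x * 2)
with trace_as_chain_group("preprocessing", inputs={"x": 3}) as manager:
    result = double.invoke(3, config={"callbacks": manager})
    manager.on_chain_end({"result": result})  # closes the group run
```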
@@ -462,11 +462,11 @@ class BaseRunManager(RunManagerMixin):

run_id: The ID of the run.
handlers: The list of handlers.
inheritable_handlers: The list of inheritable handlers.
parent_run_id: The ID of the parent run. Defaults to `None`.
tags: The list of tags. Defaults to `None`.
inheritable_tags: The list of inheritable tags. Defaults to `None`.
metadata: The metadata. Defaults to `None`.
inheritable_metadata: The inheritable metadata. Defaults to `None`.
parent_run_id: The ID of the parent run.
tags: The list of tags.
inheritable_tags: The list of inheritable tags.
metadata: The metadata.
inheritable_metadata: The inheritable metadata.

"""
self.run_id = run_id

@@ -557,7 +557,7 @@ class ParentRunManager(RunManager):

"""Get a child callback manager.

Args:
tag: The tag for the child callback manager. Defaults to `None`.
tag: The tag for the child callback manager.

Returns:
The child callback manager.

@@ -641,7 +641,7 @@ class AsyncParentRunManager(AsyncRunManager):

"""Get a child callback manager.

Args:
tag: The tag for the child callback manager. Defaults to `None`.
tag: The tag for the child callback manager.

Returns:
The child callback manager.

@@ -1303,7 +1303,7 @@ class CallbackManager(BaseCallbackManager):

Args:
serialized: The serialized LLM.
prompts: The list of prompts.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1354,7 +1354,7 @@ class CallbackManager(BaseCallbackManager):

Args:
serialized: The serialized LLM.
messages: The list of messages.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1408,7 +1408,7 @@ class CallbackManager(BaseCallbackManager):

Args:
serialized: The serialized chain.
inputs: The inputs to the chain.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1457,8 +1457,8 @@ class CallbackManager(BaseCallbackManager):

serialized: Serialized representation of the tool.
input_str: The input to the tool as a string.
Non-string inputs are cast to strings.
run_id: ID for the run. Defaults to `None`.
parent_run_id: The ID of the parent run. Defaults to `None`.
run_id: ID for the run.
parent_run_id: The ID of the parent run.
inputs: The original input to the tool if provided.
Recommended for usage instead of input_str when the original
input is needed.

@@ -1512,8 +1512,8 @@ class CallbackManager(BaseCallbackManager):

Args:
serialized: The serialized retriever.
query: The query.
run_id: The ID of the run. Defaults to `None`.
parent_run_id: The ID of the parent run. Defaults to `None`.
run_id: The ID of the run.
parent_run_id: The ID of the parent run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1562,13 +1562,10 @@ class CallbackManager(BaseCallbackManager):

Args:
name: The name of the adhoc event.
data: The data for the adhoc event.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.

Raises:
ValueError: If additional keyword arguments are passed.

!!! version-added "Added in version 0.2.14"

"""
if not self.handlers:
return

@@ -1782,7 +1779,7 @@ class AsyncCallbackManager(BaseCallbackManager):

Args:
serialized: The serialized LLM.
prompts: The list of prompts.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1870,7 +1867,7 @@ class AsyncCallbackManager(BaseCallbackManager):

Args:
serialized: The serialized LLM.
messages: The list of messages.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1941,7 +1938,7 @@ class AsyncCallbackManager(BaseCallbackManager):

Args:
serialized: The serialized chain.
inputs: The inputs to the chain.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.

Returns:

@@ -1988,8 +1985,8 @@ class AsyncCallbackManager(BaseCallbackManager):

Args:
serialized: The serialized tool.
input_str: The input to the tool.
run_id: The ID of the run. Defaults to `None`.
parent_run_id: The ID of the parent run. Defaults to `None`.
run_id: The ID of the run.
parent_run_id: The ID of the parent run.
**kwargs: Additional keyword arguments.

Returns:

@@ -2038,12 +2035,10 @@ class AsyncCallbackManager(BaseCallbackManager):

Args:
name: The name of the adhoc event.
data: The data for the adhoc event.
run_id: The ID of the run. Defaults to `None`.
run_id: The ID of the run.

Raises:
ValueError: If additional keyword arguments are passed.

!!! version-added "Added in version 0.2.14"
"""
if not self.handlers:
return

@@ -2082,8 +2077,8 @@ class AsyncCallbackManager(BaseCallbackManager):

Args:
serialized: The serialized retriever.
query: The query.
run_id: The ID of the run. Defaults to `None`.
parent_run_id: The ID of the parent run. Defaults to `None`.
run_id: The ID of the run.
parent_run_id: The ID of the parent run.
**kwargs: Additional keyword arguments.

Returns:

@@ -2555,9 +2550,6 @@ async def adispatch_custom_event(

This is due to a limitation in asyncio for python <= 3.10 that prevents
LangChain from automatically propagating the config object on the user's
behalf.

!!! version-added "Added in version 0.2.15"

"""
# Import locally to prevent circular imports.
from langchain_core.runnables.config import (  # noqa: PLC0415

@@ -2630,9 +2622,6 @@ def dispatch_custom_event(

foo_ = RunnableLambda(foo)
foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})
```

!!! version-added "Added in version 0.2.15"

"""
# Import locally to prevent circular imports.
from langchain_core.runnables.config import (  # noqa: PLC0415
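To make the custom-event plumbing concrete, a minimal sketch of dispatching and receiving one (handler class and event name are illustrative; the import path assumes `dispatch_custom_event` stays in `langchain_core.callbacks.manager`):

```python
# Sketch: dispatch a custom event from inside a runnable and receive it
# in a handler's on_custom_event hook.
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.runnables import RunnableLambda


class ProgressHandler(BaseCallbackHandler):
    def on_custom_event(self, name: str, data: Any, **kwargs: Any) -> None:
        print(f"custom event {name!r}: {data}")


def step(x: int) -> int:
    # Config is propagated automatically for sync code.
    dispatch_custom_event("progress", {"done": 1, "total": 2})
    return x + 1


RunnableLambda(step).invoke(1, config={"callbacks": [ProgressHandler()]})
```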
@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):

"""Initialize callback handler.

Args:
color: The color to use for the text. Defaults to `None`.
color: The color to use for the text.
"""
self.color = color

@@ -61,7 +61,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):

Args:
action: The agent action.
color: The color to use for the text. Defaults to `None`.
color: The color to use for the text.
**kwargs: Additional keyword arguments.
"""
print_text(action.log, color=color or self.color)

@@ -79,9 +79,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):

Args:
output: The output to print.
color: The color to use for the text. Defaults to `None`.
observation_prefix: The observation prefix. Defaults to `None`.
llm_prefix: The LLM prefix. Defaults to `None`.
color: The color to use for the text.
observation_prefix: The observation prefix.
llm_prefix: The LLM prefix.
**kwargs: Additional keyword arguments.
"""
output = str(output)

@@ -103,8 +103,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):

Args:
text: The text to print.
color: The color to use for the text. Defaults to `None`.
end: The end character to use. Defaults to "".
color: The color to use for the text.
end: The end character to use.
**kwargs: Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end)

@@ -117,7 +117,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):

Args:
finish: The agent finish.
color: The color to use for the text. Defaults to `None`.
color: The color to use for the text.
**kwargs: Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n")
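Attaching this handler to any runnable is enough to see its output; a one-liner sketch:

```python
# Sketch: print run output to stdout via the handler above.
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x.upper())
chain.invoke("hello", config={"callbacks": [StdOutCallbackHandler(color="green")]})
```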
@@ -153,7 +153,7 @@ class BaseChatMessageHistory(ABC):

Raises:
NotImplementedError: If the sub-class has not implemented an efficient
add_messages method.
`add_messages` method.
"""
if type(self).add_messages != BaseChatMessageHistory.add_messages:
# This means that the sub-class has implemented an efficient add_messages
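A minimal subclass that satisfies the efficient `add_messages` contract might look like this (list-backed, illustrative only):

```python
# Sketch of a history subclass with a batch add_messages implementation.
from collections.abc import Sequence

from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage


class ListChatHistory(BaseChatMessageHistory):
    def __init__(self) -> None:
        self.messages: list[BaseMessage] = []

    def add_messages(self, messages: Sequence[BaseMessage]) -> None:
        # One extend instead of N single-message appends.
        self.messages.extend(messages)

    def clear(self) -> None:
        self.messages = []
```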
@@ -35,38 +35,38 @@ class BaseLoader(ABC):  # noqa: B024

# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
"""Load data into Document objects.
"""Load data into `Document` objects.

Returns:
the documents.
The documents.
"""
return list(self.lazy_load())

async def aload(self) -> list[Document]:
"""Load data into Document objects.
"""Load data into `Document` objects.

Returns:
the documents.
The documents.
"""
return [document async for document in self.alazy_load()]

def load_and_split(
self, text_splitter: TextSplitter | None = None
) -> list[Document]:
"""Load Documents and split into chunks. Chunks are returned as Documents.
"""Load Documents and split into chunks. Chunks are returned as `Document`.

Do not override this method. It should be considered to be deprecated!

Args:
text_splitter: TextSplitter instance to use for splitting documents.
Defaults to RecursiveCharacterTextSplitter.
text_splitter: `TextSplitter` instance to use for splitting documents.
Defaults to `RecursiveCharacterTextSplitter`.

Raises:
ImportError: If langchain-text-splitters is not installed
and no text_splitter is provided.
ImportError: If `langchain-text-splitters` is not installed
and no `text_splitter` is provided.

Returns:
List of Documents.
List of `Document`.
"""
if text_splitter is None:
if not _HAS_TEXT_SPLITTERS:

@@ -86,10 +86,10 @@ class BaseLoader(ABC):  # noqa: B024

# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for Documents.
"""A lazy loader for `Document`.

Yields:
the documents.
The `Document` objects.
"""
if type(self).load != BaseLoader.load:
return iter(self.load())

@@ -97,10 +97,10 @@ class BaseLoader(ABC):  # noqa: B024

raise NotImplementedError(msg)

async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for Documents.
"""A lazy loader for `Document`.

Yields:
the documents.
The `Document` objects.
"""
iterator = await run_in_executor(None, self.lazy_load)
done = object()
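A custom loader only needs `lazy_load`; `load` and `aload` then come for free, as this sketch shows (the line-per-document scheme is illustrative):

```python
# Sketch: implement lazy_load once and inherit load()/aload().
from collections.abc import Iterator
from pathlib import Path

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineLoader(BaseLoader):
    """Yield one Document per non-empty line of a text file."""

    def __init__(self, path: str) -> None:
        self.path = path

    def lazy_load(self) -> Iterator[Document]:
        for i, line in enumerate(Path(self.path).read_text().splitlines()):
            if line.strip():
                yield Document(
                    page_content=line,
                    metadata={"source": self.path, "line": i},
                )
```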
@@ -115,7 +115,7 @@ class BaseBlobParser(ABC):

"""Abstract interface for blob parsers.

A blob parser provides a way to parse raw data stored in a blob into one
or more documents.
or more `Document` objects.

The parser can be composed with blob loaders, making it easy to reuse
a parser independent of how the blob was originally loaded.

@@ -128,25 +128,25 @@ class BaseBlobParser(ABC):

Subclasses are required to implement this method.

Args:
blob: Blob instance
blob: `Blob` instance

Returns:
Generator of documents
Generator of `Document` objects
"""

def parse(self, blob: Blob) -> list[Document]:
"""Eagerly parse the blob into a document or documents.
"""Eagerly parse the blob into a `Document` or `Document` objects.

This is a convenience method for interactive development environment.

Production applications should favor the lazy_parse method instead.
Production applications should favor the `lazy_parse` method instead.

Subclasses should generally not over-ride this parse method.

Args:
blob: Blob instance
blob: `Blob` instance

Returns:
List of documents
List of `Document` objects
"""
return list(self.lazy_parse(blob))
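The parser half of that composition can be as small as this sketch:

```python
# Sketch: a blob parser that emits one Document per blob.
from collections.abc import Iterator

from langchain_core.document_loaders import BaseBlobParser, Blob
from langchain_core.documents import Document


class TextBlobParser(BaseBlobParser):
    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        yield Document(
            page_content=blob.as_string(),
            metadata={"source": blob.source},
        )


parser = TextBlobParser()
docs = parser.parse(Blob.from_data("Hello, world!"))  # eager convenience wrapper
```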
@@ -34,9 +34,6 @@ class LangSmithLoader(BaseLoader):

```python
# -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
```

!!! version-added "Added in version 0.2.34"

"""

def __init__(

@@ -60,15 +57,15 @@ class LangSmithLoader(BaseLoader):

"""Create a LangSmith loader.

Args:
dataset_id: The ID of the dataset to filter by. Defaults to `None`.
dataset_name: The name of the dataset to filter by. Defaults to `None`.
dataset_id: The ID of the dataset to filter by.
dataset_name: The name of the dataset to filter by.
content_key: The inputs key to set as Document page content. `'.'` characters
are interpreted as nested keys. E.g. `content_key="first.second"` will
result in
`Document(page_content=format_content(example.inputs["first"]["second"]))`
format_content: Function for converting the content extracted from the example
inputs into a string. Defaults to JSON-encoding the contents.
example_ids: The IDs of the examples to filter by. Defaults to `None`.
example_ids: The IDs of the examples to filter by.
as_of: The dataset version tag OR
timestamp to retrieve the examples as of.
Response examples will only be those that were present at the time

@@ -76,10 +73,10 @@ class LangSmithLoader(BaseLoader):

splits: A list of dataset splits, which are
divisions of your dataset such as 'train', 'test', or 'validation'.
Returns examples only from the specified splits.
inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
offset: The offset to start from. Defaults to 0.
inline_s3_urls: Whether to inline S3 URLs.
offset: The offset to start from.
limit: The maximum number of examples to return.
metadata: Metadata to filter by. Defaults to `None`.
metadata: Metadata to filter by.
filter: A structured filter string to apply to the examples.
client: LangSmith Client. If not provided will be initialized from below args.
client_kwargs: Keyword args to pass to LangSmith client init. Should only be

@@ -38,8 +38,6 @@ class BaseMedia(Serializable):

Ideally this should be unique across the document collection and formatted
as a UUID, but this will not be enforced.

!!! version-added "Added in version 0.2.11"
"""

metadata: dict = Field(default_factory=dict)

@@ -57,51 +55,51 @@ class Blob(BaseMedia):

Example: Initialize a blob from in-memory data

```python
from langchain_core.documents import Blob
```python
from langchain_core.documents import Blob

blob = Blob.from_data("Hello, world!")
blob = Blob.from_data("Hello, world!")

# Read the blob as a string
print(blob.as_string())
# Read the blob as a string
print(blob.as_string())

# Read the blob as bytes
print(blob.as_bytes())
# Read the blob as bytes
print(blob.as_bytes())

# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```

Example: Load from memory and specify mime-type and metadata

```python
from langchain_core.documents import Blob
```python
from langchain_core.documents import Blob

blob = Blob.from_data(
data="Hello, world!",
mime_type="text/plain",
metadata={"source": "https://example.com"},
)
```
blob = Blob.from_data(
data="Hello, world!",
mime_type="text/plain",
metadata={"source": "https://example.com"},
)
```

Example: Load the blob from a file

```python
from langchain_core.documents import Blob
```python
from langchain_core.documents import Blob

blob = Blob.from_path("path/to/file.txt")
blob = Blob.from_path("path/to/file.txt")

# Read the blob as a string
print(blob.as_string())
# Read the blob as a string
print(blob.as_string())

# Read the blob as bytes
print(blob.as_bytes())
# Read the blob as bytes
print(blob.as_bytes())

# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```
"""

data: bytes | str | None = None

@@ -111,7 +109,7 @@ class Blob(BaseMedia):

encoding: str = "utf-8"
"""Encoding to use if decoding the bytes into a string.

Use utf-8 as default encoding, if decoding to string.
Use `utf-8` as default encoding, if decoding to string.
"""
path: PathLike | None = None
"""Location where the original content was found."""

@@ -127,7 +125,7 @@ class Blob(BaseMedia):

If a path is associated with the blob, it will default to the path location.

Unless explicitly set via a metadata field called "source", in which
Unless explicitly set via a metadata field called `"source"`, in which
case that value will be used instead.
"""
if self.metadata and "source" in self.metadata:

@@ -211,11 +209,11 @@ class Blob(BaseMedia):

"""Load the blob from a path like object.

Args:
path: path like object to file to be read
path: Path-like object to file to be read
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
mime_type: If provided, will be set as the mime-type of the data
guess_type: If `True`, the mimetype will be guessed from the file extension,
if a mime-type was not provided
if a mime-type was not provided
metadata: Metadata to associate with the blob

Returns:

@@ -248,10 +246,10 @@ class Blob(BaseMedia):

"""Initialize the blob from in-memory data.

Args:
data: the in-memory data associated with the blob
data: The in-memory data associated with the blob
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
path: if provided, will be set as the source from which the data came
mime_type: If provided, will be set as the mime-type of the data
path: If provided, will be set as the source from which the data came
metadata: Metadata to associate with the blob

Returns:

@@ -303,7 +301,7 @@ class Document(BaseMedia):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
["langchain", "schema", "document"]

@@ -57,10 +57,10 @@ class BaseDocumentTransformer(ABC):

"""Transform a list of documents.

Args:
documents: A sequence of Documents to be transformed.
documents: A sequence of `Document` objects to be transformed.

Returns:
A sequence of transformed Documents.
A sequence of transformed `Document` objects.
"""

async def atransform_documents(

@@ -69,10 +69,10 @@ class BaseDocumentTransformer(ABC):

"""Asynchronously transform a list of documents.

Args:
documents: A sequence of Documents to be transformed.
documents: A sequence of `Document` objects to be transformed.

Returns:
A sequence of transformed Documents.
A sequence of transformed `Document` objects.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs
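The transformer interface is a single required method; a minimal sketch that normalizes whitespace (the async variant is inherited via `run_in_executor`, per the hunk above):

```python
# Sketch of a document transformer (whitespace normalization).
from collections.abc import Sequence
from typing import Any

from langchain_core.documents import BaseDocumentTransformer, Document


class WhitespaceNormalizer(BaseDocumentTransformer):
    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        return [
            Document(
                page_content=" ".join(doc.page_content.split()),
                metadata=doc.metadata,
            )
            for doc in documents
        ]
```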
@@ -18,7 +18,8 @@ class FakeEmbeddings(Embeddings, BaseModel):

This embedding model creates embeddings by sampling from a normal distribution.

Do not use this outside of testing, as it is not a real embedding model.
!!! warning
Do not use this outside of testing, as it is not a real embedding model.

Instantiate:
```python

@@ -72,7 +73,8 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):

This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.

Do not use this outside of testing, as it is not a real embedding model.
!!! warning
Do not use this outside of testing, as it is not a real embedding model.

Instantiate:
```python

@@ -154,7 +154,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):

examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.

@@ -198,7 +198,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):

examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.

@@ -285,9 +285,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):

examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Default is 20.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.

@@ -333,9 +332,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):

examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Default is 20.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.

@@ -16,7 +16,7 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818

"""Exception that output parsers should raise to signify a parsing error.

This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser. OutputParserExceptions will be
that also may arise inside the output parser. `OutputParserException` will be
available to catch and handle in ways to fix the parsing error, while other
errors will be raised.
"""

@@ -28,20 +28,19 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818

llm_output: str | None = None,
send_to_llm: bool = False,  # noqa: FBT001,FBT002
):
"""Create an OutputParserException.
"""Create an `OutputParserException`.

Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue. Defaults to `None`.
model to try and remediate the issue.
llm_output: String model output which is error-ing.
Defaults to `None`.

send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised.
after an `OutputParserException` has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
Defaults to `False`.

Raises:
ValueError: If `send_to_llm` is True but either observation or
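In practice a parser raises the exception like this sketch (the yes/no format is illustrative; note that `send_to_llm=True` requires both `observation` and `llm_output`):

```python
# Sketch: raising OutputParserException from a custom parsing function.
from langchain_core.exceptions import OutputParserException


def parse_yes_no(text: str) -> bool:
    cleaned = text.strip().lower()
    if cleaned not in {"yes", "no"}:
        raise OutputParserException(
            f"Expected 'yes' or 'no', got {text!r}",
            observation="Answer with exactly 'yes' or 'no'.",
            llm_output=text,
            send_to_llm=True,  # let an agent feed the error back to the model
        )
    return cleaned == "yes"
```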
@@ -326,8 +326,8 @@ def index(

record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.

- incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen during indexing.

@@ -342,15 +342,12 @@ def index(

source ids that were seen during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document. Default is None.
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
Default is 1_000.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Default is "sha1".
Other options include "blake2b", "sha256", and "sha512".
metadata. Options include "blake2b", "sha256", and "sha512".

!!! version-added "Added in version 0.3.66"

@@ -667,8 +664,8 @@ async def aindex(

record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.

- incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen during indexing.

@@ -683,15 +680,12 @@ async def aindex(

source ids that were seen during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document. Default is None.
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
Default is 1_000.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Default is "sha1".
Other options include "blake2b", "sha256", and "sha512".
metadata. Options include "blake2b", "sha256", and "sha512".

!!! version-added "Added in version 0.3.66"
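Wiring these arguments together, an end-to-end indexing call might look like this sketch (deterministic fake embeddings keep it self-contained; names and sizes are illustrative):

```python
# Sketch: incremental indexing into an in-memory vector store.
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.indexing import InMemoryRecordManager, index
from langchain_core.vectorstores import InMemoryVectorStore

record_manager = InMemoryRecordManager(namespace="demo")
record_manager.create_schema()
vector_store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=16))

docs = [Document(page_content="hello", metadata={"source": "a.txt"})]
result = index(
    docs,
    record_manager,
    vector_store,
    cleanup="incremental",
    source_id_key="source",
)
print(result)  # counts of added / updated / skipped / deleted documents
```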
@@ -278,10 +278,10 @@ class InMemoryRecordManager(RecordManager):

Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
Defaults to `None`.

time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores. Defaults to `None`.
in the system that stores.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.

@@ -315,10 +315,10 @@ class InMemoryRecordManager(RecordManager):

Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
Defaults to `None`.

time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores. Defaults to `None`.
in the system that stores.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.

@@ -361,13 +361,13 @@ class InMemoryRecordManager(RecordManager):

Args:
before: Filter to list records updated before this time.
Defaults to `None`.

after: Filter to list records updated after this time.
Defaults to `None`.

group_ids: Filter to list records with specific group IDs.
Defaults to `None`.

limit: optional limit on the number of records to return.
Defaults to `None`.

Returns:
A list of keys for the matching records.

@@ -397,13 +397,13 @@ class InMemoryRecordManager(RecordManager):

Args:
before: Filter to list records updated before this time.
Defaults to `None`.

after: Filter to list records updated after this time.
Defaults to `None`.

group_ids: Filter to list records with specific group IDs.
Defaults to `None`.

limit: optional limit on the number of records to return.
Defaults to `None`.

Returns:
A list of keys for the matching records.

@@ -508,8 +508,6 @@ class DocumentIndex(BaseRetriever):

1. Storing document in the index.
2. Fetching document by ID.
3. Searching for document using a query.

!!! version-added "Added in version 0.2.29"
"""

@abc.abstractmethod

@@ -522,10 +520,10 @@ class DocumentIndex(BaseRetriever):

When an ID is specified and the content already exists in the vectorstore,
the upsert method should update the content with the new data. If the content
does not exist, the upsert method should add the item to the vectorstore.
does not exist, the upsert method should add the item to the `VectorStore`.

Args:
items: Sequence of documents to add to the vectorstore.
items: Sequence of documents to add to the `VectorStore`.
**kwargs: Additional keyword arguments.

Returns:

@@ -545,10 +543,10 @@ class DocumentIndex(BaseRetriever):

When an ID is specified and the item already exists in the vectorstore,
the upsert method should update the item with the new data. If the item
does not exist, the upsert method should add the item to the vectorstore.
does not exist, the upsert method should add the item to the `VectorStore`.

Args:
items: Sequence of documents to add to the vectorstore.
items: Sequence of documents to add to the `VectorStore`.
**kwargs: Additional keyword arguments.

Returns:

@@ -23,8 +23,6 @@ class InMemoryDocumentIndex(DocumentIndex):

It provides a simple search API that returns documents by the number of
counts the given query appears in the document.

!!! version-added "Added in version 0.2.29"
"""

store: dict[str, Document] = Field(default_factory=dict)

@@ -1,43 +1,29 @@

"""Language models.

**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: chat models and
"old-fashioned" LLMs.

LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.

**Chat Models**
**Chat models**

Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
as outputs (as opposed to using plain text). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.

The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides with more
information on how to implement a custom chat model.
should inherit from this class.

To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:

https://python.langchain.com/docs/how_to/custom_chat_model/
See existing [chat model integrations](https://docs.langchain.com/oss/python/integrations/chat).

**LLMs**

Language models that takes a string as input and returns a string.
These are traditionally older models (newer models generally are Chat Models,
see below).
These are traditionally older models (newer models generally are chat models).

Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.

To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:

https://python.langchain.com/docs/how_to/custom_llm/
Although the underlying models are string in, string out, the LangChain wrappers also
allow these models to take messages as input. This gives them the same interface as
chat models. When messages are passed in as input, they will be formatted into a string
under the hood before being passed to the underlying model.
"""

from typing import TYPE_CHECKING
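Per the guidance above, the smallest possible custom LLM only overrides `_call` and `_llm_type`; a toy echo model as a sketch:

```python
# Sketch: a toy custom LLM, per the "inherit from LLM" guidance above.
from typing import Any

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM


class EchoLLM(LLM):
    """Echoes the prompt back; stands in for a real provider call."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> str:
        return prompt


print(EchoLLM().invoke("Hello"))  # -> "Hello"
```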
@@ -35,7 +35,7 @@ def is_openai_data_block(

different type, this function will return False.

Returns:
True if the block is a valid OpenAI data block and matches the filter_
`True` if the block is a valid OpenAI data block and matches the filter_
(if provided).

"""

@@ -89,7 +89,8 @@ class ParsedDataUri(TypedDict):

def _parse_data_uri(uri: str) -> ParsedDataUri | None:
"""Parse a data URI into its components.

If parsing fails, return None. If either MIME type or data is missing, return None.
If parsing fails, return `None`. If either MIME type or data is missing, return
`None`.

Example:
```python

@@ -96,9 +96,16 @@ def _get_token_ids_default_method(text: str) -> list[int]:

LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
"""Input to a language model."""

LanguageModelOutput = BaseMessage | str
"""Output from a language model."""

LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
"""Input/output interface for a language model."""

LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
"""Type variable for the output of a language model."""

def _get_verbosity() -> bool:

@@ -123,7 +130,6 @@ class BaseLanguageModel(

* If instance of `BaseCache`, will use the provided cache.

Caching is not currently supported for streaming methods of models.

"""
verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
"""Whether to print out response text."""

@@ -146,7 +152,7 @@ class BaseLanguageModel(

def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
"""If verbose is `None`, set it.

This allows users to pass in None as verbose to access the global setting.
This allows users to pass in `None` as verbose to access the global setting.

Args:
verbose: The verbosity setting to use.

@@ -186,22 +192,22 @@ class BaseLanguageModel(

1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
prompts: List of `PromptValue` objects. A `PromptValue` is an object that
can be converted to match the format of any language model (string for
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.

"""

@@ -223,22 +229,22 @@ class BaseLanguageModel(

1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
prompts: List of `PromptValue` objects. A `PromptValue` is an object that
can be converted to match the format of any language model (string for
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.

Returns:
An `LLMResult`, which contains a list of candidate Generations for each
input prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.

"""

@@ -263,8 +269,7 @@ class BaseLanguageModel(

Returns:
A list of ids corresponding to the tokens in the text, in order they occur
in the text.

in the text.
"""
if self.custom_get_token_ids is not None:
return self.custom_get_token_ids(text)
@@ -240,79 +240,52 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di


class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    """Base class for chat models.
    r"""Base class for chat models.

    Key imperative methods:
        Methods that actually call the underlying model.

    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | Method | Input | Output | Description |
    +===========================+================================================================+=====================================================================+==================================================================================================+
    | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
    | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
    +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+

    This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.

    This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.

    | Method | Input | Output | Description |
    | ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
    | `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
    | `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
    | `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
    | `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
    | `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
    | `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
    | `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
    | `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
    | `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |

    Key declarative methods:
        Methods for creating another Runnable using the ChatModel.

    +----------------------------------+-----------------------------------------------------------------------------------------------------------+
    | Method | Description |
    +==================================+===========================================================================================================+
    | `bind_tools` | Create ChatModel that can call tools. |
    +----------------------------------+-----------------------------------------------------------------------------------------------------------+
    | `with_structured_output` | Create wrapper that structures model output using schema. |
    +----------------------------------+-----------------------------------------------------------------------------------------------------------+
    | `with_retry` | Create wrapper that retries model calls on failure. |
    +----------------------------------+-----------------------------------------------------------------------------------------------------------+
    | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
    +----------------------------------+-----------------------------------------------------------------------------------------------------------+
    | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
    +----------------------------------+-----------------------------------------------------------------------------------------------------------+
    | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
    +----------------------------------+-----------------------------------------------------------------------------------------------------------+

    Methods for creating another `Runnable` using the chat model.

    This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.

    | Method | Description |
    | ---------------------------- | ------------------------------------------------------------------------------------------ |
    | `bind_tools` | Create chat model that can call tools. |
    | `with_structured_output` | Create wrapper that structures model output using schema. |
    | `with_retry` | Create wrapper that retries model calls on failure. |
    | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
    | `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
    | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |

    Creating custom chat model:
        Custom chat model implementations should inherit from this class.
        Please reference the table below for information about which
        methods and properties are required or optional for implementations.

    +----------------------------------+--------------------------------------------------------------------+-------------------+
    | Method/Property | Description | Required/Optional |
    +==================================+====================================================================+===================+
    | Method/Property | Description | Required/Optional |
    | -------------------------------- | ------------------------------------------------------------------ | ----------------- |
    | `_generate` | Use to generate a chat result from a prompt | Required |
    +----------------------------------+--------------------------------------------------------------------+-------------------+
    | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
    +----------------------------------+--------------------------------------------------------------------+-------------------+
    | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
    +----------------------------------+--------------------------------------------------------------------+-------------------+
    | `_stream` | Use to implement streaming | Optional |
    +----------------------------------+--------------------------------------------------------------------+-------------------+
    | `_agenerate` | Use to implement a native async method | Optional |
    +----------------------------------+--------------------------------------------------------------------+-------------------+
    | `_astream` | Use to implement async version of `_stream` | Optional |
    +----------------------------------+--------------------------------------------------------------------+-------------------+

    Follow the guide for more information on how to implement a custom Chat Model:
    [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

    """  # noqa: E501
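A small sketch of the imperative surface from the tables above, using the built-in fake model so the snippet runs without credentials (responses are canned):

```python
from langchain_core.language_models import FakeListChatModel

model = FakeListChatModel(responses=["first", "second"])

print(model.invoke("hi").content)  # -> "first"; responses cycle in order

for message in model.batch(["a", "b"]):
    print(message.content)  # the next two responses from the cycle

for chunk in model.stream("hi"):
    print(chunk.content, end="")  # the fake model streams character by character
```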
@@ -327,9 +300,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

    - If `True`, will always bypass streaming case.
    - If `'tool_calling'`, will bypass streaming case only when the model is called
      with a `tools` keyword argument. In other words, LangChain will automatically
      switch to non-streaming behavior (`invoke`) only when the tools argument is
      provided. This offers the best of both worlds.
      with a `tools` keyword argument. In other words, LangChain will automatically
      switch to non-streaming behavior (`invoke`) only when the tools argument is
      provided. This offers the best of both worlds.
    - If `False` (Default), will always use streaming case if available.

    The main reason for this flag is that code might be written using `stream` and
@@ -349,11 +322,12 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    Supported values:

    - `'v0'`: provider-specific format in content (can lazily-parse with
      `.content_blocks`)
    - `'v1'`: standardized format in content (consistent with `.content_blocks`)
      `content_blocks`)
    - `'v1'`: standardized format in content (consistent with `content_blocks`)

    Partner packages (e.g., `langchain-openai`) can also use this field to roll out
    new content formats in a backward-compatible way.
    Partner packages (e.g.,
    [`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
    field to roll out new content formats in a backward-compatible way.

    !!! version-added "Added in version 1.0"

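Since `output_version` is an ordinary model field, it can be set on any `BaseChatModel` subclass; a sketch using the fake model (whether a given integration honors `'v1'` depends on the provider package):

```python
from langchain_core.language_models import FakeListChatModel

# The fake model is used here only so the snippet runs without credentials.
model = FakeListChatModel(responses=["hi"], output_version="v1")
message = model.invoke("hi")
print(message.content_blocks)  # standardized, typed content blocks
```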
@@ -864,13 +838,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    1. Take advantage of batched calls,
    2. Need more output from the model than just the top generated value,
    3. Are building chains that are agnostic to the underlying language model
       type (e.g., pure text completion models vs chat models).
       type (e.g., pure text completion models vs chat models).

    Args:
        messages: List of list of messages.
        stop: Stop words to use when generating. Model output is cut off at the
            first occurrence of any of these substrings.
        callbacks: Callbacks to pass through. Used for executing additional
        callbacks: `Callbacks` to pass through. Used for executing additional
            functionality, such as logging or streaming, throughout generation.
        tags: The tags to apply.
        metadata: The metadata to apply.
@@ -880,8 +854,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
            to the model provider API call.

    Returns:
        An LLMResult, which contains a list of candidate Generations for each input
        prompt and additional model provider-specific output.
        An `LLMResult`, which contains a list of candidate `Generations` for each
        input prompt and additional model provider-specific output.
    """
    ls_structured_output_format = kwargs.pop(
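Sketch of a batched `generate` call matching the docstring above (fake model, canned outputs), showing the `LLMResult` shape:

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage

model = FakeListChatModel(responses=["Paris", "Berlin"])
result = model.generate(
    [
        [HumanMessage(content="Capital of France?")],
        [HumanMessage(content="Capital of Germany?")],
    ]
)
# One list of candidate generations per input prompt:
for generations in result.generations:
    print(generations[0].text)
```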
@@ -982,13 +956,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    1. Take advantage of batched calls,
    2. Need more output from the model than just the top generated value,
    3. Are building chains that are agnostic to the underlying language model
       type (e.g., pure text completion models vs chat models).
       type (e.g., pure text completion models vs chat models).

    Args:
        messages: List of list of messages.
        stop: Stop words to use when generating. Model output is cut off at the
            first occurrence of any of these substrings.
        callbacks: Callbacks to pass through. Used for executing additional
        callbacks: `Callbacks` to pass through. Used for executing additional
            functionality, such as logging or streaming, throughout generation.
        tags: The tags to apply.
        metadata: The metadata to apply.
@@ -998,8 +972,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
            to the model provider API call.

    Returns:
        An LLMResult, which contains a list of candidate Generations for each input
        prompt and additional model provider-specific output.
        An `LLMResult`, which contains a list of candidate `Generations` for each
        input prompt and additional model provider-specific output.
    """
    ls_structured_output_format = kwargs.pop(
@@ -1536,17 +1510,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
        If `schema` is a Pydantic class then the model output will be a
        Pydantic instance of that class, and the model-generated fields will be
        validated by the Pydantic class. Otherwise the model output will be a
        dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
        for more on how to properly specify types and descriptions of
        schema fields when specifying a Pydantic or `TypedDict` class.
        dict and will not be validated.

        See `langchain_core.utils.function_calling.convert_to_openai_tool` for
        more on how to properly specify types and descriptions of schema fields
        when specifying a Pydantic or `TypedDict` class.

    include_raw:
        If `False` then only the parsed structured output is returned. If
        an error occurs during model output parsing it will be raised. If `True`
        then both the raw model response (a BaseMessage) and the parsed model
        then both the raw model response (a `BaseMessage`) and the parsed model
        response will be returned. If an error occurs during output parsing it
        will be caught and returned as well. The final output is always a dict
        with keys `'raw'`, `'parsed'`, and `'parsing_error'`.
        will be caught and returned as well.

        The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
        `'parsing_error'`.

    Raises:
        ValueError: If there are any unsupported `kwargs`.
@@ -1554,99 +1532,102 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
        `with_structured_output()`.

    Returns:
        A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.

        If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
        an instance of `schema` (i.e., a Pydantic object).

        Otherwise, if `include_raw` is False then Runnable outputs a dict.

        If `include_raw` is True, then Runnable outputs a dict with keys:

        - `'raw'`: BaseMessage
        - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
        - `'parsing_error'`: BaseException | None

        A `Runnable` that takes same inputs as a
        `langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
        `False` and `schema` is a Pydantic class, `Runnable` outputs an instance
        of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
        `False` then `Runnable` outputs a `dict`.

        If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:

        - `'raw'`: `BaseMessage`
        - `'parsed'`: `None` if there was a parsing error, otherwise the type
          depends on the `schema` as described above.
        - `'parsing_error'`: `BaseException | None`

    Example: Pydantic schema (include_raw=False):
        ```python
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        llm = ChatModel(model="model-name", temperature=0)
        structured_llm = llm.with_structured_output(AnswerWithJustification)

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )

        # -> AnswerWithJustification(
        #     answer='They weigh the same',
        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
        # )
        ```

    Example: Pydantic schema (`include_raw=False`):

        ```python
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        model = ChatModel(model="model-name", temperature=0)
        structured_model = model.with_structured_output(AnswerWithJustification)

        structured_model.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )

        # -> AnswerWithJustification(
        #     answer='They weigh the same',
        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
        # )
        ```

    Example: Pydantic schema (include_raw=True):
        ```python
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        llm = ChatModel(model="model-name", temperature=0)
        structured_llm = llm.with_structured_output(
            AnswerWithJustification, include_raw=True
        )

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
        #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
        #     'parsing_error': None
        # }
        ```

    Example: Pydantic schema (`include_raw=True`):

        ```python
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        model = ChatModel(model="model-name", temperature=0)
        structured_model = model.with_structured_output(
            AnswerWithJustification, include_raw=True
        )

        structured_model.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
        #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
        #     'parsing_error': None
        # }
        ```

    Example: Dict schema (include_raw=False):
        ```python
        from pydantic import BaseModel
        from langchain_core.utils.function_calling import convert_to_openai_tool


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        dict_schema = convert_to_openai_tool(AnswerWithJustification)
        llm = ChatModel(model="model-name", temperature=0)
        structured_llm = llm.with_structured_output(dict_schema)

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'answer': 'They weigh the same',
        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
        # }
        ```

    Example: `dict` schema (`include_raw=False`):

        ```python
        from pydantic import BaseModel
        from langchain_core.utils.function_calling import convert_to_openai_tool


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        dict_schema = convert_to_openai_tool(AnswerWithJustification)
        model = ChatModel(model="model-name", temperature=0)
        structured_model = model.with_structured_output(dict_schema)

        structured_model.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'answer': 'They weigh the same',
        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
        # }
        ```

    !!! warning "Behavior changed in 0.2.26"
        Added support for TypedDict class.
        Added support for TypedDict class.

    """  # noqa: E501
    _ = kwargs.pop("method", None)
@@ -1,4 +1,4 @@
"""Fake ChatModel for testing purposes."""
"""Fake chat model for testing purposes."""

import asyncio
import re
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig


class FakeMessagesListChatModel(BaseChatModel):
    """Fake `ChatModel` for testing purposes."""
    """Fake chat model for testing purposes."""

    responses: list[BaseMessage]
    """List of responses to **cycle** through in order."""
@@ -57,7 +57,7 @@ class FakeListChatModelError(Exception):


class FakeListChatModel(SimpleChatModel):
    """Fake ChatModel for testing purposes."""
    """Fake chat model for testing purposes."""

    responses: list[str]
    """List of responses to **cycle** through in order."""

@@ -74,8 +74,8 @@ def create_base_retry_decorator(

    Args:
        error_types: List of error types to retry on.
        max_retries: Number of retries. Default is 1.
        run_manager: Callback manager for the run. Default is None.
        max_retries: Number of retries.
        run_manager: Callback manager for the run.

    Returns:
        A retry decorator.
@@ -91,13 +91,17 @@ def create_base_retry_decorator(
        if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
            coro = run_manager.on_retry(retry_state)
            try:
                loop = asyncio.get_event_loop()
                if loop.is_running():
                    # TODO: Fix RUF006 - this task should have a reference
                    # and be awaited somewhere
                    loop.create_task(coro)  # noqa: RUF006
                else:
                try:
                    loop = asyncio.get_event_loop()
                except RuntimeError:
                    asyncio.run(coro)
                else:
                    if loop.is_running():
                        # TODO: Fix RUF006 - this task should have a reference
                        # and be awaited somewhere
                        loop.create_task(coro)  # noqa: RUF006
                    else:
                        asyncio.run(coro)
            except Exception as e:
                _log_error_once(f"Error in on_retry: {e}")
        else:
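For illustration, the decorator returned by `create_base_retry_decorator` wraps any callable; the error type and retry count below are arbitrary:

```python
from langchain_core.language_models.llms import create_base_retry_decorator

retry_decorator = create_base_retry_decorator(
    error_types=[TimeoutError], max_retries=3
)


@retry_decorator
def flaky_call() -> str:
    # A TimeoutError raised here would be retried (with exponential
    # backoff) before the error finally propagates.
    return "ok"


print(flaky_call())
```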
@@ -153,7 +157,7 @@ def get_prompts(
    Args:
        params: Dictionary of parameters.
        prompts: List of prompts.
        cache: Cache object. Default is None.
        cache: Cache object.

    Returns:
        A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -189,7 +193,7 @@ async def aget_prompts(
    Args:
        params: Dictionary of parameters.
        prompts: List of prompts.
        cache: Cache object. Default is None.
        cache: Cache object.

    Returns:
        A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -841,7 +845,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        prompts: List of string prompts.
        stop: Stop words to use when generating. Model output is cut off at the
            first occurrence of any of these substrings.
        callbacks: Callbacks to pass through. Used for executing additional
        callbacks: `Callbacks` to pass through. Used for executing additional
            functionality, such as logging or streaming, throughout generation.
        tags: List of tags to associate with each prompt. If provided, the length
            of the list must match the length of the prompts list.
@@ -861,8 +865,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        `run_name` (if provided) does not match the length of prompts.

    Returns:
        An LLMResult, which contains a list of candidate Generations for each input
        prompt and additional model provider-specific output.
        An `LLMResult`, which contains a list of candidate `Generations` for each
        input prompt and additional model provider-specific output.
    """
    if not isinstance(prompts, list):
        msg = (
@@ -1111,7 +1115,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        prompts: List of string prompts.
        stop: Stop words to use when generating. Model output is cut off at the
            first occurrence of any of these substrings.
        callbacks: Callbacks to pass through. Used for executing additional
        callbacks: `Callbacks` to pass through. Used for executing additional
            functionality, such as logging or streaming, throughout generation.
        tags: List of tags to associate with each prompt. If provided, the length
            of the list must match the length of the prompts list.
@@ -1130,8 +1134,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        `run_name` (if provided) does not match the length of prompts.

    Returns:
        An LLMResult, which contains a list of candidate Generations for each input
        prompt and additional model provider-specific output.
        An `LLMResult`, which contains a list of candidate `Generations` for each
        input prompt and additional model provider-specific output.
    """
    if isinstance(metadata, list):
        metadata = [

@@ -38,17 +38,16 @@ def _dump_pydantic_models(obj: Any) -> Any:


def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
    """Return a json string representation of an object.
    """Return a JSON string representation of an object.

    Args:
        obj: The object to dump.
        pretty: Whether to pretty print the json. If true, the json will be
            indented with 2 spaces (if no indent is provided as part of kwargs).
            Default is False.
        **kwargs: Additional arguments to pass to json.dumps
        pretty: Whether to pretty print the json. If `True`, the json will be
            indented with 2 spaces (if no indent is provided as part of `kwargs`).
        **kwargs: Additional arguments to pass to `json.dumps`

    Returns:
        A json string representation of the object.
        A JSON string representation of the object.

    Raises:
        ValueError: If `default` is passed as a kwarg.
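A quick sketch of this entry point (any serializable LangChain object works; a message is used here):

```python
from langchain_core.load import dumps
from langchain_core.messages import HumanMessage

print(dumps(HumanMessage(content="hi"), pretty=True))
# {
#   "lc": 1,
#   "type": "constructor",
#   "id": ["langchain", "schema", "messages", "HumanMessage"],
#   ...
# }
```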
@@ -72,14 +71,12 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
def dumpd(obj: Any) -> Any:
    """Return a dict representation of an object.

    !!! note
        Unfortunately this function is not as efficient as it could be because it first
        dumps the object to a json string and then loads it back into a dictionary.

    Args:
        obj: The object to dump.

    Returns:
        dictionary that can be serialized to json using json.dumps
        Dictionary that can be serialized to json using `json.dumps`.
    """
    # Unfortunately this function is not as efficient as it could be because it first
    # dumps the object to a json string and then loads it back into a dictionary.
    return json.loads(dumps(obj))

@@ -63,16 +63,13 @@ class Reviver:
    Args:
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
            is True. Defaults to `None`.
            is True.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized. Defaults to `None`.
            to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
            Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.
    """
    self.secrets_from_env = secrets_from_env
    self.secrets_map = secrets_map or {}
@@ -200,16 +197,13 @@ def loads(
        text: The string to load.
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
            is True. Defaults to `None`.
            is True.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized. Defaults to `None`.
            to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
            Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.

    Returns:
        Revived LangChain objects.
@@ -245,16 +239,13 @@ def load(
        obj: The object to load.
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
            is True. Defaults to `None`.
            is True.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized. Defaults to `None`.
            to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
            Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.

    Returns:
        Revived LangChain objects.
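Round-trip sketch for the loaders documented above: `dumps` then `loads`, with a `secrets_map` supplied as described (the key name is illustrative; it is consulted only if the serialized object references that secret):

```python
from langchain_core.load import dumps, loads
from langchain_core.messages import AIMessage

serialized = dumps(AIMessage(content="hello"))
revived = loads(serialized, secrets_map={"OPENAI_API_KEY": "sk-..."})
assert isinstance(revived, AIMessage)
```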
@@ -25,9 +25,9 @@ class BaseSerialized(TypedDict):
    id: list[str]
    """The unique identifier of the object."""
    name: NotRequired[str]
    """The name of the object. Optional."""
    """The name of the object."""
    graph: NotRequired[dict[str, Any]]
    """The graph of the object. Optional."""
    """The graph of the object."""


class SerializedConstructor(BaseSerialized):
@@ -52,7 +52,7 @@ class SerializedNotImplemented(BaseSerialized):
    type: Literal["not_implemented"]
    """The type of the object. Must be `'not_implemented'`."""
    repr: str | None
    """The representation of the object. Optional."""
    """The representation of the object."""


def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -61,7 +61,7 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
    Args:
        value: The value.
        key: The key.
        model: The pydantic model.
        model: The Pydantic model.

    Returns:
        Whether the value is different from the default.
@@ -93,18 +93,18 @@ class Serializable(BaseModel, ABC):
    It relies on the following methods and properties:

    - `is_lc_serializable`: Is this class serializable?
        By design, even if a class inherits from Serializable, it is not serializable by
        default. This is to prevent accidental serialization of objects that should not
        be serialized.
    - `get_lc_namespace`: Get the namespace of the langchain object.
        By design, even if a class inherits from `Serializable`, it is not serializable
        by default. This is to prevent accidental serialization of objects that should
        not be serialized.
    - `get_lc_namespace`: Get the namespace of the LangChain object.
        During deserialization, this namespace is used to identify
        the correct class to instantiate.
        Please see the `Reviver` class in `langchain_core.load.load` for more details.
        During deserialization an additional mapping is used to handle
        classes that have moved or been renamed across package versions.
        During deserialization an additional mapping is used to handle classes that
        have moved or been renamed across package versions.
    - `lc_secrets`: A map of constructor argument names to secret ids.
    - `lc_attributes`: List of additional attribute names that should be included
        as part of the serialized representation.
        as part of the serialized representation.
    """

    # Remove default BaseModel init docstring.
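A minimal sketch of a subclass wired up the way the overview above describes (class, field, and namespace names are invented):

```python
from langchain_core.load.serializable import Serializable


class MyComponent(Serializable):
    api_key: str
    endpoint: str = "https://example.invalid"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True  # opt in explicitly; the default is False

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        return ["my_pkg", "components"]

    @property
    def lc_secrets(self) -> dict[str, str]:
        # Serialize api_key as a secret id rather than in plain text.
        return {"api_key": "MY_API_KEY"}


component = MyComponent(api_key="secret")
print(component.to_json()["kwargs"])  # api_key appears as a secret reference
```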
@@ -116,24 +116,24 @@ class Serializable(BaseModel, ABC):
    def is_lc_serializable(cls) -> bool:
        """Is this class serializable?

        By design, even if a class inherits from Serializable, it is not serializable by
        default. This is to prevent accidental serialization of objects that should not
        be serialized.
        By design, even if a class inherits from `Serializable`, it is not serializable
        by default. This is to prevent accidental serialization of objects that should
        not be serialized.

        Returns:
            Whether the class is serializable. Default is False.
            Whether the class is serializable. Default is `False`.
        """
        return False

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.
        """Get the namespace of the LangChain object.

        For example, if the class is `langchain.llms.openai.OpenAI`, then the
        namespace is ["langchain", "llms", "openai"]
        namespace is `["langchain", "llms", "openai"]`

        Returns:
            The namespace as a list of strings.
            The namespace.
        """
        return cls.__module__.split(".")

@@ -141,8 +141,7 @@ class Serializable(BaseModel, ABC):
    def lc_secrets(self) -> dict[str, str]:
        """A map of constructor argument names to secret ids.

        For example,
            {"openai_api_key": "OPENAI_API_KEY"}
        For example, `{"openai_api_key": "OPENAI_API_KEY"}`
        """
        return {}

@@ -151,6 +150,7 @@ class Serializable(BaseModel, ABC):
        """List of attribute names that should be included in the serialized kwargs.

        These attributes must be accepted by the constructor.

        Default is an empty dictionary.
        """
        return {}
@@ -194,7 +194,7 @@ class Serializable(BaseModel, ABC):
        ValueError: If the class has deprecated attributes.

        Returns:
            A json serializable object or a SerializedNotImplemented object.
            A json serializable object or a `SerializedNotImplemented` object.
        """
        if not self.is_lc_serializable():
            return self.to_json_not_implemented()
@@ -269,7 +269,7 @@ class Serializable(BaseModel, ABC):
        """Serialize a "not implemented" object.

        Returns:
            SerializedNotImplemented.
            `SerializedNotImplemented`.
        """
        return to_json_not_implemented(self)

@@ -284,8 +284,8 @@ def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:

    Returns:
        Whether the field is useful. If the field is required, it is useful.
        If the field is not required, it is useful if the value is not None.
        If the field is not required and the value is None, it is useful if the
        If the field is not required, it is useful if the value is not `None`.
        If the field is not required and the value is `None`, it is useful if the
        default value is different from the value.
    """
    field = type(inst).model_fields.get(key)
@@ -344,10 +344,10 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
    """Serialize a "not implemented" object.

    Args:
        obj: object to serialize.
        obj: Object to serialize.

    Returns:
        SerializedNotImplemented
        `SerializedNotImplemented`
    """
    id_: list[str] = []
    try:

@@ -148,27 +148,26 @@ class UsageMetadata(TypedDict):
class AIMessage(BaseMessage):
    """Message from an AI.

    AIMessage is returned from a chat model as a response to a prompt.
    An `AIMessage` is returned from a chat model as a response to a prompt.

    This message represents the output of the model and consists of both
    the raw output as returned by the model together standardized fields
    the raw output as returned by the model and standardized fields
    (e.g., tool calls, usage metadata) added by the LangChain framework.
    """

    tool_calls: list[ToolCall] = []
    """If provided, tool calls associated with the message."""
    """If present, tool calls associated with the message."""
    invalid_tool_calls: list[InvalidToolCall] = []
    """If provided, tool calls with parsing errors associated with the message."""
    """If present, tool calls with parsing errors associated with the message."""
    usage_metadata: UsageMetadata | None = None
    """If provided, usage metadata for a message, such as token counts.
    """If present, usage metadata for a message, such as token counts.

    This is a standard representation of token usage that is consistent across models.
    """

    type: Literal["ai"] = "ai"
    """The type of the message (used for deserialization). Defaults to "ai"."""
    """The type of the message (used for deserialization)."""

    @overload
    def __init__(
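Constructing an `AIMessage` by hand, as a provider integration would, shows where these fields live (all values are made up):

```python
from langchain_core.messages import AIMessage

message = AIMessage(
    content="",
    tool_calls=[
        {
            "type": "tool_call",
            "name": "get_weather",
            "args": {"city": "Paris"},
            "id": "call_1",
        }
    ],
    usage_metadata={"input_tokens": 12, "output_tokens": 7, "total_tokens": 19},
)
print(message.tool_calls[0]["name"], message.usage_metadata["total_tokens"])
```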
@@ -191,7 +190,7 @@ class AIMessage(BaseMessage):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize `AIMessage`.
        """Initialize an `AIMessage`.

        Specify `content` as positional arg or `content_blocks` for typing.

@@ -217,7 +216,11 @@ class AIMessage(BaseMessage):

    @property
    def lc_attributes(self) -> dict:
        """Attrs to be serialized even if they are derived from other init args."""
        """Attributes to be serialized.

        Includes all attributes, even if they are derived from other initialization
        arguments.
        """
        return {
            "tool_calls": self.tool_calls,
            "invalid_tool_calls": self.invalid_tool_calls,
@@ -225,7 +228,7 @@ class AIMessage(BaseMessage):

    @property
    def content_blocks(self) -> list[types.ContentBlock]:
        """Return content blocks of the message.
        """Return standard, typed `ContentBlock` dicts from the message.

        If the message has a known model provider, use the provider-specific translator
        first before falling back to best-effort parsing. For details, see the property
@@ -331,11 +334,10 @@ class AIMessage(BaseMessage):

    @override
    def pretty_repr(self, html: bool = False) -> str:
        """Return a pretty representation of the message.
        """Return a pretty representation of the message for display.

        Args:
            html: Whether to return an HTML-formatted string.
                Defaults to `False`.

        Returns:
            A pretty representation of the message.
@@ -372,23 +374,19 @@ class AIMessage(BaseMessage):


class AIMessageChunk(AIMessage, BaseMessageChunk):
    """Message chunk from an AI."""
    """Message chunk from an AI (yielded when streaming)."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]
    """The type of the message (used for deserialization).

    Defaults to `AIMessageChunk`.

    """
    """The type of the message (used for deserialization)."""

    tool_call_chunks: list[ToolCallChunk] = []
    """If provided, tool call chunks associated with the message."""

    chunk_position: Literal["last"] | None = None
    """Optional span represented by an aggregated AIMessageChunk.
    """Optional span represented by an aggregated `AIMessageChunk`.

    If a chunk with `chunk_position="last"` is aggregated into a stream,
    `tool_call_chunks` in message content will be parsed into `tool_calls`.
@@ -396,7 +394,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

    @property
    def lc_attributes(self) -> dict:
        """Attrs to be serialized even if they are derived from other init args."""
        """Attributes to be serialized, even if they are derived from other initialization args."""  # noqa: E501
        return {
            "tool_calls": self.tool_calls,
            "invalid_tool_calls": self.invalid_tool_calls,
@@ -404,7 +402,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

    @property
    def content_blocks(self) -> list[types.ContentBlock]:
        """Return content blocks of the message."""
        """Return standard, typed `ContentBlock` dicts from the message."""
        if self.response_metadata.get("output_version") == "v1":
            return cast("list[types.ContentBlock]", self.content)

@@ -545,12 +543,15 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
                and call_id in id_to_tc
            ):
                self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
                if "extras" in block:
                    # mypy does not account for instance check for dict above
                    self.content[idx]["extras"] = block["extras"]  # type: ignore[index]

        return self

    @model_validator(mode="after")
    def init_server_tool_calls(self) -> Self:
        """Parse server_tool_call_chunks."""
        """Parse `server_tool_call_chunks`."""
        if (
            self.chunk_position == "last"
            and self.response_metadata.get("output_version") == "v1"
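Chunk aggregation is plain addition, which is what the streaming helpers rely on; a toy illustration of the `chunk_position` field described above:

```python
from langchain_core.messages import AIMessageChunk

chunks = [
    AIMessageChunk(content="Hel"),
    AIMessageChunk(content="lo"),
    AIMessageChunk(content="!", chunk_position="last"),  # marks the final chunk
]
full = chunks[0] + chunks[1] + chunks[2]
print(full.content)  # -> "Hello!"
```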
@@ -92,11 +92,11 @@ class TextAccessor(str):
class BaseMessage(Serializable):
    """Base abstract message class.

    Messages are the inputs and outputs of a `ChatModel`.
    Messages are the inputs and outputs of a chat model.
    """

    content: str | list[str | dict]
    """The string contents of the message."""
    """The contents of the message."""

    additional_kwargs: dict = Field(default_factory=dict)
    """Reserved for additional payload data associated with the message.
@@ -159,12 +159,12 @@ class BaseMessage(Serializable):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize `BaseMessage`.
        """Initialize a `BaseMessage`.

        Specify `content` as positional arg or `content_blocks` for typing.

        Args:
            content: The string contents of the message.
            content: The contents of the message.
            content_blocks: Typed standard content.
            **kwargs: Additional arguments to pass to the parent class.
        """
@@ -184,7 +184,7 @@ class BaseMessage(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.
        """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "messages"]`
@@ -262,7 +262,7 @@ class BaseMessage(Serializable):
        Can be used as both property (`message.text`) and method (`message.text()`).

        !!! deprecated
            As of langchain-core 1.0.0, calling `.text()` as a method is deprecated.
            As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
            Use `.text` as a property instead. This method will be removed in 2.0.0.

        Returns:
@@ -307,7 +307,7 @@ class BaseMessage(Serializable):

        Args:
            html: Whether to format the message as HTML. If `True`, the message will be
                formatted with HTML tags. Default is False.
                formatted with HTML tags.

        Returns:
            A pretty representation of the message.
@@ -464,7 +464,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:

    Args:
        title: The title.
        bold: Whether to bold the title. Default is False.
        bold: Whether to bold the title.

    Returns:
        The title representation.

@@ -28,7 +28,7 @@ dictionary with two keys:
- `'translate_content'`: Function to translate `AIMessage` content.
- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.

When calling `.content_blocks` on an `AIMessage` or `AIMessageChunk`, if
When calling `content_blocks` on an `AIMessage` or `AIMessageChunk`, if
`model_provider` is set in `response_metadata`, the corresponding translator
functions will be used to parse the content into blocks. Otherwise, best-effort parsing
in `BaseMessage` will be used.
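The dispatch described here can be observed by tagging a message with a provider before reading `content_blocks` (the provider value is illustrative):

```python
from langchain_core.messages import AIMessage

message = AIMessage(
    content="The answer is 42.",
    response_metadata={"model_provider": "groq"},  # selects that translator
)
print(message.content_blocks)  # e.g. [{"type": "text", "text": "The answer is 42."}]
```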
@@ -31,7 +31,7 @@ def _convert_to_v1_from_anthropic_input(
) -> list[types.ContentBlock]:
    """Convert Anthropic format blocks to v1 format.

    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
    block as a `'non_standard'` block with the original block stored in the `value`
    field. This function attempts to unpack those blocks and convert any blocks that
    might be Anthropic format to v1 ContentBlocks.

@@ -35,7 +35,7 @@ def _convert_to_v1_from_converse_input(
) -> list[types.ContentBlock]:
    """Convert Bedrock Converse format blocks to v1 format.

    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
    block as a `'non_standard'` block with the original block stored in the `value`
    field. This function attempts to unpack those blocks and convert any blocks that
    might be Converse format to v1 ContentBlocks.

@@ -105,7 +105,7 @@ def _convert_to_v1_from_genai_input(
    Called when message isn't an `AIMessage` or `model_provider` isn't set on
    `response_metadata`.

    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
    block as a `'non_standard'` block with the original block stored in the `value`
    field. This function attempts to unpack those blocks and convert any blocks that
    might be GenAI format to v1 ContentBlocks.
@@ -282,7 +282,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
    standard content blocks for returning.

    Args:
        message: The AIMessage or AIMessageChunk to convert.
        message: The `AIMessage` or `AIMessageChunk` to convert.

    Returns:
        List of standard content blocks derived from the message content.
@@ -453,9 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
                "status": status,  # type: ignore[typeddict-item]
                "output": item.get("code_execution_result", ""),
            }
            server_tool_result_block["extras"] = {"block_type": item_type}
            # Preserve original outcome in extras
            if outcome is not None:
                server_tool_result_block["extras"] = {"outcome": outcome}
                server_tool_result_block["extras"]["outcome"] = outcome
            converted_blocks.append(server_tool_result_block)
        else:
            # Unknown type, preserve as non-standard
@@ -1,37 +1,9 @@
"""Derivations of standard content blocks from Google (VertexAI) content."""

import warnings

from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types

WARNED = False


def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
    """Derive standard content blocks from a message with Google (VertexAI) content."""
    global WARNED  # noqa: PLW0603
    if not WARNED:
        warning_message = (
            "Content block standardization is not yet fully supported for Google "
            "VertexAI."
        )
        warnings.warn(warning_message, stacklevel=2)
        WARNED = True
    raise NotImplementedError


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
    """Derive standard content blocks from a chunk with Google (VertexAI) content."""
    global WARNED  # noqa: PLW0603
    if not WARNED:
        warning_message = (
            "Content block standardization is not yet fully supported for Google "
            "VertexAI."
        )
        warnings.warn(warning_message, stacklevel=2)
        WARNED = True
    raise NotImplementedError

from langchain_core.messages.block_translators.google_genai import (
    translate_content,
    translate_content_chunk,
)


def _register_google_vertexai_translator() -> None:
@@ -1,39 +1,135 @@
|
||||
"""Derivations of standard content blocks from Groq content."""
|
||||
|
||||
import warnings
|
||||
import json
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from langchain_core.messages import AIMessage, AIMessageChunk
|
||||
from langchain_core.messages import content as types
|
||||
|
||||
WARNED = False
|
||||
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs
|
||||
|
||||
|
||||
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
|
||||
"""Derive standard content blocks from a message with Groq content."""
|
||||
global WARNED # noqa: PLW0603
|
||||
if not WARNED:
|
||||
warning_message = (
|
||||
"Content block standardization is not yet fully supported for Groq."
|
||||
def _populate_extras(
|
||||
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
|
||||
) -> types.ContentBlock:
|
||||
"""Mutate a block, populating extras."""
|
||||
if standard_block.get("type") == "non_standard":
|
||||
return standard_block
|
||||
|
||||
for key, value in block.items():
|
||||
if key not in known_fields:
|
||||
if "extras" not in standard_block:
|
||||
# Below type-ignores are because mypy thinks a non-standard block can
|
||||
# get here, although we exclude them above.
|
||||
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
|
||||
standard_block["extras"][key] = value # type: ignore[typeddict-item]
|
||||
|
||||
return standard_block
|
||||
|
||||
|
||||
def _parse_code_json(s: str) -> dict:
|
||||
"""Extract Python code from Groq built-in tool content.
|
||||
|
||||
Extracts the value of the 'code' field from a string of the form:
|
||||
{"code": some_arbitrary_text_with_unescaped_quotes}
|
||||
|
||||
As Groq may not escape quotes in the executed tools, e.g.:
|
||||
```
|
||||
'{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
|
||||
```
|
||||
""" # noqa: E501
|
||||
m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
|
||||
if not m:
|
||||
msg = (
|
||||
"Could not extract Python code from Groq tool arguments. "
|
||||
"Expected a JSON object with a 'code' field."
|
||||
)
|
||||
warnings.warn(warning_message, stacklevel=2)
|
||||
WARNED = True
|
||||
raise NotImplementedError
|
||||
raise ValueError(msg)
|
||||
return {"code": m.group(1)}
|
||||
|
||||
|
||||
-def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message chunk with Groq content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Groq."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError


def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
    """Convert Groq message content to v1 format."""
    content_blocks: list[types.ContentBlock] = []

    if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
        content_blocks.append(reasoning_block)

    if executed_tools := message.additional_kwargs.get("executed_tools"):
        for idx, executed_tool in enumerate(executed_tools):
            args: dict[str, Any] | None = None
            if arguments := executed_tool.get("arguments"):
                try:
                    args = json.loads(arguments)
                except json.JSONDecodeError:
                    if executed_tool.get("type") == "python":
                        try:
                            args = _parse_code_json(arguments)
                        except ValueError:
                            continue
                    elif (
                        executed_tool.get("type") == "function"
                        and executed_tool.get("name") == "python"
                    ):
                        # GPT-OSS
                        args = {"code": arguments}
                    else:
                        continue
            if isinstance(args, dict):
                name = ""
                if executed_tool.get("type") == "search":
                    name = "web_search"
                elif executed_tool.get("type") == "python" or (
                    executed_tool.get("type") == "function"
                    and executed_tool.get("name") == "python"
                ):
                    name = "code_interpreter"
                server_tool_call: types.ServerToolCall = {
                    "type": "server_tool_call",
                    "name": name,
                    "id": str(idx),
                    "args": args,
                }
                content_blocks.append(server_tool_call)
            if tool_output := executed_tool.get("output"):
                tool_result: types.ServerToolResult = {
                    "type": "server_tool_result",
                    "tool_call_id": str(idx),
                    "output": tool_output,
                    "status": "success",
                }
                known_fields = {"type", "arguments", "index", "output"}
                _populate_extras(tool_result, executed_tool, known_fields)
                content_blocks.append(tool_result)

    if isinstance(message.content, str) and message.content:
        content_blocks.append({"type": "text", "text": message.content})

    for tool_call in message.tool_calls:
        content_blocks.append(  # noqa: PERF401
            {
                "type": "tool_call",
                "name": tool_call["name"],
                "args": tool_call["args"],
                "id": tool_call.get("id"),
            }
        )

    return content_blocks

def translate_content(message: AIMessage) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message with Groq content."""
    return _convert_to_v1_from_groq(message)


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message chunk with Groq content."""
    return _convert_to_v1_from_groq(message)


def _register_groq_translator() -> None:
    """Register the Groq translator with the central registry.

    Run automatically when the module is imported.
    """

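To see what this translation yields, here is a hedged, standalone sketch of the executed-tools branch above. The `executed_tool` dict shape is an assumption modeled on the code, not an official Groq payload:

```python
import json

# Hypothetical entry, shaped like message.additional_kwargs["executed_tools"][0].
executed_tool = {
    "type": "search",
    "arguments": '{"query": "what is langchain"}',
    "output": "LangChain is a framework for building LLM applications.",
}

args = json.loads(executed_tool["arguments"])
# "search" maps to "web_search"; "python" (or function-named-python) maps
# to "code_interpreter", mirroring the branch in _convert_to_v1_from_groq.
name = "web_search" if executed_tool["type"] == "search" else "code_interpreter"

server_tool_call = {"type": "server_tool_call", "name": name, "id": "0", "args": args}
server_tool_result = {
    "type": "server_tool_result",
    "tool_call_id": "0",
    "output": executed_tool["output"],
    "status": "success",
}
print(server_tool_call)
print(server_tool_result)
```
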
@@ -10,7 +10,7 @@ def _convert_v0_multimodal_input_to_v1(
 ) -> list[types.ContentBlock]:
     """Convert v0 multimodal blocks to v1 format.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
     block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any v0 format
     blocks to v1 format.

@@ -155,7 +155,7 @@ def _convert_to_v1_from_chat_completions_input(
 ) -> list[types.ContentBlock]:
     """Convert OpenAI Chat Completions format blocks to v1 format.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
     block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be OpenAI format to v1 ContentBlocks.

@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
     """The speaker / role of the Message."""

     type: Literal["chat"] = "chat"
-    """The type of the message (used during serialization). Defaults to "chat"."""
+    """The type of the message (used during serialization)."""


 class ChatMessageChunk(ChatMessage, BaseMessageChunk):
@@ -29,11 +29,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used during serialization).
-
-    Defaults to `'ChatMessageChunk'`.
-
-    """
+    """The type of the message (used during serialization)."""

     @override
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]

@@ -143,7 +143,7 @@ class Citation(TypedDict):
     not the source text. This means that the indices are relative to the model's
     response, not the original document (as specified in the `url`).

-    !!! note
+    !!! note "Factory function"
         `create_citation` may also be used as a factory to create a `Citation`.
         Benefits include:

@@ -156,7 +156,9 @@ class Citation(TypedDict):
     """Type of the content block. Used for discrimination."""

     id: NotRequired[str]
-    """Content block identifier. Either:
+    """Content block identifier.
+
+    Either:

     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
@@ -201,6 +203,7 @@ class NonStandardAnnotation(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -211,6 +214,7 @@ class NonStandardAnnotation(TypedDict):


 Annotation = Citation | NonStandardAnnotation
+"""A union of all defined `Annotation` types."""


 class TextContentBlock(TypedDict):
@@ -219,7 +223,7 @@ class TextContentBlock(TypedDict):
     This typically represents the main text content of a message, such as the response
     from a language model or the text of a user message.

-    !!! note
+    !!! note "Factory function"
         `create_text_block` may also be used as a factory to create a
         `TextContentBlock`. Benefits include:

@@ -235,6 +239,7 @@ class TextContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

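As a hedged sketch of the factory pattern these notes describe: the import path and exact signature below are assumptions (the factory helpers are presumed to live alongside the block definitions in `langchain_core.messages.content_blocks`), not confirmed by this diff:

```python
# Assumed import path and signature for the factory helper.
from langchain_core.messages.content_blocks import create_text_block

# A TextContentBlock is a plain TypedDict; `id` is optional (NotRequired).
block = {"type": "text", "text": "Hello!"}

# The factory auto-generates an `'lc_'`-prefixed UUID4 id when none is given.
block_with_id = create_text_block("Hello!")
print(block_with_id["id"])  # e.g. 'lc_123e4567-...'
```
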
@@ -254,7 +259,7 @@ class TextContentBlock(TypedDict):


 class ToolCall(TypedDict):
-    """Represents a request to call a tool.
+    """Represents an AI's request to call a tool.

     Example:
         ```python
@@ -264,7 +269,7 @@ class ToolCall(TypedDict):
     This represents a request to call the tool named "foo" with arguments {"a": 1}
     and an identifier of "123".

-    !!! note
+    !!! note "Factory function"
         `create_tool_call` may also be used as a factory to create a
         `ToolCall`. Benefits include:

@@ -299,7 +304,7 @@ class ToolCall(TypedDict):

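Concretely, the request described in that docstring is just this dict (the `type` key is the discriminator):

```python
tool_call = {
    "type": "tool_call",
    "name": "foo",
    "args": {"a": 1},
    "id": "123",
}
```
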
 class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
+    """A chunk of a tool call (yielded when streaming).

     When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
     all string attributes are concatenated. Chunks are only merged if their
@@ -381,7 +386,10 @@ class InvalidToolCall(TypedDict):


 class ServerToolCall(TypedDict):
-    """Tool call that is executed server-side."""
+    """Tool call that is executed server-side.
+
+    For example: code execution, web search, etc.
+    """

     type: Literal["server_tool_call"]
     """Used for discrimination."""
@@ -403,7 +411,7 @@ class ServerToolCall(TypedDict):


 class ServerToolCallChunk(TypedDict):
-    """A chunk of a tool call (as part of a stream)."""
+    """A chunk of a server-side tool call (yielded when streaming)."""

     type: Literal["server_tool_call_chunk"]
     """Used for discrimination."""
@@ -452,7 +460,7 @@ class ServerToolResult(TypedDict):
 class ReasoningContentBlock(TypedDict):
     """Reasoning output from a LLM.

-    !!! note
+    !!! note "Factory function"
         `create_reasoning_block` may also be used as a factory to create a
         `ReasoningContentBlock`. Benefits include:

@@ -468,6 +476,7 @@ class ReasoningContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -494,7 +503,7 @@ class ReasoningContentBlock(TypedDict):
 class ImageContentBlock(TypedDict):
     """Image data.

-    !!! note
+    !!! note "Factory function"
         `create_image_block` may also be used as a factory to create a
         `ImageContentBlock`. Benefits include:

@@ -510,6 +519,7 @@ class ImageContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -541,7 +551,7 @@ class ImageContentBlock(TypedDict):
 class VideoContentBlock(TypedDict):
     """Video data.

-    !!! note
+    !!! note "Factory function"
         `create_video_block` may also be used as a factory to create a
         `VideoContentBlock`. Benefits include:

@@ -557,6 +567,7 @@ class VideoContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -588,7 +599,7 @@ class VideoContentBlock(TypedDict):
 class AudioContentBlock(TypedDict):
     """Audio data.

-    !!! note
+    !!! note "Factory function"
         `create_audio_block` may also be used as a factory to create an
         `AudioContentBlock`. Benefits include:
         * Automatic ID generation (when not provided)
@@ -603,6 +614,7 @@ class AudioContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -642,9 +654,9 @@ class PlainTextContentBlock(TypedDict):

     !!! note
         Title and context are optional fields that may be passed to the model. See
-        Anthropic [example](https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).
+        Anthropic [example](https://docs.claude.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).

-    !!! note
+    !!! note "Factory function"
         `create_plaintext_block` may also be used as a factory to create a
         `PlainTextContentBlock`. Benefits include:

@@ -660,6 +672,7 @@ class PlainTextContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -694,7 +707,7 @@ class PlainTextContentBlock(TypedDict):


 class FileContentBlock(TypedDict):
-    """File data that doesn't fit into other multimodal blocks.
+    """File data that doesn't fit into other multimodal block types.

     This block is intended for files that are not images, audio, or plaintext. For
     example, it can be used for PDFs, Word documents, etc.
@@ -703,7 +716,7 @@ class FileContentBlock(TypedDict):
     content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
     `PlainTextContentBlock`).

-    !!! note
+    !!! note "Factory function"
         `create_file_block` may also be used as a factory to create a
         `FileContentBlock`. Benefits include:

@@ -719,6 +732,7 @@ class FileContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -765,7 +779,7 @@ class NonStandardContentBlock(TypedDict):
     Has no `extras` field, as provider-specific data should be included in the
     `value` field.

-    !!! note
+    !!! note "Factory function"
         `create_non_standard_block` may also be used as a factory to create a
         `NonStandardContentBlock`. Benefits include:

@@ -781,6 +795,7 @@ class NonStandardContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
     - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -801,6 +816,7 @@ DataContentBlock = (
     | PlainTextContentBlock
     | FileContentBlock
 )
+"""A union of all defined multimodal data `ContentBlock` types."""

 ToolContentBlock = (
     ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
@@ -814,6 +830,7 @@ ContentBlock = (
     | DataContentBlock
     | ToolContentBlock
 )
+"""A union of all defined `ContentBlock` types and aliases."""


 KNOWN_BLOCK_TYPES = {
@@ -877,7 +894,7 @@ def is_data_content_block(block: dict) -> bool:
         block: The content block to check.

     Returns:
-        True if the content block is a data content block, False otherwise.
+        `True` if the content block is a data content block, `False` otherwise.

     """
     if block.get("type") not in _get_data_content_block_types():

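As a quick usage sketch (`is_data_content_block` is exported from `langchain_core.messages`; under the v1 block semantics above, an image block should count as data content and a text block should not):

```python
from langchain_core.messages import is_data_content_block

print(is_data_content_block({"type": "image", "url": "https://example.com/cat.png"}))  # expected: True
print(is_data_content_block({"type": "text", "text": "hi"}))  # expected: False
```
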
@@ -19,7 +19,7 @@ class FunctionMessage(BaseMessage):
     do not contain the `tool_call_id` field.

     The `tool_call_id` field is used to associate the tool call request with the
-    tool call response. This is useful in situations where a chat model is able
+    tool call response. Useful in situations where a chat model is able
     to request multiple tool calls in parallel.

     """
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
     """The name of the function that was executed."""

     type: Literal["function"] = "function"
-    """The type of the message (used for serialization). Defaults to `'function'`."""
+    """The type of the message (used for serialization)."""


 class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,11 +38,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to `'FunctionMessageChunk'`.
-
-    """
+    """The type of the message (used for serialization)."""

     @override
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]

@@ -7,9 +7,9 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk


 class HumanMessage(BaseMessage):
-    """Message from a human.
+    """Message from the user.

-    `HumanMessage`s are messages that are passed in from a human to the model.
+    A `HumanMessage` is a message that is passed in from a user to the model.

     Example:
         ```python
@@ -27,11 +27,7 @@ class HumanMessage(BaseMessage):
     """

     type: Literal["human"] = "human"
-    """The type of the message (used for serialization).
-
-    Defaults to `'human'`.
-
-    """
+    """The type of the message (used for serialization)."""

     @overload
     def __init__(
@@ -71,5 +67,4 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-        Defaults to "HumanMessageChunk"."""
+    """The type of the message (used for serialization)."""

@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
     """Message responsible for deleting other messages."""

     type: Literal["remove"] = "remove"
-    """The type of the message (used for serialization). Defaults to "remove"."""
+    """The type of the message (used for serialization)."""

     def __init__(
         self,

@@ -27,11 +27,7 @@ class SystemMessage(BaseMessage):
     """

     type: Literal["system"] = "system"
-    """The type of the message (used for serialization).
-
-    Defaults to `'system'`.
-
-    """
+    """The type of the message (used for serialization)."""

     @overload
     def __init__(
@@ -71,8 +67,4 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to `'SystemMessageChunk'`.
-
-    """
+    """The type of the message (used for serialization)."""

@@ -31,36 +31,34 @@ class ToolMessage(BaseMessage, ToolOutputMixin):

     Example: A `ToolMessage` representing a result of `42` from a tool call with id

         ```python
         from langchain_core.messages import ToolMessage

         ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
         ```

     Example: A `ToolMessage` where only part of the tool output is sent to the model
         and the full output is passed in to artifact.

-    !!! version-added "Added in version 0.2.17"
-
         ```python
         from langchain_core.messages import ToolMessage

         tool_output = {
             "stdout": "From the graph we can see that the correlation between "
             "x and y is ...",
             "stderr": None,
             "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
         }

         ToolMessage(
             content=tool_output["stdout"],
             artifact=tool_output,
             tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
         )
         ```

     The `tool_call_id` field is used to associate the tool call request with the
-    tool call response. This is useful in situations where a chat model is able
+    tool call response. Useful in situations where a chat model is able
     to request multiple tool calls in parallel.

     """
@@ -69,11 +67,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     """Tool call that this message is responding to."""

     type: Literal["tool"] = "tool"
-    """The type of the message (used for serialization).
-
-    Defaults to `'tool'`.
-
-    """
+    """The type of the message (used for serialization)."""

     artifact: Any = None
     """Artifact of the Tool execution which is not meant to be sent to the model.
@@ -82,21 +76,15 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     a subset of the full tool output is being passed as message content but the full
     output is needed in other parts of the code.

-    !!! version-added "Added in version 0.2.17"
-
     """

     status: Literal["success", "error"] = "success"
-    """Status of the tool invocation.
-
-    !!! version-added "Added in version 0.2.24"
-
-    """
+    """Status of the tool invocation."""

     additional_kwargs: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage, but not used."""
+    """Currently inherited from `BaseMessage`, but not used."""
     response_metadata: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage, but not used."""
+    """Currently inherited from `BaseMessage`, but not used."""

     @model_validator(mode="before")
     @classmethod
@@ -164,12 +152,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Initialize `ToolMessage`.
+        """Initialize a `ToolMessage`.

         Specify `content` as positional arg or `content_blocks` for typing.

         Args:
-            content: The string contents of the message.
+            content: The contents of the message.
            content_blocks: Typed standard content.
            **kwargs: Additional fields.
        """
@@ -215,7 +203,7 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):


 class ToolCall(TypedDict):
-    """Represents a request to call a tool.
+    """Represents an AI's request to call a tool.

     Example:
         ```python
@@ -261,7 +249,7 @@ def tool_call(


 class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
+    """A chunk of a tool call (yielded when streaming).

     When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
     all string attributes are concatenated. Chunks are only merged if their

@@ -86,6 +86,7 @@ AnyMessage = Annotated[
     | Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")],
     Field(discriminator=Discriminator(_get_type)),
 ]
+"""A type representing any defined `Message` or `MessageChunk` type."""

 def get_buffer_string(
@@ -96,9 +97,7 @@ def get_buffer_string(
     Args:
         messages: Messages to be converted to strings.
         human_prefix: The prefix to prepend to contents of `HumanMessage`s.
-            Default is `'Human'`.
-        ai_prefix: The prefix to prepend to contents of `AIMessage`. Default is
-            `'AI'`.
+        ai_prefix: The prefix to prepend to contents of `AIMessage`.

     Returns:
         A single string concatenation of all input messages.
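For example:

```python
from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

messages = [HumanMessage(content="Hi, how are you?"), AIMessage(content="Good, thanks!")]
print(get_buffer_string(messages))
# Human: Hi, how are you?
# AI: Good, thanks!
```
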
@@ -211,6 +210,7 @@ def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
 MessageLikeRepresentation = (
     BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]
 )
+"""A type representing the various ways a message can be represented."""


 def _create_message_from_message_type(
@@ -227,10 +227,10 @@ def _create_message_from_message_type(
     Args:
         message_type: (str) the type of the message (e.g., `'human'`, `'ai'`, etc.).
         content: (str) the content string.
-        name: (str) the name of the message. Default is None.
-        tool_call_id: (str) the tool call id. Default is None.
-        tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
-        id: (str) the id of the message. Default is None.
+        name: (str) the name of the message.
+        tool_call_id: (str) the tool call id.
+        tool_calls: (list[dict[str, Any]]) the tool calls.
+        id: (str) the id of the message.
         additional_kwargs: (dict[str, Any]) additional keyword arguments.

     Returns:
@@ -319,7 +319,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
         message: a representation of a message in one of the supported formats.

     Returns:
-        an instance of a message or a message template.
+        An instance of a message or a message template.

     Raises:
         NotImplementedError: if the message type is not supported.
@@ -425,22 +425,22 @@ def filter_messages(

     Args:
         messages: Sequence Message-like objects to filter.
-        include_names: Message names to include. Default is None.
-        exclude_names: Messages names to exclude. Default is None.
+        include_names: Message names to include.
+        exclude_names: Messages names to exclude.
         include_types: Message types to include. Can be specified as string names
             (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
             classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
-            Default is None.
+
         exclude_types: Message types to exclude. Can be specified as string names
             (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
             classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
-            Default is None.
-        include_ids: Message IDs to include. Default is None.
-        exclude_ids: Message IDs to exclude. Default is None.
-        exclude_tool_calls: Tool call IDs to exclude. Default is None.
+
+        include_ids: Message IDs to include.
+        exclude_ids: Message IDs to exclude.
+        exclude_tool_calls: Tool call IDs to exclude.
             Can be one of the following:
-            - `True`: all `AIMessage`s with tool calls and all
-                `ToolMessage` objects will be excluded.
+            - `True`: All `AIMessage` objects with tool calls and all `ToolMessage`
+                objects will be excluded.
             - a sequence of tool call IDs to exclude:
                 - `ToolMessage` objects with the corresponding tool call ID will be
                     excluded.
@@ -568,7 +568,6 @@ def merge_message_runs(
     Args:
         messages: Sequence Message-like objects to merge.
         chunk_separator: Specify the string to be inserted between message chunks.
-            Defaults to `'\n'`.

     Returns:
         list of BaseMessages with consecutive runs of message types merged into single
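A short usage example of the filtering described above:

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    filter_messages,
)

messages = [
    SystemMessage(content="you are a helpful assistant"),
    HumanMessage(content="what's your name", id="foo", name="example_user"),
    AIMessage(content="steve-o", id="bar", name="example_assistant"),
]
# Keep only human and AI messages, additionally excluding a specific ID.
print(filter_messages(messages, include_types=["human", "ai"], exclude_ids=["bar"]))
```
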
@@ -703,7 +702,7 @@ def trim_messages(
     r"""Trim messages to be below a token count.

     `trim_messages` can be used to reduce the size of a chat history to a specified
-    token count or specified message count.
+    token or message count.

     In either case, if passing the trimmed chat history back into a chat model
     directly, the resulting chat history should usually satisfy the following
@@ -714,8 +713,6 @@ def trim_messages(
        followed by a `HumanMessage`. To achieve this, set `start_on='human'`.
        In addition, generally a `ToolMessage` can only appear after an `AIMessage`
        that involved a tool call.
-       Please see the following link for more information about messages:
-       https://python.langchain.com/docs/concepts/#messages
     2. It includes recent messages and drops old messages in the chat history.
        To achieve this set the `strategy='last'`.
     3. Usually, the new chat history should include the `SystemMessage` if it
@@ -745,12 +742,10 @@ def trim_messages(
         strategy: Strategy for trimming.
             - `'first'`: Keep the first `<= n_count` tokens of the messages.
            - `'last'`: Keep the last `<= n_count` tokens of the messages.
-            Default is `'last'`.
         allow_partial: Whether to split a message if only part of the message can be
            included. If `strategy='last'` then the last partial contents of a message
            are included. If `strategy='first'` then the first partial contents of a
            message are included.
-            Default is False.
         end_on: The message type to end on. If specified then every message after the
            last occurrence of this type is ignored. If `strategy='last'` then this
            is done before we attempt to get the last `max_tokens`. If
@@ -759,7 +754,7 @@ def trim_messages(
            `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g.
            `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single
            type or a list of types.
-            Default is None.
+
         start_on: The message type to start on. Should only be specified if
            `strategy='last'`. If specified then every message before
            the first occurrence of this type is ignored. This is done after we trim
@@ -768,10 +763,9 @@ def trim_messages(
            specified as string names (e.g. `'system'`, `'human'`, `'ai'`, ...) or
            as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`,
            `AIMessage`, ...). Can be a single type or a list of types.
-            Default is None.
-        include_system: Whether to keep the SystemMessage if there is one at index 0.
-            Should only be specified if `strategy="last"`.
-            Default is False.
+
+        include_system: Whether to keep the `SystemMessage` if there is one at index
+            `0`. Should only be specified if `strategy="last"`.
         text_splitter: Function or `langchain_text_splitters.TextSplitter` for
            splitting the string contents of a message. Only used if
            `allow_partial=True`. If `strategy='last'` then the last split tokens
@@ -782,7 +776,7 @@ def trim_messages(
            newlines.

     Returns:
-        list of trimmed `BaseMessage`.
+        List of trimmed `BaseMessage`.

     Raises:
         ValueError: if two incompatible arguments are specified or an unrecognized
|
||||
Args:
|
||||
messages: List of messages to count tokens for.
|
||||
chars_per_token: Number of characters per token to use for the approximation.
|
||||
Default is 4 (one token corresponds to ~4 chars for common English text).
|
||||
You can also specify float values for more fine-grained control.
|
||||
One token corresponds to ~4 chars for common English text.
|
||||
You can also specify `float` values for more fine-grained control.
|
||||
[See more here](https://platform.openai.com/tokenizer).
|
||||
extra_tokens_per_message: Number of extra tokens to add per message.
|
||||
Default is 3 (special tokens, including beginning/end of message).
|
||||
You can also specify float values for more fine-grained control.
|
||||
extra_tokens_per_message: Number of extra tokens to add per message, e.g.
|
||||
special tokens, including beginning/end of message.
|
||||
You can also specify `float` values for more fine-grained control.
|
||||
[See more here](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb).
|
||||
count_name: Whether to include message names in the count.
|
||||
Enabled by default.
|
||||
|
||||
@@ -31,13 +31,13 @@ class BaseLLMOutputParser(ABC, Generic[T]):
|
||||
|
||||
@abstractmethod
|
||||
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
|
||||
"""Parse a list of candidate model Generations into a specific format.
|
||||
"""Parse a list of candidate model `Generation` objects into a specific format.
|
||||
|
||||
Args:
|
||||
result: A list of Generations to be parsed. The Generations are assumed
|
||||
to be different candidate outputs for a single model input.
|
||||
result: A list of `Generation` to be parsed. The `Generation` objects are
|
||||
assumed to be different candidate outputs for a single model input.
|
||||
partial: Whether to parse the output as a partial result. This is useful
|
||||
for parsers that can parse partial results. Default is False.
|
||||
for parsers that can parse partial results.
|
||||
|
||||
Returns:
|
||||
Structured output.
|
||||
@@ -46,17 +46,17 @@ class BaseLLMOutputParser(ABC, Generic[T]):
|
||||
async def aparse_result(
|
||||
self, result: list[Generation], *, partial: bool = False
|
||||
) -> T:
|
||||
"""Async parse a list of candidate model Generations into a specific format.
|
||||
"""Async parse a list of candidate model `Generation` objects into a specific format.
|
||||
|
||||
Args:
|
||||
result: A list of Generations to be parsed. The Generations are assumed
|
||||
result: A list of `Generation` to be parsed. The Generations are assumed
|
||||
to be different candidate outputs for a single model input.
|
||||
partial: Whether to parse the output as a partial result. This is useful
|
||||
for parsers that can parse partial results. Default is False.
|
||||
for parsers that can parse partial results.
|
||||
|
||||
Returns:
|
||||
Structured output.
|
||||
"""
|
||||
""" # noqa: E501
|
||||
return await run_in_executor(None, self.parse_result, result, partial=partial)
|
||||
|
||||
|
||||
@@ -172,7 +172,7 @@ class BaseOutputParser(
|
||||
This property is inferred from the first type argument of the class.
|
||||
|
||||
Raises:
|
||||
TypeError: If the class doesn't have an inferable OutputType.
|
||||
TypeError: If the class doesn't have an inferable `OutputType`.
|
||||
"""
|
||||
for base in self.__class__.mro():
|
||||
if hasattr(base, "__pydantic_generic_metadata__"):
|
||||
@@ -234,16 +234,16 @@ class BaseOutputParser(
|
||||
|
||||
@override
|
||||
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
|
||||
"""Parse a list of candidate model Generations into a specific format.
|
||||
"""Parse a list of candidate model `Generation` objects into a specific format.
|
||||
|
||||
The return value is parsed from only the first Generation in the result, which
|
||||
is assumed to be the highest-likelihood Generation.
|
||||
The return value is parsed from only the first `Generation` in the result, which
|
||||
is assumed to be the highest-likelihood `Generation`.
|
||||
|
||||
Args:
|
||||
result: A list of Generations to be parsed. The Generations are assumed
|
||||
to be different candidate outputs for a single model input.
|
||||
result: A list of `Generation` to be parsed. The `Generation` objects are
|
||||
assumed to be different candidate outputs for a single model input.
|
||||
partial: Whether to parse the output as a partial result. This is useful
|
||||
for parsers that can parse partial results. Default is False.
|
||||
for parsers that can parse partial results.
|
||||
|
||||
Returns:
|
||||
Structured output.
|
||||
@@ -264,20 +264,20 @@ class BaseOutputParser(
|
||||
async def aparse_result(
|
||||
self, result: list[Generation], *, partial: bool = False
|
||||
) -> T:
|
||||
"""Async parse a list of candidate model Generations into a specific format.
|
||||
"""Async parse a list of candidate model `Generation` objects into a specific format.
|
||||
|
||||
The return value is parsed from only the first Generation in the result, which
|
||||
is assumed to be the highest-likelihood Generation.
|
||||
The return value is parsed from only the first `Generation` in the result, which
|
||||
is assumed to be the highest-likelihood `Generation`.
|
||||
|
||||
Args:
|
||||
result: A list of Generations to be parsed. The Generations are assumed
|
||||
to be different candidate outputs for a single model input.
|
||||
result: A list of `Generation` to be parsed. The `Generation` objects are
|
||||
assumed to be different candidate outputs for a single model input.
|
||||
partial: Whether to parse the output as a partial result. This is useful
|
||||
for parsers that can parse partial results. Default is False.
|
||||
for parsers that can parse partial results.
|
||||
|
||||
Returns:
|
||||
Structured output.
|
||||
"""
|
||||
""" # noqa: E501
|
||||
return await run_in_executor(None, self.parse_result, result, partial=partial)
|
||||
|
||||
async def aparse(self, text: str) -> T:
|
||||
@@ -299,13 +299,13 @@ class BaseOutputParser(
|
||||
) -> Any:
|
||||
"""Parse the output of an LLM call with the input prompt for context.
|
||||
|
||||
The prompt is largely provided in the event the OutputParser wants
|
||||
The prompt is largely provided in the event the `OutputParser` wants
|
||||
to retry or fix the output in some way, and needs information from
|
||||
the prompt to do so.
|
||||
|
||||
Args:
|
||||
completion: String output of a language model.
|
||||
prompt: Input PromptValue.
|
||||
prompt: Input `PromptValue`.
|
||||
|
||||
Returns:
|
||||
Structured output.
|
||||
|
||||
@@ -62,7 +62,6 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
             If `True`, the output will be a JSON object containing
             all the keys that have been returned so far.
             If `False`, the output will be the full JSON object.
-            Default is False.

         Returns:
             The parsed JSON object.

@@ -146,7 +146,7 @@ class CommaSeparatedListOutputParser(ListOutputParser):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "output_parsers", "list"]`

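For reference, the list parser in action:

```python
from langchain_core.output_parsers import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()
print(parser.parse("red, green, blue"))
# ['red', 'green', 'blue']
```
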
@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

     Args:
         result: The result of the LLM call.
-        partial: Whether to parse partial JSON objects. Default is False.
+        partial: Whether to parse partial JSON objects.

     Returns:
         The parsed JSON object.

     Raises:
-        OutputParserException: If the output is not valid JSON.
+        `OutputParserException`: If the output is not valid JSON.
     """
     generation = result[0]
     if not isinstance(generation, ChatGeneration):
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):


 class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
-    """Parse an output as the Json object."""
+    """Parse an output as the JSON object."""

     strict: bool = False
     """Whether to allow non-JSON-compliant strings.
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):

     Args:
         result: The result of the LLM call.
-        partial: Whether to parse partial JSON objects. Default is False.
+        partial: Whether to parse partial JSON objects.

     Returns:
         The parsed JSON object.

     Raises:
-        OutputParserException: If the output is not valid JSON.
+        `OutputParserException`: If the output is not valid JSON.
     """
     if len(result) != 1:
         msg = f"Expected exactly one result, but got {len(result)}"
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):


 class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
-    """Parse an output as the element of the Json object."""
+    """Parse an output as the element of the JSON object."""

     key_name: str
     """The name of the key to return."""
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):

     Args:
         result: The result of the LLM call.
-        partial: Whether to parse partial JSON objects. Default is False.
+        partial: Whether to parse partial JSON objects.

     Returns:
         The parsed JSON object.
@@ -177,16 +177,15 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):


 class PydanticOutputFunctionsParser(OutputFunctionsParser):
-    """Parse an output as a pydantic object.
+    """Parse an output as a Pydantic object.

-    This parser is used to parse the output of a ChatModel that uses
-    OpenAI function format to invoke functions.
+    This parser is used to parse the output of a chat model that uses OpenAI function
+    format to invoke functions.

-    The parser extracts the function call invocation and matches
-    them to the pydantic schema provided.
+    The parser extracts the function call invocation and matches them to the Pydantic
+    schema provided.

-    An exception will be raised if the function call does not match
-    the provided schema.
+    An exception will be raised if the function call does not match the provided schema.

     Example:
         ```python
@@ -221,7 +220,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
     """

     pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
-    """The pydantic schema to parse the output with.
+    """The Pydantic schema to parse the output with.

     If multiple schemas are provided, then the function name will be used to
     determine which schema to use.
@@ -230,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
     @model_validator(mode="before")
     @classmethod
     def validate_schema(cls, values: dict) -> Any:
-        """Validate the pydantic schema.
+        """Validate the Pydantic schema.

         Args:
             values: The values to validate.
@@ -239,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
             The validated values.

         Raises:
-            ValueError: If the schema is not a pydantic schema.
+            ValueError: If the schema is not a Pydantic schema.
         """
         schema = values["pydantic_schema"]
         if "args_only" not in values:
@@ -262,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.

         Raises:
-            ValueError: If the pydantic schema is not valid.
+            ValueError: If the Pydantic schema is not valid.

         Returns:
             The parsed JSON object.
@@ -288,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         elif issubclass(pydantic_schema, BaseModelV1):
             pydantic_args = pydantic_schema.parse_raw(args)
         else:
-            msg = f"Unsupported pydantic schema: {pydantic_schema}"
+            msg = f"Unsupported Pydantic schema: {pydantic_schema}"
             raise ValueError(msg)
         return pydantic_args


 class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
-    """Parse an output as an attribute of a pydantic object."""
+    """Parse an output as an attribute of a Pydantic object."""

     attr_name: str
     """The name of the attribute to return."""
@@ -305,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):

     Args:
         result: The result of the LLM call.
-        partial: Whether to parse partial JSON objects. Default is False.
+        partial: Whether to parse partial JSON objects.

     Returns:
         The parsed JSON object.

@@ -31,10 +31,9 @@ def parse_tool_call(

     Args:
         raw_tool_call: The raw tool call to parse.
-        partial: Whether to parse partial JSON. Default is False.
+        partial: Whether to parse partial JSON.
         strict: Whether to allow non-JSON-compliant strings.
-            Default is False.
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.

     Returns:
         The parsed tool call.
@@ -105,10 +104,9 @@ def parse_tool_calls(

     Args:
         raw_tool_calls: The raw tool calls to parse.
-        partial: Whether to parse partial JSON. Default is False.
+        partial: Whether to parse partial JSON.
         strict: Whether to allow non-JSON-compliant strings.
-            Default is False.
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.

     Returns:
         The parsed tool calls.
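The raw tool call is the OpenAI-style dict with a stringified `arguments` field; `parse_tool_call` decodes it into the standard tool-call dict (the exact output keys shown in the comment are what the standard format implies, not copied from this diff):

```python
from langchain_core.output_parsers.openai_tools import parse_tool_call

raw = {
    "id": "call_abc123",
    "function": {"name": "multiply", "arguments": '{"a": 3, "b": 4}'},
}
print(parse_tool_call(raw, return_id=True))
# e.g. {'name': 'multiply', 'args': {'a': 3, 'b': 4}, 'id': 'call_abc123', 'type': 'tool_call'}
```
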
@@ -165,7 +163,6 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
             If `True`, the output will be a JSON object containing
             all the keys that have been returned so far.
             If `False`, the output will be the full JSON object.
-            Default is False.

         Returns:
             The parsed tool calls.
@@ -229,7 +226,6 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
             If `True`, the output will be a JSON object containing
             all the keys that have been returned so far.
             If `False`, the output will be the full JSON object.
-            Default is False.

         Raises:
             OutputParserException: If the generation is not a chat generation.
@@ -313,7 +309,6 @@ class PydanticToolsParser(JsonOutputToolsParser):
             If `True`, the output will be a JSON object containing
             all the keys that have been returned so far.
             If `False`, the output will be the full JSON object.
-            Default is False.

         Returns:
             The parsed Pydantic objects.

@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (


 class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
-    """Parse an output using a pydantic model."""
+    """Parse an output using a Pydantic model."""

     pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
-    """The pydantic model to parse."""
+    """The Pydantic model to parse."""

     def _parse_obj(self, obj: dict) -> TBaseModel:
         try:
@@ -45,21 +45,20 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
     def parse_result(
         self, result: list[Generation], *, partial: bool = False
     ) -> TBaseModel | None:
-        """Parse the result of an LLM call to a pydantic object.
+        """Parse the result of an LLM call to a Pydantic object.

         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON objects.
                 If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-                Defaults to `False`.

         Raises:
-            OutputParserException: If the result is not valid JSON
-                or does not conform to the pydantic model.
+            `OutputParserException`: If the result is not valid JSON
+                or does not conform to the Pydantic model.

         Returns:
-            The parsed pydantic object.
+            The parsed Pydantic object.
         """
         try:
             json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
             raise

     def parse(self, text: str) -> TBaseModel:
-        """Parse the output of an LLM call to a pydantic object.
+        """Parse the output of an LLM call to a Pydantic object.

         Args:
             text: The output of the LLM call.

         Returns:
-            The parsed pydantic object.
+            The parsed Pydantic object.
         """
         return super().parse(text)

@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
     @property
     @override
     def OutputType(self) -> type[TBaseModel]:
-        """Return the pydantic model."""
+        """Return the Pydantic model."""
         return self.pydantic_object


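End to end, the parser validates raw model text against the supplied Pydantic model:

```python
from langchain_core.output_parsers import PydanticOutputParser
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputParser(pydantic_object=Joke)
joke = parser.parse(
    '{"setup": "Why did the chicken cross the road?", '
    '"punchline": "To get to the other side."}'
)
print(joke.punchline)
```
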
@@ -19,7 +19,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "output_parser"]`

@@ -44,7 +44,7 @@ class Generation(Serializable):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "output"]`

@@ -97,7 +97,7 @@ class LLMResult(BaseModel):
         other: Another `LLMResult` object to compare against.

     Returns:
-        True if the generations and `llm_output` are equal, False otherwise.
+        `True` if the generations and `llm_output` are equal, `False` otherwise.
     """
     if not isinstance(other, LLMResult):
         return NotImplemented

@@ -24,8 +24,8 @@ from langchain_core.messages import (
 class PromptValue(Serializable, ABC):
     """Base abstract class for inputs to any language model.

-    PromptValues can be converted to both LLM (pure text-generation) inputs and
-    ChatModel inputs.
+    `PromptValues` can be converted to both LLM (pure text-generation) inputs and
+    chat model inputs.
     """

     @classmethod
@@ -35,7 +35,7 @@ class PromptValue(Serializable, ABC):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         This is used to determine the namespace of the object when serializing.

@@ -62,7 +62,7 @@ class StringPromptValue(PromptValue):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         This is used to determine the namespace of the object when serializing.

@@ -99,7 +99,7 @@ class ChatPromptValue(PromptValue):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         This is used to determine the namespace of the object when serializing.

@@ -113,11 +113,11 @@ class ImageURL(TypedDict, total=False):
     """Image URL."""

     detail: Literal["auto", "low", "high"]
-    """Specifies the detail level of the image. Defaults to `'auto'`.
+    """Specifies the detail level of the image.
+
+    Can be `'auto'`, `'low'`, or `'high'`.

     This follows OpenAI's Chat Completion API's image URL format.

     """

     url: str

@@ -96,7 +96,7 @@ class BasePromptTemplate(

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "prompt_template"]`

@@ -135,7 +135,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

     n_messages: PositiveInt | None = None
     """Maximum number of messages to include. If `None`, then will include all.
-    Defaults to `None`."""
+    """

     def __init__(
         self, variable_name: str, *, optional: bool = False, **kwargs: Any
@@ -147,7 +147,6 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
             optional: If `True` format_messages can be called with no arguments and will
                 return an empty list. If `False` then a named argument with name
                 `variable_name` must be passed in, even if the value is an empty list.
-                Defaults to `False`.]
         """
         # mypy can't detect the init which is defined in the parent class
         # b/c these are BaseModel classes.
@@ -195,7 +194,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
         """Human-readable representation.

         Args:
-            html: Whether to format as HTML. Defaults to `False`.
+            html: Whether to format as HTML.

         Returns:
             Human-readable representation.
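A typical use of the `optional` flag documented above:

```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="history", optional=True),
        ("human", "{question}"),
    ]
)
# `history` may be omitted entirely because the placeholder is optional.
print(prompt.invoke({"question": "What is LangChain?"}))
```
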
@@ -235,13 +234,13 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):

         Args:
             template: a template.
-            template_format: format of the template. Defaults to "f-string".
+            template_format: format of the template.
             partial_variables: A dictionary of variables that can be used to partially
                 fill in the template. For example, if the template is
                 `"{variable1} {variable2}"`, and `partial_variables` is
                 `{"variable1": "foo"}`, then the final prompt will be
                 `"foo {variable2}"`.
-                Defaults to `None`.

             **kwargs: keyword arguments to pass to the constructor.

         Returns:
@@ -330,7 +329,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
         """Human-readable representation.

         Args:
-            html: Whether to format as HTML. Defaults to `False`.
+            html: Whether to format as HTML.

         Returns:
             Human-readable representation.
@@ -412,9 +411,9 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
         Args:
             template: a template.
             template_format: format of the template.
-                Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
+                Options are: 'f-string', 'mustache', 'jinja2'.
             partial_variables: A dictionary of variables that can be used too partially.
-                Defaults to `None`.

             **kwargs: keyword arguments to pass to the constructor.

         Returns:
@@ -637,7 +636,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
         """Human-readable representation.

         Args:
-            html: Whether to format as HTML. Defaults to `False`.
+            html: Whether to format as HTML.

         Returns:
             Human-readable representation.
@@ -750,7 +749,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
         """Human-readable representation.

         Args:
-            html: Whether to format as HTML. Defaults to `False`.
+            html: Whether to format as HTML.

         Returns:
             Human-readable representation.
@@ -905,7 +904,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
|
||||
(message type, template); e.g., ("human", "{user_input}"),
|
||||
(4) 2-tuple of (message class, template), (5) a string which is
|
||||
shorthand for ("human", template); e.g., "{user_input}".
|
||||
template_format: format of the template. Defaults to "f-string".
|
||||
template_format: format of the template.
|
||||
input_variables: A list of the names of the variables whose values are
|
||||
required as inputs to the prompt.
|
||||
optional_variables: A list of the names of the variables for placeholder
|
||||
@@ -971,7 +970,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
|
||||
|
||||
@classmethod
|
||||
def get_lc_namespace(cls) -> list[str]:
|
||||
"""Get the namespace of the langchain object.
|
||||
"""Get the namespace of the LangChain object.
|
||||
|
||||
Returns:
|
||||
`["langchain", "prompts", "chat"]`
|
||||
@@ -1128,7 +1127,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
|
||||
(message type, template); e.g., ("human", "{user_input}"),
|
||||
(4) 2-tuple of (message class, template), (5) a string which is
|
||||
shorthand for ("human", template); e.g., "{user_input}".
|
||||
template_format: format of the template. Defaults to "f-string".
|
||||
template_format: format of the template.
|
||||
|
||||
Returns:
|
||||
a chat prompt template.
|
||||
@@ -1287,7 +1286,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
|
||||
"""Human-readable representation.
|
||||
|
||||
Args:
|
||||
html: Whether to format as HTML. Defaults to `False`.
|
||||
html: Whether to format as HTML.
|
||||
|
||||
Returns:
|
||||
Human-readable representation.
|
||||
@@ -1306,7 +1305,7 @@ def _create_template_from_message_type(
 Args:
     message_type: str the type of the message template (e.g., "human", "ai", etc.)
     template: str the template string.
-    template_format: format of the template. Defaults to "f-string".
+    template_format: format of the template.

 Returns:
     a message prompt template of the appropriate type.
@@ -1383,7 +1382,7 @@ def _convert_to_message_template(

 Args:
     message: a representation of a message in one of the supported formats.
-    template_format: format of the template. Defaults to "f-string".
+    template_format: format of the template.

 Returns:
     an instance of a message or a message template.

@@ -74,7 +74,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
         `["langchain_core", "prompts", "dict"]`
@@ -85,7 +85,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
 """Human-readable representation.

 Args:
-    html: Whether to format as HTML. Defaults to `False`.
+    html: Whether to format as HTML.

 Returns:
     Human-readable representation.

@@ -46,7 +46,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
         `["langchain", "prompts", "few_shot_with_templates"]`

@@ -49,7 +49,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
         `["langchain", "prompts", "image"]`

@@ -139,7 +139,7 @@ def load_prompt(path: str | Path, encoding: str | None = None) -> BasePromptTemp

 Args:
     path: Path to the prompt file.
-    encoding: Encoding of the file. Defaults to `None`.
+    encoding: Encoding of the file.

 Returns:
     A PromptTemplate object.

@@ -23,7 +23,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
         `["langchain", "prompts", "chat"]`
@@ -68,7 +68,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):
 """Human-readable representation.

 Args:
-    html: Whether to format as HTML. Defaults to `False`.
+    html: Whether to format as HTML.

 Returns:
     Human-readable representation.

@@ -66,7 +66,7 @@ class PromptTemplate(StringPromptTemplate):
 @classmethod
 @override
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
         `["langchain", "prompts", "prompt"]`
@@ -220,7 +220,7 @@ class PromptTemplate(StringPromptTemplate):
     example_separator: The separator to use in between examples. Defaults
         to two new line characters.
     prefix: String that should go before any examples. Generally includes
-        examples. Default to an empty string.
+        examples.

 Returns:
     The final prompt generated.
@@ -275,13 +275,12 @@ class PromptTemplate(StringPromptTemplate):
 Args:
     template: The template to load.
     template_format: The format of the template. Use `jinja2` for jinja2,
         `mustache` for mustache, and `f-string` for f-strings.
-        Defaults to `f-string`.
     partial_variables: A dictionary of variables that can be used to partially
         fill in the template. For example, if the template is
         `"{variable1} {variable2}"`, and `partial_variables` is
         `{"variable1": "foo"}`, then the final prompt will be
-        `"foo {variable2}"`. Defaults to `None`.
+        `"foo {variable2}"`.
     **kwargs: Any other arguments to pass to the prompt template.

 Returns:

@@ -4,7 +4,7 @@ from __future__ import annotations

 import warnings
 from abc import ABC
-from collections.abc import Callable
+from collections.abc import Callable, Sequence
 from string import Formatter
 from typing import Any, Literal

@@ -122,13 +122,16 @@ def mustache_formatter(template: str, /, **kwargs: Any) -> str:
 def mustache_template_vars(
     template: str,
 ) -> set[str]:
-    """Get the variables from a mustache template.
+    """Get the top-level variables from a mustache template.
+
+    For nested variables like `{{person.name}}`, only the top-level
+    key (`person`) is returned.

     Args:
         template: The template string.

     Returns:
-        The variables from the template.
+        The top-level variables from the template.
     """
     variables: set[str] = set()
     section_depth = 0
@@ -149,9 +152,7 @@ def mustache_template_vars(

 Defs = dict[str, "Defs"]


-def mustache_schema(
-    template: str,
-) -> type[BaseModel]:
+def mustache_schema(template: str) -> type[BaseModel]:
     """Get the variables from a mustache template.

     Args:
@@ -175,6 +176,11 @@ def mustache_schema(
     fields[prefix] = False
 elif type_ in {"variable", "no escape"}:
     fields[prefix + tuple(key.split("."))] = True
+
+for fkey, fval in fields.items():
+    fields[fkey] = fval and not any(
+        is_subsequence(fkey, k) for k in fields if k != fkey
+    )
 defs: Defs = {}  # None means leaf node
 while fields:
     field, is_leaf = fields.popitem()
@@ -273,7 +279,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
         `["langchain", "prompts", "base"]`
@@ -327,3 +333,12 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
 def pretty_print(self) -> None:
     """Print a pretty representation of the prompt."""
     print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201
+
+
+def is_subsequence(child: Sequence, parent: Sequence) -> bool:
+    """Return True if child is subsequence of parent."""
+    if len(child) == 0 or len(parent) == 0:
+        return False
+    if len(parent) < len(child):
+        return False
+    return all(child[i] == parent[i] for i in range(len(child)))

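A quick illustration of the top-level-variable behavior documented in the `mustache_template_vars` hunk above; this is a sketch against the public `PromptTemplate` API (the helper itself is private), assuming current mustache support in `langchain_core`:

```python
from langchain_core.prompts import PromptTemplate

# For nested mustache variables, only the top-level key is reported
# as an input variable, matching the docstring change above.
prompt = PromptTemplate.from_template(
    "Hello {{person.name}} from {{city}}!", template_format="mustache"
)
print(sorted(prompt.input_variables))  # ['city', 'person']
```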
@@ -63,13 +63,13 @@ class StructuredPrompt(ChatPromptTemplate):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     For example, if the class is `langchain.llms.openai.OpenAI`, then the
     namespace is `["langchain", "llms", "openai"]`

     Returns:
-        The namespace of the langchain object.
+        The namespace of the LangChain object.
     """
     return cls.__module__.split(".")

@@ -144,7 +144,7 @@ class StructuredPrompt(ChatPromptTemplate):

 Args:
     others: The language model to pipe the structured prompt to.
-    name: The name of the pipeline. Defaults to `None`.
+    name: The name of the pipeline.

 Returns:
     A RunnableSequence object.

@@ -1,30 +0,0 @@
-"""Pydantic v1 compatibility shim."""
-
-from importlib import metadata
-
-from pydantic.v1 import *  # noqa: F403
-
-from langchain_core._api.deprecation import warn_deprecated
-
-try:
-    _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
-except metadata.PackageNotFoundError:
-    _PYDANTIC_MAJOR_VERSION = 0
-
-warn_deprecated(
-    "0.3.0",
-    removal="1.0.0",
-    alternative="pydantic.v1 or pydantic",
-    message=(
-        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
-        "The langchain_core.pydantic_v1 module was a "
-        "compatibility shim for pydantic v1, and should no longer be used. "
-        "Please update the code to import from Pydantic directly.\n\n"
-        "For example, replace imports like: "
-        "`from langchain_core.pydantic_v1 import BaseModel`\n"
-        "with: `from pydantic import BaseModel`\n"
-        "or the v1 compatibility namespace if you are working in a code base "
-        "that has not been fully upgraded to pydantic 2 yet. "
-        "\tfrom pydantic.v1 import BaseModel\n"
-    ),
-)
@@ -1,23 +0,0 @@
-"""Pydantic v1 compatibility shim."""
-
-from pydantic.v1.dataclasses import *  # noqa: F403
-
-from langchain_core._api import warn_deprecated
-
-warn_deprecated(
-    "0.3.0",
-    removal="1.0.0",
-    alternative="pydantic.v1 or pydantic",
-    message=(
-        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
-        "The langchain_core.pydantic_v1 module was a "
-        "compatibility shim for pydantic v1, and should no longer be used. "
-        "Please update the code to import from Pydantic directly.\n\n"
-        "For example, replace imports like: "
-        "`from langchain_core.pydantic_v1 import BaseModel`\n"
-        "with: `from pydantic import BaseModel`\n"
-        "or the v1 compatibility namespace if you are working in a code base "
-        "that has not been fully upgraded to pydantic 2 yet. "
-        "\tfrom pydantic.v1 import BaseModel\n"
-    ),
-)
@@ -1,23 +0,0 @@
-"""Pydantic v1 compatibility shim."""
-
-from pydantic.v1.main import *  # noqa: F403
-
-from langchain_core._api import warn_deprecated
-
-warn_deprecated(
-    "0.3.0",
-    removal="1.0.0",
-    alternative="pydantic.v1 or pydantic",
-    message=(
-        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
-        "The langchain_core.pydantic_v1 module was a "
-        "compatibility shim for pydantic v1, and should no longer be used. "
-        "Please update the code to import from Pydantic directly.\n\n"
-        "For example, replace imports like: "
-        "`from langchain_core.pydantic_v1 import BaseModel`\n"
-        "with: `from pydantic import BaseModel`\n"
-        "or the v1 compatibility namespace if you are working in a code base "
-        "that has not been fully upgraded to pydantic 2 yet. "
-        "\tfrom pydantic.v1 import BaseModel\n"
-    ),
-)
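These three hunks delete the `langchain_core.pydantic_v1` shim modules outright; the migration the removed warning itself prescribed is simply:

```python
# Before (removed shim, deprecated since langchain-core 0.3.0):
# from langchain_core.pydantic_v1 import BaseModel

# After: import Pydantic directly.
from pydantic import BaseModel

# Or, for a code base not yet fully upgraded to pydantic 2:
from pydantic.v1 import BaseModel as BaseModelV1
```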
@@ -21,11 +21,8 @@ class BaseRateLimiter(abc.ABC):
 Current limitations:

 - Rate limiting information is not surfaced in tracing or callbacks. This means
-      that the total time it takes to invoke a chat model will encompass both
-      the time spent waiting for tokens and the time spent making the request.
-
-
-!!! version-added "Added in version 0.2.24"
+      that the total time it takes to invoke a chat model will encompass both
+      the time spent waiting for tokens and the time spent making the request.
 """

 @abc.abstractmethod
@@ -33,18 +30,18 @@ class BaseRateLimiter(abc.ABC):
 """Attempt to acquire the necessary tokens for the rate limiter.

 This method blocks until the required tokens are available if `blocking`
-is set to True.
+is set to `True`.

-If `blocking` is set to False, the method will immediately return the result
+If `blocking` is set to `False`, the method will immediately return the result
 of the attempt to acquire the tokens.

 Args:
     blocking: If `True`, the method will block until the tokens are available.
         If `False`, the method will return immediately with the result of
-        the attempt. Defaults to `True`.
+        the attempt.

 Returns:
-    True if the tokens were successfully acquired, False otherwise.
+    `True` if the tokens were successfully acquired, `False` otherwise.
 """

 @abc.abstractmethod
@@ -52,18 +49,18 @@ class BaseRateLimiter(abc.ABC):
 """Attempt to acquire the necessary tokens for the rate limiter.

 This method blocks until the required tokens are available if `blocking`
-is set to True.
+is set to `True`.

-If `blocking` is set to False, the method will immediately return the result
+If `blocking` is set to `False`, the method will immediately return the result
 of the attempt to acquire the tokens.

 Args:
     blocking: If `True`, the method will block until the tokens are available.
         If `False`, the method will return immediately with the result of
-        the attempt. Defaults to `True`.
+        the attempt.

 Returns:
-    True if the tokens were successfully acquired, False otherwise.
+    `True` if the tokens were successfully acquired, `False` otherwise.
 """

@@ -84,7 +81,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
 not enough tokens in the bucket, the request is blocked until there are
 enough tokens.

-These *tokens* have NOTHING to do with LLM tokens. They are just
+These tokens have nothing to do with LLM tokens. They are just
 a way to keep track of how many requests can be made at a given time.

 Current limitations:
@@ -108,9 +105,7 @@ class InMemoryRateLimiter(BaseRateLimiter):

 from langchain_anthropic import ChatAnthropic

-model = ChatAnthropic(
-    model_name="claude-3-opus-20240229", rate_limiter=rate_limiter
-)
+model = ChatAnthropic(model_name="claude-sonnet-4-5", rate_limiter=rate_limiter)

 for _ in range(5):
     tic = time.time()
@@ -118,9 +113,6 @@ class InMemoryRateLimiter(BaseRateLimiter):
     toc = time.time()
     print(toc - tic)
 ```
-
-!!! version-added "Added in version 0.2.24"
-
 """  # noqa: E501

 def __init__(
@@ -132,7 +124,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
 ) -> None:
     """A rate limiter based on a token bucket.

-    These *tokens* have NOTHING to do with LLM tokens. They are just
+    These tokens have nothing to do with LLM tokens. They are just
     a way to keep track of how many requests can be made at a given time.

     This rate limiter is designed to work in a threaded environment.
@@ -145,11 +137,11 @@ class InMemoryRateLimiter(BaseRateLimiter):
 Args:
     requests_per_second: The number of tokens to add per second to the bucket.
         The tokens represent "credit" that can be used to make requests.
-    check_every_n_seconds: check whether the tokens are available
+    check_every_n_seconds: Check whether the tokens are available
         every this many seconds. Can be a float to represent
         fractions of a second.
     max_bucket_size: The maximum number of tokens that can be in the bucket.
-        Must be at least 1. Used to prevent bursts of requests.
+        Must be at least `1`. Used to prevent bursts of requests.
 """
 # Number of requests that we can make per second.
 self.requests_per_second = requests_per_second
@@ -199,18 +191,18 @@ class InMemoryRateLimiter(BaseRateLimiter):
 """Attempt to acquire a token from the rate limiter.

 This method blocks until the required tokens are available if `blocking`
-is set to True.
+is set to `True`.

-If `blocking` is set to False, the method will immediately return the result
+If `blocking` is set to `False`, the method will immediately return the result
 of the attempt to acquire the tokens.

 Args:
     blocking: If `True`, the method will block until the tokens are available.
         If `False`, the method will return immediately with the result of
-        the attempt. Defaults to `True`.
+        the attempt.

 Returns:
-    True if the tokens were successfully acquired, False otherwise.
+    `True` if the tokens were successfully acquired, `False` otherwise.
 """
 if not blocking:
     return self._consume()
@@ -223,18 +215,18 @@ class InMemoryRateLimiter(BaseRateLimiter):
 """Attempt to acquire a token from the rate limiter. Async version.

 This method blocks until the required tokens are available if `blocking`
-is set to True.
+is set to `True`.

-If `blocking` is set to False, the method will immediately return the result
+If `blocking` is set to `False`, the method will immediately return the result
 of the attempt to acquire the tokens.

 Args:
     blocking: If `True`, the method will block until the tokens are available.
         If `False`, the method will return immediately with the result of
-        the attempt. Defaults to `True`.
+        the attempt.

 Returns:
-    True if the tokens were successfully acquired, False otherwise.
+    `True` if the tokens were successfully acquired, `False` otherwise.
 """
 if not blocking:
     return self._consume()

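As a standalone illustration of the token-bucket semantics documented above, a minimal sketch that needs no chat model (uses the public `InMemoryRateLimiter` from `langchain_core`):

```python
import time

from langchain_core.rate_limiters import InMemoryRateLimiter

# One request every two seconds, availability checked every 100 ms, no bursts.
limiter = InMemoryRateLimiter(
    requests_per_second=0.5, check_every_n_seconds=0.1, max_bucket_size=1
)

for _ in range(3):
    tic = time.time()
    limiter.acquire()  # blocks until a request "token" (credit) is available
    print(f"acquired after {time.time() - tic:.2f}s")
```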
@@ -7,7 +7,6 @@ the backbone of a retriever, but there are other types of retrievers as well.

 from __future__ import annotations

-import warnings
 from abc import ABC, abstractmethod
 from inspect import signature
 from typing import TYPE_CHECKING, Any
@@ -15,8 +14,6 @@ from typing import TYPE_CHECKING, Any
 from pydantic import ConfigDict
 from typing_extensions import Self, TypedDict, override

-from langchain_core._api import deprecated
-from langchain_core.callbacks import Callbacks
 from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager
 from langchain_core.documents import Document
 from langchain_core.runnables import (
@@ -73,45 +70,45 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

 Example: A retriever that returns the first 5 documents from a list of documents

 ```python
 from langchain_core.documents import Document
 from langchain_core.retrievers import BaseRetriever

 class SimpleRetriever(BaseRetriever):
     docs: list[Document]
     k: int = 5

     def _get_relevant_documents(self, query: str) -> list[Document]:
         \"\"\"Return the first k documents from the list of documents\"\"\"
         return self.docs[:self.k]

     async def _aget_relevant_documents(self, query: str) -> list[Document]:
         \"\"\"(Optional) async native implementation.\"\"\"
         return self.docs[:self.k]
 ```

 Example: A simple retriever based on a scikit-learn vectorizer

 ```python
 from sklearn.metrics.pairwise import cosine_similarity


 class TFIDFRetriever(BaseRetriever, BaseModel):
     vectorizer: Any
     docs: list[Document]
     tfidf_array: Any
     k: int = 4

     class Config:
         arbitrary_types_allowed = True

     def _get_relevant_documents(self, query: str) -> list[Document]:
         # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
         query_vec = self.vectorizer.transform([query])
         # Op -- (n_docs,1) -- Cosine Sim with each doc
         results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
         return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
 ```
 """

 model_config = ConfigDict(
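A usage sketch for the `SimpleRetriever` class defined in the docstring example above (retrievers implement the standard `Runnable` interface, so `invoke` is the entry point):

```python
from langchain_core.documents import Document

docs = [Document(page_content=f"doc {i}") for i in range(10)]
retriever = SimpleRetriever(docs=docs)  # class from the docstring example above

# Retrievers are Runnables: invoke with a query string, get documents back.
print(len(retriever.invoke("anything")))  # 5
```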
@@ -121,14 +118,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
 _new_arg_supported: bool = False
 _expects_other_args: bool = False
 tags: list[str] | None = None
-"""Optional list of tags associated with the retriever. Defaults to `None`.
+"""Optional list of tags associated with the retriever.
 These tags will be associated with each call to this retriever,
 and passed as arguments to the handlers defined in `callbacks`.
 You can use these to eg identify a specific instance of a retriever with its
 use case.
 """
 metadata: dict[str, Any] | None = None
-"""Optional metadata associated with the retriever. Defaults to `None`.
+"""Optional metadata associated with the retriever.
 This metadata will be associated with each call to this retriever,
 and passed as arguments to the handlers defined in `callbacks`.
 You can use these to eg identify a specific instance of a retriever with its
@@ -138,35 +135,6 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
 @override
 def __init_subclass__(cls, **kwargs: Any) -> None:
     super().__init_subclass__(**kwargs)
-    # Version upgrade for old retrievers that implemented the public
-    # methods directly.
-    if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:
-        warnings.warn(
-            "Retrievers must implement abstract `_get_relevant_documents` method"
-            " instead of `get_relevant_documents`",
-            DeprecationWarning,
-            stacklevel=4,
-        )
-        swap = cls.get_relevant_documents
-        cls.get_relevant_documents = (  # type: ignore[method-assign]
-            BaseRetriever.get_relevant_documents
-        )
-        cls._get_relevant_documents = swap  # type: ignore[method-assign]
-    if (
-        hasattr(cls, "aget_relevant_documents")
-        and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
-    ):
-        warnings.warn(
-            "Retrievers must implement abstract `_aget_relevant_documents` method"
-            " instead of `aget_relevant_documents`",
-            DeprecationWarning,
-            stacklevel=4,
-        )
-        aswap = cls.aget_relevant_documents
-        cls.aget_relevant_documents = (  # type: ignore[method-assign]
-            BaseRetriever.aget_relevant_documents
-        )
-        cls._aget_relevant_documents = aswap  # type: ignore[method-assign]
     parameters = signature(cls._get_relevant_documents).parameters
     cls._new_arg_supported = parameters.get("run_manager") is not None
     if (
@@ -207,7 +175,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

 Args:
     input: The query string.
-    config: Configuration for the retriever. Defaults to `None`.
+    config: Configuration for the retriever.
     **kwargs: Additional arguments to pass to the retriever.

 Returns:
@@ -268,7 +236,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

 Args:
     input: The query string.
-    config: Configuration for the retriever. Defaults to `None`.
+    config: Configuration for the retriever.
     **kwargs: Additional arguments to pass to the retriever.

 Returns:
@@ -348,91 +316,3 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
     query,
     run_manager=run_manager.get_sync(),
 )
-
-@deprecated(since="0.1.46", alternative="invoke", removal="1.0")
-def get_relevant_documents(
-    self,
-    query: str,
-    *,
-    callbacks: Callbacks = None,
-    tags: list[str] | None = None,
-    metadata: dict[str, Any] | None = None,
-    run_name: str | None = None,
-    **kwargs: Any,
-) -> list[Document]:
-    """Retrieve documents relevant to a query.
-
-    Users should favor using `.invoke` or `.batch` rather than
-    `get_relevant_documents directly`.
-
-    Args:
-        query: string to find relevant documents for.
-        callbacks: Callback manager or list of callbacks. Defaults to `None`.
-        tags: Optional list of tags associated with the retriever.
-            These tags will be associated with each call to this retriever,
-            and passed as arguments to the handlers defined in `callbacks`.
-            Defaults to `None`.
-        metadata: Optional metadata associated with the retriever.
-            This metadata will be associated with each call to this retriever,
-            and passed as arguments to the handlers defined in `callbacks`.
-            Defaults to `None`.
-        run_name: Optional name for the run. Defaults to `None`.
-        **kwargs: Additional arguments to pass to the retriever.
-
-    Returns:
-        List of relevant documents.
-    """
-    config: RunnableConfig = {}
-    if callbacks:
-        config["callbacks"] = callbacks
-    if tags:
-        config["tags"] = tags
-    if metadata:
-        config["metadata"] = metadata
-    if run_name:
-        config["run_name"] = run_name
-    return self.invoke(query, config, **kwargs)
-
-@deprecated(since="0.1.46", alternative="ainvoke", removal="1.0")
-async def aget_relevant_documents(
-    self,
-    query: str,
-    *,
-    callbacks: Callbacks = None,
-    tags: list[str] | None = None,
-    metadata: dict[str, Any] | None = None,
-    run_name: str | None = None,
-    **kwargs: Any,
-) -> list[Document]:
-    """Asynchronously get documents relevant to a query.
-
-    Users should favor using `.ainvoke` or `.abatch` rather than
-    `aget_relevant_documents directly`.
-
-    Args:
-        query: string to find relevant documents for.
-        callbacks: Callback manager or list of callbacks.
-        tags: Optional list of tags associated with the retriever.
-            These tags will be associated with each call to this retriever,
-            and passed as arguments to the handlers defined in `callbacks`.
-            Defaults to `None`.
-        metadata: Optional metadata associated with the retriever.
-            This metadata will be associated with each call to this retriever,
-            and passed as arguments to the handlers defined in `callbacks`.
-            Defaults to `None`.
-        run_name: Optional name for the run. Defaults to `None`.
-        **kwargs: Additional arguments to pass to the retriever.
-
-    Returns:
-        List of relevant documents.
-    """
-    config: RunnableConfig = {}
-    if callbacks:
-        config["callbacks"] = callbacks
-    if tags:
-        config["tags"] = tags
-    if metadata:
-        config["metadata"] = metadata
-    if run_name:
-        config["run_name"] = run_name
-    return await self.ainvoke(query, config, **kwargs)

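Since the deprecated entry points are deleted here, the equivalent call through the `Runnable` interface, for any `BaseRetriever` instance `retriever`, mirrors what the removed implementation did (it packed the keyword arguments into a `RunnableConfig`):

```python
# Before (removed in this diff):
# docs = retriever.get_relevant_documents("query", tags=["qa"], run_name="lookup")

# After: pass the same options through the RunnableConfig dict.
docs = retriever.invoke("query", config={"tags": ["qa"], "run_name": "lookup"})
```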
@@ -304,7 +304,7 @@ class Runnable(ABC, Generic[Input, Output]):
     TypeError: If the input type cannot be inferred.
 """
 # First loop through all parent classes and if any of them is
-# a pydantic model, we will pick up the generic parameterization
+# a Pydantic model, we will pick up the generic parameterization
 # from that model via the __pydantic_generic_metadata__ attribute.
 for base in self.__class__.mro():
     if hasattr(base, "__pydantic_generic_metadata__"):
@@ -312,7 +312,7 @@ class Runnable(ABC, Generic[Input, Output]):
 if "args" in metadata and len(metadata["args"]) == 2:
     return metadata["args"][0]

-# If we didn't find a pydantic model in the parent classes,
+# If we didn't find a Pydantic model in the parent classes,
 # then loop through __orig_bases__. This corresponds to
 # Runnables that are not pydantic models.
 for cls in self.__class__.__orig_bases__:  # type: ignore[attr-defined]
@@ -390,7 +390,7 @@ class Runnable(ABC, Generic[Input, Output]):
 self.get_name("Input"),
 root=root_type,
 # create model needs access to appropriate type annotations to be
-# able to construct the pydantic model.
+# able to construct the Pydantic model.
 # When we create the model, we pass information about the namespace
 # where the model is being created, so the type annotations can
 # be resolved correctly as well.
@@ -433,7 +433,7 @@ class Runnable(ABC, Generic[Input, Output]):
 def output_schema(self) -> type[BaseModel]:
     """Output schema.

-    The type of output this `Runnable` produces specified as a pydantic model.
+    The type of output this `Runnable` produces specified as a Pydantic model.
     """
     return self.get_output_schema()

@@ -468,7 +468,7 @@ class Runnable(ABC, Generic[Input, Output]):
 self.get_name("Output"),
 root=root_type,
 # create model needs access to appropriate type annotations to be
-# able to construct the pydantic model.
+# able to construct the Pydantic model.
 # When we create the model, we pass information about the namespace
 # where the model is being created, so the type annotations can
 # be resolved correctly as well.
@@ -776,11 +776,11 @@ class Runnable(ABC, Generic[Input, Output]):
 SystemMessagePromptTemplate.from_template("You are a nice assistant.")
 + "{question}"
 )
-llm = FakeStreamingListLLM(responses=["foo-lish"])
+model = FakeStreamingListLLM(responses=["foo-lish"])

-chain: Runnable = prompt | llm | {"str": StrOutputParser()}
+chain: Runnable = prompt | model | {"str": StrOutputParser()}

-chain_with_assign = chain.assign(hello=itemgetter("str") | llm)
+chain_with_assign = chain.assign(hello=itemgetter("str") | model)

 print(chain_with_assign.input_schema.model_json_schema())
 # {'title': 'PromptInput', 'type': 'object', 'properties':
@@ -821,7 +821,7 @@ class Runnable(ABC, Generic[Input, Output]):
 The config supports standard keys like `'tags'`, `'metadata'` for
 tracing purposes, `'max_concurrency'` for controlling how much work to
 do in parallel, and other keys. Please refer to the `RunnableConfig`
-for more details. Defaults to `None`.
+for more details.

 Returns:
     The output of the `Runnable`.
@@ -841,7 +841,7 @@ class Runnable(ABC, Generic[Input, Output]):
 The config supports standard keys like `'tags'`, `'metadata'` for
 tracing purposes, `'max_concurrency'` for controlling how much work to
 do in parallel, and other keys. Please refer to the `RunnableConfig`
-for more details. Defaults to `None`.
+for more details.

 Returns:
     The output of the `Runnable`.
@@ -860,7 +860,7 @@ class Runnable(ABC, Generic[Input, Output]):

 The default implementation of batch works well for IO bound runnables.

-Subclasses should override this method if they can batch more efficiently;
+Subclasses must override this method if they can batch more efficiently;
 e.g., if the underlying `Runnable` uses an API which supports a batch mode.

 Args:
@@ -869,9 +869,8 @@ class Runnable(ABC, Generic[Input, Output]):
 standard keys like `'tags'`, `'metadata'` for
 tracing purposes, `'max_concurrency'` for controlling how much work
 to do in parallel, and other keys. Please refer to the
-`RunnableConfig` for more details. Defaults to `None`.
+`RunnableConfig` for more details.
 return_exceptions: Whether to return exceptions instead of raising them.
-    Defaults to `False`.
 **kwargs: Additional keyword arguments to pass to the `Runnable`.

 Returns:
@@ -936,9 +935,8 @@ class Runnable(ABC, Generic[Input, Output]):
 The config supports standard keys like `'tags'`, `'metadata'` for
 tracing purposes, `'max_concurrency'` for controlling how much work to
 do in parallel, and other keys. Please refer to the `RunnableConfig`
-for more details. Defaults to `None`.
+for more details.
 return_exceptions: Whether to return exceptions instead of raising them.
-    Defaults to `False`.
 **kwargs: Additional keyword arguments to pass to the `Runnable`.

 Yields:
@@ -994,7 +992,7 @@ class Runnable(ABC, Generic[Input, Output]):

 The default implementation of `batch` works well for IO bound runnables.

-Subclasses should override this method if they can batch more efficiently;
+Subclasses must override this method if they can batch more efficiently;
 e.g., if the underlying `Runnable` uses an API which supports a batch mode.

 Args:
@@ -1003,9 +1001,8 @@ class Runnable(ABC, Generic[Input, Output]):
 The config supports standard keys like `'tags'`, `'metadata'` for
 tracing purposes, `'max_concurrency'` for controlling how much work to
 do in parallel, and other keys. Please refer to the `RunnableConfig`
-for more details. Defaults to `None`.
+for more details.
 return_exceptions: Whether to return exceptions instead of raising them.
-    Defaults to `False`.
 **kwargs: Additional keyword arguments to pass to the `Runnable`.

 Returns:
@@ -1067,9 +1064,8 @@ class Runnable(ABC, Generic[Input, Output]):
 The config supports standard keys like `'tags'`, `'metadata'` for
 tracing purposes, `'max_concurrency'` for controlling how much work to
 do in parallel, and other keys. Please refer to the `RunnableConfig`
-for more details. Defaults to `None`.
+for more details.
 return_exceptions: Whether to return exceptions instead of raising them.
-    Defaults to `False`.
 **kwargs: Additional keyword arguments to pass to the `Runnable`.

 Yields:
@@ -1116,11 +1112,11 @@ class Runnable(ABC, Generic[Input, Output]):
 ) -> Iterator[Output]:
     """Default implementation of `stream`, which calls `invoke`.

-    Subclasses should override this method if they support streaming output.
+    Subclasses must override this method if they support streaming output.

     Args:
         input: The input to the `Runnable`.
-        config: The config to use for the `Runnable`. Defaults to `None`.
+        config: The config to use for the `Runnable`.
         **kwargs: Additional keyword arguments to pass to the `Runnable`.

     Yields:
@@ -1137,11 +1133,11 @@ class Runnable(ABC, Generic[Input, Output]):
 ) -> AsyncIterator[Output]:
     """Default implementation of `astream`, which calls `ainvoke`.

-    Subclasses should override this method if they support streaming output.
+    Subclasses must override this method if they support streaming output.

     Args:
         input: The input to the `Runnable`.
-        config: The config to use for the `Runnable`. Defaults to `None`.
+        config: The config to use for the `Runnable`.
         **kwargs: Additional keyword arguments to pass to the `Runnable`.

     Yields:
@@ -1273,22 +1269,20 @@ class Runnable(ABC, Generic[Input, Output]):

 A `StreamEvent` is a dictionary with the following schema:

-- `event`: **str** - Event names are of the format:
+- `event`: Event names are of the format:
     `on_[runnable_type]_(start|stream|end)`.
-- `name`: **str** - The name of the `Runnable` that generated the event.
-- `run_id`: **str** - randomly generated ID associated with the given
-    execution of the `Runnable` that emitted the event. A child `Runnable` that gets
-    invoked as part of the execution of a parent `Runnable` is assigned its own
-    unique ID.
-- `parent_ids`: **list[str]** - The IDs of the parent runnables that generated
-    the event. The root `Runnable` will have an empty list. The order of the parent
-    IDs is from the root to the immediate parent. Only available for v2 version of
-    the API. The v1 version of the API will return an empty list.
-- `tags`: **list[str] | None** - The tags of the `Runnable` that generated
-    the event.
-- `metadata`: **dict[str, Any] | None** - The metadata of the `Runnable` that
-    generated the event.
-- `data`: **dict[str, Any]**
+- `name`: The name of the `Runnable` that generated the event.
+- `run_id`: Randomly generated ID associated with the given execution of the
+    `Runnable` that emitted the event. A child `Runnable` that gets invoked as
+    part of the execution of a parent `Runnable` is assigned its own unique ID.
+- `parent_ids`: The IDs of the parent runnables that generated the event. The
+    root `Runnable` will have an empty list. The order of the parent IDs is from
+    the root to the immediate parent. Only available for v2 version of the API.
+    The v1 version of the API will return an empty list.
+- `tags`: The tags of the `Runnable` that generated the event.
+- `metadata`: The metadata of the `Runnable` that generated the event.
+- `data`: The data associated with the event. The contents of this field
+    depend on the type of event. See the table below for more details.

 Below is a table that illustrates some events that might be emitted by various
 chains. Metadata fields have been omitted from the table for brevity.
@@ -1297,39 +1291,23 @@ class Runnable(ABC, Generic[Input, Output]):
 !!! note
     This reference table is for the v2 version of the schema.

-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| event                  | name             | chunk                             | input                                           | output                                              |
-+========================+==================+===================================+=================================================+=====================================================+
-| `on_chat_model_start`  | [model name]     |                                   | `{"messages": [[SystemMessage, HumanMessage]]}` |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_chat_model_stream` | [model name]     | `AIMessageChunk(content="hello")` |                                                 |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_chat_model_end`    | [model name]     |                                   | `{"messages": [[SystemMessage, HumanMessage]]}` | `AIMessageChunk(content="hello world")`             |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_llm_start`         | [model name]     |                                   | `{'input': 'hello'}`                            |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_llm_stream`        | [model name]     | `'Hello' `                        |                                                 |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_llm_end`           | [model name]     |                                   | `'Hello human!'`                                |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_chain_start`       | format_docs      |                                   |                                                 |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_chain_stream`      | format_docs      | `'hello world!, goodbye world!'`  |                                                 |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_chain_end`         | format_docs      |                                   | `[Document(...)]`                               | `'hello world!, goodbye world!'`                    |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_tool_start`        | some_tool        |                                   | `{"x": 1, "y": "2"}`                            |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_tool_end`          | some_tool        |                                   |                                                 | `{"x": 1, "y": "2"}`                                |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_retriever_start`   | [retriever name] |                                   | `{"query": "hello"}`                            |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_retriever_end`     | [retriever name] |                                   | `{"query": "hello"}`                            | `[Document(...), ..]`                               |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_prompt_start`      | [template_name]  |                                   | `{"question": "hello"}`                         |                                                     |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
-| `on_prompt_end`        | [template_name]  |                                   | `{"question": "hello"}`                         | `ChatPromptValue(messages: [SystemMessage, ...])`   |
-+------------------------+------------------+-----------------------------------+-------------------------------------------------+-----------------------------------------------------+
+| event                  | name                 | chunk                             | input                                           | output                                            |
+| ---------------------- | -------------------- | --------------------------------- | ----------------------------------------------- | ------------------------------------------------- |
+| `on_chat_model_start`  | `'[model name]'`     |                                   | `{"messages": [[SystemMessage, HumanMessage]]}` |                                                   |
+| `on_chat_model_stream` | `'[model name]'`     | `AIMessageChunk(content="hello")` |                                                 |                                                   |
+| `on_chat_model_end`    | `'[model name]'`     |                                   | `{"messages": [[SystemMessage, HumanMessage]]}` | `AIMessageChunk(content="hello world")`           |
+| `on_llm_start`         | `'[model name]'`     |                                   | `{'input': 'hello'}`                            |                                                   |
+| `on_llm_stream`        | `'[model name]'`     | `'Hello' `                        |                                                 |                                                   |
+| `on_llm_end`           | `'[model name]'`     |                                   | `'Hello human!'`                                |                                                   |
+| `on_chain_start`       | `'format_docs'`      |                                   |                                                 |                                                   |
+| `on_chain_stream`      | `'format_docs'`      | `'hello world!, goodbye world!'`  |                                                 |                                                   |
+| `on_chain_end`         | `'format_docs'`      |                                   | `[Document(...)]`                               | `'hello world!, goodbye world!'`                  |
+| `on_tool_start`        | `'some_tool'`        |                                   | `{"x": 1, "y": "2"}`                            |                                                   |
+| `on_tool_end`          | `'some_tool'`        |                                   |                                                 | `{"x": 1, "y": "2"}`                              |
+| `on_retriever_start`   | `'[retriever name]'` |                                   | `{"query": "hello"}`                            |                                                   |
+| `on_retriever_end`     | `'[retriever name]'` |                                   | `{"query": "hello"}`                            | `[Document(...), ..]`                             |
+| `on_prompt_start`      | `'[template_name]'`  |                                   | `{"question": "hello"}`                         |                                                   |
+| `on_prompt_end`        | `'[template_name]'`  |                                   | `{"question": "hello"}`                         | `ChatPromptValue(messages: [SystemMessage, ...])` |

 In addition to the standard events, users can also dispatch custom events (see example below).

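A consumption sketch for the event stream documented above, using the public `astream_events` API; `chain` here stands for any `Runnable` (an assumption for illustration):

```python
async def collect_tokens(chain, prompt_input):
    # Filter the v2 event stream for chat-model token chunks,
    # matching the `on_chat_model_stream` row in the table above.
    tokens = []
    async for event in chain.astream_events(prompt_input, version="v2"):
        if event["event"] == "on_chat_model_stream":
            tokens.append(event["data"]["chunk"])
    return tokens
```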
@@ -1337,13 +1315,10 @@ class Runnable(ABC, Generic[Input, Output]):

 A custom event has following format:

-+-----------+------+-----------------------------------------------------------------------------------------------------------+
-| Attribute | Type | Description                                                                                               |
-+===========+======+===========================================================================================================+
-| name      | str  | A user defined name for the event.                                                                        |
-+-----------+------+-----------------------------------------------------------------------------------------------------------+
-| data      | Any  | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |
-+-----------+------+-----------------------------------------------------------------------------------------------------------+
+| Attribute | Type  | Description                                                                                               |
+| --------- | ----- | --------------------------------------------------------------------------------------------------------- |
+| `name`    | `str` | A user defined name for the event.                                                                        |
+| `data`    | `Any` | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |

 Here are declarations associated with the standard events shown above:

@@ -1378,7 +1353,8 @@ class Runnable(ABC, Generic[Input, Output]):
 ).with_config({"run_name": "my_template", "tags": ["my_template"]})
 ```

-Example:
+For instance:

 ```python
 from langchain_core.runnables import RunnableLambda

@@ -1391,8 +1367,8 @@ class Runnable(ABC, Generic[Input, Output]):

 events = [event async for event in chain.astream_events("hello", version="v2")]

-# will produce the following events (run_id, and parent_ids
-# has been omitted for brevity):
+# Will produce the following events
+# (run_id, and parent_ids has been omitted for brevity):
 [
     {
         "data": {"input": "hello"},
@@ -1447,7 +1423,7 @@ class Runnable(ABC, Generic[Input, Output]):

 async for event in slow_thing.astream_events("some_input", version="v2"):
     print(event)
-``
+```

 Args:
     input: The input to the `Runnable`.
@@ -1521,12 +1497,12 @@ class Runnable(ABC, Generic[Input, Output]):

 Default implementation of transform, which buffers input and calls `astream`.

-Subclasses should override this method if they can start producing output while
+Subclasses must override this method if they can start producing output while
 input is still being generated.

 Args:
     input: An iterator of inputs to the `Runnable`.
-    config: The config to use for the `Runnable`. Defaults to `None`.
+    config: The config to use for the `Runnable`.
     **kwargs: Additional keyword arguments to pass to the `Runnable`.

 Yields:
@@ -1566,12 +1542,12 @@ class Runnable(ABC, Generic[Input, Output]):

 Default implementation of atransform, which buffers input and calls `astream`.

-Subclasses should override this method if they can start producing output while
+Subclasses must override this method if they can start producing output while
 input is still being generated.

 Args:
     input: An async iterator of inputs to the `Runnable`.
-    config: The config to use for the `Runnable`. Defaults to `None`.
+    config: The config to use for the `Runnable`.
     **kwargs: Additional keyword arguments to pass to the `Runnable`.

 Yields:
@@ -1619,16 +1595,16 @@ class Runnable(ABC, Generic[Input, Output]):
 from langchain_ollama import ChatOllama
 from langchain_core.output_parsers import StrOutputParser

-llm = ChatOllama(model="llama3.1")
+model = ChatOllama(model="llama3.1")

 # Without bind
-chain = llm | StrOutputParser()
+chain = model | StrOutputParser()

 chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
 # Output is 'One two three four five.'

 # With bind
-chain = llm.bind(stop=["three"]) | StrOutputParser()
+chain = model.bind(stop=["three"]) | StrOutputParser()

 chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
 # Output is 'One two'
@@ -1682,11 +1658,11 @@ class Runnable(ABC, Generic[Input, Output]):

 Args:
     on_start: Called before the `Runnable` starts running, with the `Run`
-        object. Defaults to `None`.
+        object.
     on_end: Called after the `Runnable` finishes running, with the `Run`
-        object. Defaults to `None`.
+        object.
     on_error: Called if the `Runnable` throws an error, with the `Run`
-        object. Defaults to `None`.
+        object.

 Returns:
     A new `Runnable` with the listeners bound.
@@ -1750,11 +1726,11 @@ class Runnable(ABC, Generic[Input, Output]):

 Args:
     on_start: Called asynchronously before the `Runnable` starts running,
-        with the `Run` object. Defaults to `None`.
+        with the `Run` object.
     on_end: Called asynchronously after the `Runnable` finishes running,
-        with the `Run` object. Defaults to `None`.
+        with the `Run` object.
     on_error: Called asynchronously if the `Runnable` throws an error,
-        with the `Run` object. Defaults to `None`.
+        with the `Run` object.

 Returns:
     A new `Runnable` with the listeners bound.
@@ -1833,11 +1809,11 @@ class Runnable(ABC, Generic[Input, Output]):
 """Bind input and output types to a `Runnable`, returning a new `Runnable`.

 Args:
-    input_type: The input type to bind to the `Runnable`. Defaults to `None`.
-    output_type: The output type to bind to the `Runnable`. Defaults to `None`.
+    input_type: The input type to bind to the `Runnable`.
+    output_type: The output type to bind to the `Runnable`.

 Returns:
-    A new Runnable with the types bound.
+    A new `Runnable` with the types bound.
 """
 return RunnableBinding(
     bound=self,
@@ -1858,14 +1834,13 @@ class Runnable(ABC, Generic[Input, Output]):

 Args:
     retry_if_exception_type: A tuple of exception types to retry on.
-        Defaults to (Exception,).
     wait_exponential_jitter: Whether to add jitter to the wait
-        time between retries. Defaults to `True`.
+        time between retries.
     stop_after_attempt: The maximum number of attempts to make before
-        giving up. Defaults to 3.
+        giving up.
     exponential_jitter_params: Parameters for
         `tenacity.wait_exponential_jitter`. Namely: `initial`, `max`,
-        `exp_base`, and `jitter` (all float values).
+        `exp_base`, and `jitter` (all `float` values).

 Returns:
     A new Runnable that retries the original Runnable on exceptions.
@@ -1950,16 +1925,15 @@ class Runnable(ABC, Generic[Input, Output]):
 fallbacks: A sequence of runnables to try if the original `Runnable`
     fails.
 exceptions_to_handle: A tuple of exception types to handle.
-    Defaults to `(Exception,)`.
-exception_key: If string is specified then handled exceptions will be passed
-    to fallbacks as part of the input under the specified key.
+exception_key: If `string` is specified then handled exceptions will be
+    passed to fallbacks as part of the input under the specified key.
     If `None`, exceptions will not be passed to fallbacks.
     If used, the base `Runnable` and its fallbacks must accept a
-    dictionary as input. Defaults to `None`.
+    dictionary as input.

 Returns:
     A new `Runnable` that will try the original `Runnable`, and then each
     Fallback in order, upon failures.

 Example:
 ```python
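A short sketch of the retry API documented in the `with_retry` hunk above (public `Runnable.with_retry`; the flaky function is a contrived stand-in):

```python
from langchain_core.runnables import RunnableLambda

attempts = 0

def flaky(x: int) -> int:
    global attempts
    attempts += 1
    if attempts < 3:
        raise ValueError("transient failure")
    return x * 2

# Retry the wrapped Runnable on ValueError, up to 3 attempts total.
safe = RunnableLambda(flaky).with_retry(
    retry_if_exception_type=(ValueError,), stop_after_attempt=3
)
print(safe.invoke(5))  # 10, after two failed attempts
```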
@@ -1987,16 +1961,15 @@ class Runnable(ABC, Generic[Input, Output]):
 fallbacks: A sequence of runnables to try if the original `Runnable`
     fails.
 exceptions_to_handle: A tuple of exception types to handle.
-exception_key: If string is specified then handled exceptions will be passed
-    to fallbacks as part of the input under the specified key.
+exception_key: If `string` is specified then handled exceptions will be
+    passed to fallbacks as part of the input under the specified key.
     If `None`, exceptions will not be passed to fallbacks.
     If used, the base `Runnable` and its fallbacks must accept a
     dictionary as input.

 Returns:
     A new `Runnable` that will try the original `Runnable`, and then each
     Fallback in order, upon failures.
-
 """
 # Import locally to prevent circular import
 from langchain_core.runnables.fallbacks import (  # noqa: PLC0415
@@ -2462,10 +2435,10 @@ class Runnable(ABC, Generic[Input, Output]):
 pass `arg_types` to just specify the required arguments and their types.

 Args:
-    args_schema: The schema for the tool. Defaults to `None`.
-    name: The name of the tool. Defaults to `None`.
-    description: The description of the tool. Defaults to `None`.
-    arg_types: A dictionary of argument names to types. Defaults to `None`.
+    args_schema: The schema for the tool.
+    name: The name of the tool.
+    description: The description of the tool.
+    arg_types: A dictionary of argument names to types.

 Returns:
     A `BaseTool` instance.
@@ -2546,9 +2519,6 @@ class Runnable(ABC, Generic[Input, Output]):
 as_tool = runnable.as_tool()
 as_tool.invoke("b")
 ```
-
-!!! version-added "Added in version 0.2.14"
-
 """
 # Avoid circular import
 from langchain_core.tools import convert_runnable_to_tool  # noqa: PLC0415
@@ -2654,9 +2624,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
 which: The `ConfigurableField` instance that will be used to select the
     alternative.
 default_key: The default key to use if no alternative is selected.
-    Defaults to `'default'`.
 prefix_keys: Whether to prefix the keys with the `ConfigurableField` id.
-    Defaults to `False`.
 **kwargs: A dictionary of keys to `Runnable` instances or callables that
     return `Runnable` instances.

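To illustrate the `configurable_alternatives` signature documented above, a minimal sketch using two serializable prompt templates as the alternatives (hypothetical names; the `tone` field id is an assumption for illustration):

```python
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField

formal = PromptTemplate.from_template("Dear {name},")
casual = PromptTemplate.from_template("Hey {name}!")

# Select an alternative at invocation time via the ConfigurableField id.
prompt = formal.configurable_alternatives(
    ConfigurableField(id="tone"), default_key="formal", casual=casual
)

print(prompt.invoke({"name": "Ada"}).to_string())  # Dear Ada,
print(
    prompt.with_config(configurable={"tone": "casual"})
    .invoke({"name": "Ada"})
    .to_string()
)  # Hey Ada!
```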
@@ -2888,10 +2856,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
|
||||
|
||||
Args:
|
||||
steps: The steps to include in the sequence.
|
||||
name: The name of the `Runnable`. Defaults to `None`.
|
||||
first: The first `Runnable` in the sequence. Defaults to `None`.
|
||||
middle: The middle `Runnable` objects in the sequence. Defaults to `None`.
|
||||
last: The last Runnable in the sequence. Defaults to `None`.
|
||||
name: The name of the `Runnable`.
|
||||
first: The first `Runnable` in the sequence.
|
||||
middle: The middle `Runnable` objects in the sequence.
|
||||
last: The last Runnable in the sequence.
|
||||
|
||||
Raises:
|
||||
ValueError: If the sequence has less than 2 steps.
|
||||
@@ -2917,7 +2885,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
     @classmethod
     @override
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "runnable"]`
@@ -2960,7 +2928,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
         """Get the input schema of the `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The input schema of the `Runnable`.
@@ -2975,7 +2943,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
         """Get the output schema of the `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The output schema of the `Runnable`.
@@ -3002,7 +2970,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
         """Get the graph representation of the `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The graph representation of the `Runnable`.
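These schema and graph accessors can be exercised directly; a short sketch (output shapes depend on the runnables involved):

```python
from langchain_core.runnables import RunnableLambda

seq = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

# Input/output schemas are dynamically generated Pydantic models.
print(seq.get_input_schema().model_json_schema())
print(seq.get_output_schema().model_json_schema())

# The graph of steps can be rendered as ASCII art.
seq.get_graph().print_ascii()
```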
@@ -3629,7 +3597,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
         """Create a `RunnableParallel`.

         Args:
-            steps__: The steps to include. Defaults to `None`.
+            steps__: The steps to include.
             **kwargs: Additional steps to include.

         """
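A sketch of the two ways steps can be supplied to `RunnableParallel` (via the `steps__` mapping or as keyword arguments):

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

add_one = RunnableLambda(lambda x: x + 1)
double = RunnableLambda(lambda x: x * 2)

parallel = RunnableParallel({"added": add_one}, doubled=double)

assert parallel.invoke(1) == {"added": 2, "doubled": 2}
```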
@@ -3648,7 +3616,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
     @classmethod
     @override
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "runnable"]`
@@ -3664,8 +3632,8 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
         """Get the name of the `Runnable`.

         Args:
-            suffix: The suffix to use. Defaults to `None`.
-            name: The name to use. Defaults to `None`.
+            suffix: The suffix to use.
+            name: The name to use.

         Returns:
             The name of the `Runnable`.
@@ -3689,7 +3657,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
         """Get the input schema of the `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The input schema of the `Runnable`.
@@ -3720,7 +3688,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
         """Get the output schema of the `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The output schema of the `Runnable`.
@@ -3747,7 +3715,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
         """Get the graph representation of the `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The graph representation of the `Runnable`.
@@ -4157,8 +4125,8 @@ class RunnableGenerator(Runnable[Input, Output]):

         Args:
             transform: The transform function.
-            atransform: The async transform function. Defaults to `None`.
-            name: The name of the `Runnable`. Defaults to `None`.
+            atransform: The async transform function.
+            name: The name of the `Runnable`.

         Raises:
             TypeError: If the transform is not a generator function.
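A minimal sketch of the `transform` and `name` parameters on `RunnableGenerator` (the generator itself is illustrative):

```python
from collections.abc import Iterator
from langchain_core.runnables import RunnableGenerator

def gen(chunks: Iterator[str]) -> Iterator[str]:
    # Transform each streamed chunk as it passes through.
    for chunk in chunks:
        yield chunk.upper()

runnable = RunnableGenerator(gen, name="upper")
assert list(runnable.stream("abc")) == ["ABC"]
```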
@@ -4435,8 +4403,8 @@ class RunnableLambda(Runnable[Input, Output]):
         Args:
             func: Either sync or async callable
             afunc: An async callable that takes an input and returns an output.
-                Defaults to `None`.
-            name: The name of the `Runnable`. Defaults to `None`.
+            name: The name of the `Runnable`.
+

         Raises:
             TypeError: If the `func` is not a callable type.
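A sketch of `func`/`afunc` in practice: the sync callable serves `invoke`, and the async one (when given) serves `ainvoke`:

```python
import asyncio
from langchain_core.runnables import RunnableLambda

def add_one(x: int) -> int:
    return x + 1

async def aadd_one(x: int) -> int:
    return x + 1

runnable = RunnableLambda(add_one, afunc=aadd_one, name="add_one")

assert runnable.invoke(1) == 2
assert asyncio.run(runnable.ainvoke(1)) == 2
```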
@@ -4493,10 +4461,10 @@ class RunnableLambda(Runnable[Input, Output]):

     @override
     def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
-        """The pydantic schema for the input to this `Runnable`.
+        """The Pydantic schema for the input to this `Runnable`.

         Args:
-            config: The config to use. Defaults to `None`.
+            config: The config to use.

         Returns:
             The input schema for this `Runnable`.
@@ -4830,7 +4798,7 @@ class RunnableLambda(Runnable[Input, Output]):

         Args:
             input: The input to this `Runnable`.
-            config: The config to use. Defaults to `None`.
+            config: The config to use.
             **kwargs: Additional keyword arguments.

         Returns:
@@ -4861,7 +4829,7 @@ class RunnableLambda(Runnable[Input, Output]):

         Args:
             input: The input to this `Runnable`.
-            config: The config to use. Defaults to `None`.
+            config: The config to use.
             **kwargs: Additional keyword arguments.

         Returns:
@@ -5127,7 +5095,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
                 None,
             ),
             # create model needs access to appropriate type annotations to be
-            # able to construct the pydantic model.
+            # able to construct the Pydantic model.
             # When we create the model, we pass information about the namespace
             # where the model is being created, so the type annotations can
             # be resolved correctly as well.
@@ -5150,7 +5118,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
             self.get_name("Output"),
             root=list[schema],  # type: ignore[valid-type]
             # create model needs access to appropriate type annotations to be
-            # able to construct the pydantic model.
+            # able to construct the Pydantic model.
             # When we create the model, we pass information about the namespace
             # where the model is being created, so the type annotations can
             # be resolved correctly as well.
@@ -5177,7 +5145,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
     @classmethod
     @override
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "runnable"]`
@@ -5303,11 +5271,11 @@ class RunnableEach(RunnableEachBase[Input, Output]):

         Args:
             on_start: Called before the `Runnable` starts running, with the `Run`
-                object. Defaults to `None`.
+                object.
             on_end: Called after the `Runnable` finishes running, with the `Run`
-                object. Defaults to `None`.
+                object.
             on_error: Called if the `Runnable` throws an error, with the `Run`
-                object. Defaults to `None`.
+                object.

         Returns:
             A new `Runnable` with the listeners bound.
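A sketch of binding these listeners on a `RunnableEach` (built here via `.map()`; the callbacks just print):

```python
from langchain_core.runnables import RunnableLambda

def on_start(run):
    print("started:", run.name)

def on_end(run):
    print("finished:", run.name)

each = RunnableLambda(lambda x: x + 1).map()  # RunnableEach over list inputs
hooked = each.with_listeners(on_start=on_start, on_end=on_end)

print(hooked.invoke([1, 2, 3]))  # -> [2, 3, 4]
```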
@@ -5336,11 +5304,11 @@ class RunnableEach(RunnableEachBase[Input, Output]):

         Args:
             on_start: Called asynchronously before the `Runnable` starts running,
-                with the `Run` object. Defaults to `None`.
+                with the `Run` object.
             on_end: Called asynchronously after the `Runnable` finishes running,
-                with the `Run` object. Defaults to `None`.
+                with the `Run` object.
             on_error: Called asynchronously if the `Runnable` throws an error,
-                with the `Run` object. Defaults to `None`.
+                with the `Run` object.

         Returns:
             A new `Runnable` with the listeners bound.
@@ -5387,13 +5355,13 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):  # type: ignore[
     custom_input_type: Any | None = None
     """Override the input type of the underlying `Runnable` with a custom type.

-    The type can be a pydantic model, or a type annotation (e.g., `list[str]`).
+    The type can be a Pydantic model, or a type annotation (e.g., `list[str]`).
     """
     # Union[Type[Output], BaseModel] + things like list[str]
     custom_output_type: Any | None = None
     """Override the output type of the underlying `Runnable` with a custom type.

-    The type can be a pydantic model, or a type annotation (e.g., `list[str]`).
+    The type can be a Pydantic model, or a type annotation (e.g., `list[str]`).
     """

     model_config = ConfigDict(
@@ -5420,16 +5388,16 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):  # type: ignore[
             kwargs: optional kwargs to pass to the underlying `Runnable`, when running
                 the underlying `Runnable` (e.g., via `invoke`, `batch`,
                 `transform`, or `stream` or async variants)
-                Defaults to `None`.

             config: optional config to bind to the underlying `Runnable`.
-                Defaults to `None`.

             config_factories: optional list of config factories to apply to the
                 config before binding to the underlying `Runnable`.
-                Defaults to `None`.

             custom_input_type: Specify to override the input type of the underlying
-                `Runnable` with a custom type. Defaults to `None`.
+                `Runnable` with a custom type.
             custom_output_type: Specify to override the output type of the underlying
-                `Runnable` with a custom type. Defaults to `None`.
+                `Runnable` with a custom type.
             **other_kwargs: Unpacked into the base class.
         """
         super().__init__(
@@ -5500,7 +5468,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):  # type: ignore[
     @classmethod
     @override
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "runnable"]`
@@ -5782,7 +5750,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):  # type: ignore[no-re
     `bind`: Bind kwargs to pass to the underlying `Runnable` when running it.

     ```python
-    # Create a Runnable binding that invokes the ChatModel with the
+    # Create a Runnable binding that invokes the chat model with the
     # additional kwarg `stop=['-']` when running it.
     from langchain_community.chat_models import ChatOpenAI

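A model-free sketch of the same `bind` mechanic, using a plain function so it runs without any provider (the `times` kwarg is illustrative):

```python
from langchain_core.runnables import RunnableLambda

def repeat(text: str, *, times: int = 1) -> str:
    return text * times

runnable = RunnableLambda(repeat)

# `bind` returns a RunnableBinding that forwards `times=3` on every call.
bound = runnable.bind(times=3)

assert bound.invoke("ha") == "hahaha"
```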
@@ -5866,11 +5834,11 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):  # type: ignore[no-re

         Args:
             on_start: Called before the `Runnable` starts running, with the `Run`
-                object. Defaults to `None`.
+                object.
             on_end: Called after the `Runnable` finishes running, with the `Run`
-                object. Defaults to `None`.
+                object.
             on_error: Called if the `Runnable` throws an error, with the `Run`
-                object. Defaults to `None`.
+                object.

         Returns:
             A new `Runnable` with the listeners bound.
@@ -6077,10 +6045,10 @@ def chain(
         @chain
         def my_func(fields):
             prompt = PromptTemplate("Hello, {name}!")
-            llm = OpenAI()
+            model = OpenAI()
             formatted = prompt.invoke(**fields)

-            for chunk in llm.stream(formatted):
+            for chunk in model.stream(formatted):
                 yield chunk
         ```
     """
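A runnable variant of the decorated-generator example above, with the model call dropped so it executes standalone (`from_template` replaces the shorthand constructor):

```python
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import chain

@chain
def my_func(fields: dict):
    # The decorated generator becomes a Runnable supporting
    # invoke/stream/batch like any other.
    prompt = PromptTemplate.from_template("Hello, {name}!")
    formatted = prompt.invoke(fields)
    yield formatted.to_string()

for chunk in my_func.stream({"name": "world"}):
    print(chunk)  # -> "Hello, world!"
```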
@@ -40,13 +40,13 @@ from langchain_core.runnables.utils import (
 class RunnableBranch(RunnableSerializable[Input, Output]):
     """Runnable that selects which branch to run based on a condition.

-    The Runnable is initialized with a list of (condition, Runnable) pairs and
+    The Runnable is initialized with a list of `(condition, Runnable)` pairs and
     a default branch.

     When operating on an input, the first condition that evaluates to True is
-    selected, and the corresponding Runnable is run on the input.
+    selected, and the corresponding `Runnable` is run on the input.

-    If no condition evaluates to True, the default branch is run on the input.
+    If no condition evaluates to `True`, the default branch is run on the input.

     Examples:
         ```python
@@ -65,9 +65,9 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
     """

     branches: Sequence[tuple[Runnable[Input, bool], Runnable[Input, Output]]]
-    """A list of (condition, Runnable) pairs."""
+    """A list of `(condition, Runnable)` pairs."""
     default: Runnable[Input, Output]
-    """A Runnable to run if no condition is met."""
+    """A `Runnable` to run if no condition is met."""

     def __init__(
         self,
@@ -79,15 +79,15 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
             ]
             | RunnableLike,
     ) -> None:
-        """A Runnable that runs one of two branches based on a condition.
+        """A `Runnable` that runs one of two branches based on a condition.

         Args:
-            *branches: A list of (condition, Runnable) pairs.
-                Defaults a Runnable to run if no condition is met.
+            *branches: A list of `(condition, Runnable)` pairs.
+                Defaults a `Runnable` to run if no condition is met.

         Raises:
             ValueError: If the number of branches is less than 2.
-            TypeError: If the default branch is not Runnable, Callable or Mapping.
+            TypeError: If the default branch is not `Runnable`, `Callable` or `Mapping`.
             TypeError: If a branch is not a tuple or list.
             ValueError: If a branch is not of length 2.
         """
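The `(condition, Runnable)` pairing works with plain callables too, which are coerced to runnables; a self-contained sketch:

```python
from langchain_core.runnables import RunnableBranch

branch = RunnableBranch(
    (lambda x: isinstance(x, str), lambda x: x.upper()),
    (lambda x: isinstance(x, int), lambda x: x + 1),
    lambda x: "goodbye",  # default branch, used when no condition matches
)

assert branch.invoke("hello") == "HELLO"
assert branch.invoke(2) == 3
assert branch.invoke(None) == "goodbye"
```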
@@ -146,7 +146,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
     @classmethod
     @override
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "runnable"]`
@@ -191,7 +191,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

         Args:
             input: The input to the Runnable.
-            config: The configuration for the Runnable. Defaults to `None`.
+            config: The configuration for the Runnable.
             **kwargs: Additional keyword arguments to pass to the Runnable.

         Returns:
@@ -301,7 +301,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

         Args:
             input: The input to the Runnable.
-            config: The configuration for the Runnable. Defaults to `None`.
+            config: The configuration for the Runnable.
             **kwargs: Additional keyword arguments to pass to the Runnable.

         Yields:
@@ -385,7 +385,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

         Args:
             input: The input to the Runnable.
-            config: The configuration for the Runnable. Defaults to `None`.
+            config: The configuration for the Runnable.
             **kwargs: Additional keyword arguments to pass to the Runnable.

         Yields:
@@ -75,26 +75,26 @@ class RunnableConfig(TypedDict, total=False):
     max_concurrency: int | None
     """
     Maximum number of parallel calls to make. If not provided, defaults to
-    ThreadPoolExecutor's default.
+    `ThreadPoolExecutor`'s default.
     """

     recursion_limit: int
     """
-    Maximum number of times a call can recurse. If not provided, defaults to 25.
+    Maximum number of times a call can recurse. If not provided, defaults to `25`.
     """

     configurable: dict[str, Any]
     """
-    Runtime values for attributes previously made configurable on this Runnable,
-    or sub-Runnables, through .configurable_fields() or .configurable_alternatives().
-    Check .output_schema() for a description of the attributes that have been made
+    Runtime values for attributes previously made configurable on this `Runnable`,
+    or sub-Runnables, through `configurable_fields` or `configurable_alternatives`.
+    Check `output_schema` for a description of the attributes that have been made
     configurable.
     """

     run_id: uuid.UUID | None
     """
     Unique identifier for the tracer run for this call. If not provided, a new UUID
     will be generated.
     """
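These keys can be passed directly as a config dict; a small sketch using `batch` so `max_concurrency` has something to cap:

```python
from langchain_core.runnables import RunnableConfig, RunnableLambda

doubler = RunnableLambda(lambda x: x * 2)

config: RunnableConfig = {
    "max_concurrency": 4,   # cap on parallel calls within batch()
    "recursion_limit": 25,  # cap on recursive re-entry
    "tags": ["demo"],
}

print(doubler.batch([1, 2, 3], config=config))  # -> [2, 4, 6]
```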
@@ -193,7 +193,7 @@ def ensure_config(config: RunnableConfig | None = None) -> RunnableConfig:
     """Ensure that a config is a dict with all keys present.

     Args:
-        config: The config to ensure. Defaults to `None`.
+        config: The config to ensure.

     Returns:
         The ensured config.
@@ -412,7 +412,7 @@ def call_func_with_variable_args(
         func: The function to call.
         input: The input to the function.
         config: The config to pass to the function.
-        run_manager: The run manager to pass to the function. Defaults to `None`.
+        run_manager: The run manager to pass to the function.
         **kwargs: The keyword arguments to pass to the function.

     Returns:
@@ -446,7 +446,7 @@ def acall_func_with_variable_args(
         func: The function to call.
         input: The input to the function.
         config: The config to pass to the function.
-        run_manager: The run manager to pass to the function. Defaults to `None`.
+        run_manager: The run manager to pass to the function.
         **kwargs: The keyword arguments to pass to the function.

     Returns:
@@ -527,16 +527,15 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
         self,
         fn: Callable[..., T],
         *iterables: Iterable[Any],
         timeout: float | None = None,
         chunksize: int = 1,
-        **kwargs: Any,
     ) -> Iterator[T]:
         """Map a function to multiple iterables.

         Args:
             fn: The function to map.
             *iterables: The iterables to map over.
-            timeout: The timeout for the map. Defaults to `None`.
-            chunksize: The chunksize for the map. Defaults to 1.
+            timeout: The timeout for the map.
+            chunksize: The chunksize for the map.

         Returns:
             The iterator for the mapped function.
@@ -549,8 +548,7 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):

         return super().map(
             _wrapped_fn,
             *iterables,
             timeout=timeout,
             chunksize=chunksize,
-            **kwargs,
         )
@@ -72,7 +72,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     @classmethod
     @override
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
             `["langchain", "schema", "runnable"]`
@@ -123,7 +123,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         """Prepare the Runnable for invocation.

         Args:
-            config: The configuration to use. Defaults to `None`.
+            config: The configuration to use.

         Returns:
             The prepared Runnable and configuration.
@@ -540,7 +540,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
     """The alternatives to choose from."""

     default_key: str = "default"
-    """The enum value to use for the default option. Defaults to `'default'`."""
+    """The enum value to use for the default option."""

     prefix_keys: bool
     """Whether to prefix configurable fields of each alternative with a namespace
Some files were not shown because too many files have changed in this diff.