Compare commits

..

2 Commits

Author          SHA1        Message  Date
Eugene Yurtsev  c21b43fb4e  x        2025-10-09 17:01:09 -04:00
Eugene Yurtsev  05eed19605  x        2025-10-09 16:44:57 -04:00
706 changed files with 32116 additions and 63272 deletions

View File

@@ -0,0 +1,18 @@
{
"permissions": {
"allow": [
"Bash(uv run:*)",
"Bash(make:*)",
"WebSearch",
"WebFetch(domain:ai.pydantic.dev)",
"WebFetch(domain:openai.github.io)",
"Bash(uv run:*)",
"Bash(python3:*)",
"WebFetch(domain:github.com)",
"Bash(gh pr view:*)",
"Bash(gh pr diff:*)"
],
"deny": [],
"ask": []
}
}

View File

@@ -2,7 +2,7 @@ blank_issues_enabled: false
version: 2.1
contact_links:
- name: 📚 Documentation
url: https://github.com/langchain-ai/docs/issues/new?template=01-langchain.yml
url: https://github.com/langchain-ai/docs/issues/new?template=langchain.yml
about: Report an issue related to the LangChain documentation
- name: 💬 LangChain Forum
url: https://forum.langchain.com/

.github/actions/poetry_setup/action.yml
View File

@@ -0,0 +1,93 @@
# An action for setting up poetry install with caching.
# Using a custom action since the default action does not
# take poetry install groups into account.
# Action code from:
# https://github.com/actions/setup-python/issues/505#issuecomment-1273013236
name: poetry-install-with-caching
description: Poetry install with support for caching of dependency groups.
inputs:
python-version:
description: Python version, supporting MAJOR.MINOR only
required: true
poetry-version:
description: Poetry version
required: true
cache-key:
description: Cache key to use for manual handling of caching
required: true
working-directory:
description: Directory whose poetry.lock file should be cached
required: true
runs:
using: composite
steps:
- uses: actions/setup-python@v5
name: Setup python ${{ inputs.python-version }}
id: setup-python
with:
python-version: ${{ inputs.python-version }}
- uses: actions/cache@v4
id: cache-bin-poetry
name: Cache Poetry binary - Python ${{ inputs.python-version }}
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
with:
path: |
/opt/pipx/venvs/poetry
# This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}
- name: Refresh shell hashtable and fixup softlinks
if: steps.cache-bin-poetry.outputs.cache-hit == 'true'
shell: bash
env:
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
run: |
set -eux
# Refresh the shell hashtable, to ensure correct `which` output.
hash -r
# `actions/cache@v3` doesn't always seem able to correctly unpack softlinks.
# Delete and recreate the softlinks pipx expects to have.
rm /opt/pipx/venvs/poetry/bin/python
cd /opt/pipx/venvs/poetry/bin
ln -s "$(which "python$PYTHON_VERSION")" python
chmod +x python
cd /opt/pipx_bin/
ln -s /opt/pipx/venvs/poetry/bin/poetry poetry
chmod +x poetry
# Ensure everything got set up correctly.
/opt/pipx/venvs/poetry/bin/python --version
/opt/pipx_bin/poetry --version
- name: Install poetry
if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
shell: bash
env:
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
# Install poetry using the python version installed by setup-python step.
run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose
- name: Restore pip and poetry cached dependencies
uses: actions/cache@v4
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
with:
path: |
~/.cache/pip
~/.cache/pypoetry/virtualenvs
~/.cache/pypoetry/cache
~/.cache/pypoetry/artifacts
${{ env.WORKDIR }}/.venv
key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}
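The dependency cache key in the last step above folds together the OS, architecture, Python version, poetry version, the caller-supplied cache key, and a hash of `poetry.lock`, so changing any of them invalidates the cache. A small sketch of how the simpler poetry-binary key is assembled (the runner values here are illustrative placeholders, not from a real run):

```python
# Sketch of the poetry-binary cache key from the action above.
runner_os, runner_arch = "Linux", "X64"        # placeholder runner values
python_version, poetry_version = "3.11", "1.8.4"

# Keyed on the poetry version too, so upgrading poetry invalidates the cache.
key = f"bin-poetry-{runner_os}-{runner_arch}-py-{python_version}-{poetry_version}"
print(key)  # bin-poetry-Linux-X64-py-3.11-1.8.4
```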

View File

@@ -7,12 +7,13 @@ core:
- any-glob-to-any-file:
- "libs/core/**/*"
langchain-classic:
langchain:
- changed-files:
- any-glob-to-any-file:
- "libs/langchain/**/*"
- "libs/langchain_v1/**/*"
langchain:
v1:
- changed-files:
- any-glob-to-any-file:
- "libs/langchain_v1/**/*"
@@ -27,11 +28,6 @@ standard-tests:
- any-glob-to-any-file:
- "libs/standard-tests/**/*"
model-profiles:
- changed-files:
- any-glob-to-any-file:
- "libs/model-profiles/**/*"
text-splitters:
- changed-files:
- any-glob-to-any-file:
@@ -43,81 +39,6 @@ integration:
- any-glob-to-any-file:
- "libs/partners/**/*"
anthropic:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/anthropic/**/*"
chroma:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/chroma/**/*"
deepseek:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/deepseek/**/*"
exa:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/exa/**/*"
fireworks:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/fireworks/**/*"
groq:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/groq/**/*"
huggingface:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/huggingface/**/*"
mistralai:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/mistralai/**/*"
nomic:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/nomic/**/*"
ollama:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/ollama/**/*"
openai:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/openai/**/*"
perplexity:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/perplexity/**/*"
prompty:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/prompty/**/*"
qdrant:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/qdrant/**/*"
xai:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/xai/**/*"
# Infrastructure and DevOps
infra:
- changed-files:
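The `any-glob-to-any-file` matcher used throughout this config applies a label when any changed file matches any of the listed globs. A rough Python analogue (`fnmatch` only approximates the action's globbing, since its `*` also crosses `/`):

```python
# Rough sketch of "any-glob-to-any-file": label applies if any changed file
# matches any glob. fnmatch approximates the action's real matcher.
from fnmatch import fnmatch

def label_applies(globs: list[str], changed_files: list[str]) -> bool:
    return any(fnmatch(f, g) for f in changed_files for g in globs)

print(label_applies(["libs/core/**/*"], ["libs/core/langchain_core/tools.py"]))  # True
print(label_applies(["libs/core/**/*"], ["README.md"]))  # False
```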

.github/pr-title-labeler.yml
View File

@@ -0,0 +1,41 @@
# PR title labeler config
#
# Labels PRs based on conventional commit patterns in titles
#
# Format: type(scope): description or type!: description (breaking)
add-missing-labels: true
clear-prexisting: false
include-commits: false
include-title: true
label-for-breaking-changes: breaking
label-mapping:
documentation: ["docs"]
feature: ["feat"]
fix: ["fix"]
infra: ["build", "ci", "chore"]
integration:
[
"anthropic",
"chroma",
"deepseek",
"exa",
"fireworks",
"groq",
"huggingface",
"mistralai",
"nomic",
"ollama",
"openai",
"perplexity",
"prompty",
"qdrant",
"xai",
]
linting: ["style"]
performance: ["perf"]
refactor: ["refactor"]
release: ["release"]
revert: ["revert"]
tests: ["test"]
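Assuming the action parses conventional-commit titles of the form `type(scope): description`, the mapping above would resolve labels roughly as in this sketch. The parsing and scope-matching logic here is hypothetical, not the action's actual code:

```python
# Hypothetical sketch of conventional-commit title -> labels under the
# config above; the real action's parsing may differ.
import re

LABEL_MAPPING = {
    "documentation": ["docs"],
    "fix": ["fix"],
    "integration": ["anthropic", "openai", "qdrant"],  # truncated for brevity
}

def labels_for_title(title: str) -> set[str]:
    m = re.match(r"^(?P<type>\w+)(?:\((?P<scope>[\w-]+)\))?(?P<bang>!)?:", title)
    if not m:
        return set()
    labels = {
        label
        for label, keys in LABEL_MAPPING.items()
        if m["type"] in keys or (m["scope"] or "") in keys
    }
    if m["bang"]:
        labels.add("breaking")  # label-for-breaking-changes
    return labels

print(sorted(labels_for_title("fix(openai): retry on 429")))  # ['fix', 'integration']
```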

View File

@@ -30,7 +30,6 @@ LANGCHAIN_DIRS = [
"libs/text-splitters",
"libs/langchain",
"libs/langchain_v1",
"libs/model-profiles",
]
# When set to True, we are ignoring core dependents
@@ -131,20 +130,29 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
return _get_pydantic_test_configs(dir_)
if job == "codspeed":
py_versions = ["3.13"]
py_versions = ["3.12"] # 3.13 is not yet supported
elif dir_ == "libs/core":
py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
py_versions = ["3.10", "3.11", "3.12", "3.13"]
# custom logic for specific directories
elif dir_ in {"libs/partners/chroma"}:
elif dir_ == "libs/langchain" and job == "extended-tests":
py_versions = ["3.10", "3.13"]
elif dir_ == "libs/langchain_v1":
py_versions = ["3.10", "3.13"]
elif dir_ in {"libs/cli"}:
py_versions = ["3.10", "3.13"]
elif dir_ == ".":
# unable to install with 3.13 because tokenizers doesn't support 3.13 yet
py_versions = ["3.10", "3.12"]
else:
py_versions = ["3.10", "3.14"]
py_versions = ["3.10", "3.13"]
return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]
def _get_pydantic_test_configs(
dir_: str, *, python_version: str = "3.12"
dir_: str, *, python_version: str = "3.11"
) -> List[Dict[str, str]]:
with open("./libs/core/uv.lock", "rb") as f:
core_uv_lock_data = tomllib.load(f)
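For context, the `return` line above expands each directory into one matrix entry per Python version; this is the JSON shape the workflow consumes. A minimal standalone sketch:

```python
# Minimal sketch of the matrix expansion in the hunk above: one entry per
# (directory, Python version) pair.
from typing import Dict, List

def configs_for_dir(dir_: str, py_versions: List[str]) -> List[Dict[str, str]]:
    return [{"working-directory": dir_, "python-version": v} for v in py_versions]

print(configs_for_dir("libs/langchain_v1", ["3.10", "3.13"]))
# [{'working-directory': 'libs/langchain_v1', 'python-version': '3.10'},
#  {'working-directory': 'libs/langchain_v1', 'python-version': '3.13'}]
```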
@@ -298,9 +306,7 @@ if __name__ == "__main__":
if not filename.startswith(".")
] != ["README.md"]:
dirs_to_run["test"].add(f"libs/partners/{partner_dir}")
# Skip codspeed for partners without benchmarks or in IGNORED_PARTNERS
if partner_dir not in IGNORED_PARTNERS:
dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
# Skip if the directory was deleted or is just a tombstone readme
elif file.startswith("libs/"):
# Check if this is a root-level file in libs/ (e.g., libs/README.md)

View File

@@ -98,7 +98,7 @@ def _check_python_version_from_requirement(
return True
else:
marker_str = str(requirement.marker)
if "python_version" in marker_str or "python_full_version" in marker_str:
if "python_version" or "python_full_version" in marker_str:
python_version_str = "".join(
char
for char in marker_str
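The two `if` lines in this hunk differ in a subtle way: `"python_version" or "python_full_version" in marker_str` parses as `"python_version" or ("python_full_version" in marker_str)`, and a non-empty string literal is always truthy, so that condition holds for every marker. A standalone demonstration:

```python
marker_str = 'sys_platform == "win32"'  # no Python-version marker at all

# Parsed as: "python_version" or ("python_full_version" in marker_str).
# The non-empty literal is truthy, so this is True for any marker_str.
print(bool("python_version" or "python_full_version" in marker_str))  # True

# The membership test has to be applied to each operand explicitly:
print("python_version" in marker_str or "python_full_version" in marker_str)  # False
```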

View File

@@ -77,7 +77,7 @@ jobs:
working-directory: ${{ inputs.working-directory }}
- name: Upload build
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: dist
path: ${{ inputs.working-directory }}/dist/
@@ -149,8 +149,8 @@ jobs:
fi
fi
# if PREV_TAG is empty or came out to 0.0.0, let it be empty
if [ -z "$PREV_TAG" ] || [ "$PREV_TAG" = "$PKG_NAME==0.0.0" ]; then
# if PREV_TAG is empty, let it be empty
if [ -z "$PREV_TAG" ]; then
echo "No previous tag found - first release"
else
# confirm prev-tag actually exists in git repo with git tag
@@ -179,8 +179,8 @@ jobs:
PREV_TAG: ${{ steps.check-tags.outputs.prev-tag }}
run: |
PREAMBLE="Changes since $PREV_TAG"
# if PREV_TAG is empty or 0.0.0, then we are releasing the first version
if [ -z "$PREV_TAG" ] || [ "$PREV_TAG" = "$PKG_NAME==0.0.0" ]; then
# if PREV_TAG is empty, then we are releasing the first version
if [ -z "$PREV_TAG" ]; then
PREAMBLE="Initial release"
PREV_TAG=$(git rev-list --max-parents=0 HEAD)
fi
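Both hunks above implement the same fallback: when no previous tag can be resolved, the release notes are titled as an initial release and the diff range falls back to the repository's root commit. A compact sketch of that decision:

```python
# Sketch of the preamble fallback above; prev_tag is "" when none resolved.
def preamble(prev_tag: str) -> str:
    if not prev_tag:
        # First release: the workflow substitutes the root commit for PREV_TAG.
        return "Initial release"
    return f"Changes since {prev_tag}"

print(preamble(""))                       # Initial release
print(preamble("langchain-core==1.0.0"))  # Changes since langchain-core==1.0.0
```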
@@ -208,7 +208,7 @@ jobs:
steps:
- uses: actions/checkout@v5
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v5
with:
name: dist
path: ${{ inputs.working-directory }}/dist/
@@ -258,7 +258,7 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v5
with:
name: dist
path: ${{ inputs.working-directory }}/dist/
@@ -377,7 +377,6 @@ jobs:
XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
PPLX_API_KEY: ${{ secrets.PPLX_API_KEY }}
LANGCHAIN_TESTS_USER_AGENT: ${{ secrets.LANGCHAIN_TESTS_USER_AGENT }}
run: make integration_tests
working-directory: ${{ inputs.working-directory }}
@@ -410,7 +409,6 @@ jobs:
AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME }}
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
LANGCHAIN_TESTS_USER_AGENT: ${{ secrets.LANGCHAIN_TESTS_USER_AGENT }}
steps:
- uses: actions/checkout@v5
@@ -430,7 +428,7 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v5
if: startsWith(inputs.working-directory, 'libs/core')
with:
name: dist
@@ -444,7 +442,7 @@ jobs:
git ls-remote --tags origin "langchain-${{ matrix.partner }}*" \
| awk '{print $2}' \
| sed 's|refs/tags/||' \
| grep -E '[0-9]+\.[0-9]+\.[0-9]+$' \
| grep -E '[0-9]+\.[0-9]+\.[0-9]+([a-zA-Z]+[0-9]+)?$' \
| sort -Vr \
| head -n 1
)"
@@ -499,7 +497,7 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v5
with:
name: dist
path: ${{ inputs.working-directory }}/dist/
@@ -539,7 +537,7 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v5
with:
name: dist
path: ${{ inputs.working-directory }}/dist/

View File

@@ -13,7 +13,7 @@ on:
required: false
type: string
description: "Python version to use"
default: "3.12"
default: "3.11"
pydantic-version:
required: true
type: string
@@ -51,9 +51,7 @@ jobs:
- name: "🔄 Install Specific Pydantic Version"
shell: bash
env:
PYDANTIC_VERSION: ${{ inputs.pydantic-version }}
run: VIRTUAL_ENV=.venv uv pip install "pydantic~=$PYDANTIC_VERSION"
run: VIRTUAL_ENV=.venv uv pip install pydantic~=${{ inputs.pydantic-version }}
- name: "🧪 Run Core Tests"
shell: bash
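`pydantic~=$PYDANTIC_VERSION` uses a compatible-release specifier, so e.g. `~=2.7` accepts any `2.x` at or above 2.7 but not 3.0. A quick check using the third-party `packaging` library (assumed installed):

```python
# What a compatible-release pin like "pydantic~=2.7" admits.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=2.7")  # equivalent to: >=2.7, ==2.*
print("2.7.4" in spec)  # True
print("2.8.0" in spec)  # True
print("3.0.0" in spec)  # False
```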

View File

@@ -184,14 +184,15 @@ jobs:
steps:
- uses: actions/checkout@v5
# We have to use 3.12 as 3.13 is not yet supported
- name: "📦 Install UV Package Manager"
uses: astral-sh/setup-uv@v7
uses: astral-sh/setup-uv@v6
with:
python-version: "3.13"
python-version: "3.12"
- uses: actions/setup-python@v6
with:
python-version: "3.13"
python-version: "3.12"
- name: "📦 Install Test Dependencies"
run: uv sync --group test

View File

@@ -23,8 +23,10 @@ permissions:
contents: read
env:
POETRY_VERSION: "1.8.4"
UV_FROZEN: "true"
DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
POETRY_LIBS: ("libs/partners/aws")
jobs:
# Generate dynamic test matrix based on input parameters or defaults
@@ -58,6 +60,7 @@ jobs:
echo $matrix
echo "matrix=$matrix" >> $GITHUB_OUTPUT
# Run integration tests against partner libraries with live API credentials
# Tests are run with Poetry or UV depending on the library's setup
build:
if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"
@@ -92,7 +95,17 @@ jobs:
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
mv langchain-aws/libs/aws langchain/libs/partners/aws
- name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
if: contains(env.POETRY_LIBS, matrix.working-directory)
uses: "./langchain/.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: langchain/${{ matrix.working-directory }}
cache-key: scheduled
- name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
uses: "./langchain/.github/actions/uv_setup"
with:
python-version: ${{ matrix.python-version }}
@@ -110,7 +123,15 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_REGION }}
- name: "📦 Install Dependencies"
- name: "📦 Install Dependencies (Poetry)"
if: contains(env.POETRY_LIBS, matrix.working-directory)
run: |
echo "Running scheduled tests, installing dependencies with poetry..."
cd langchain/${{ matrix.working-directory }}
poetry install --with=test_integration,test
- name: "📦 Install Dependencies (UV)"
if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
run: |
echo "Running scheduled tests, installing dependencies with uv..."
cd langchain/${{ matrix.working-directory }}
@@ -155,7 +176,6 @@ jobs:
WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
LANGCHAIN_TESTS_USER_AGENT: ${{ secrets.LANGCHAIN_TESTS_USER_AGENT }}
run: |
cd langchain/${{ matrix.working-directory }}
make integration_tests
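The Poetry-vs-UV split above hinges on GitHub's `contains(env.POETRY_LIBS, matrix.working-directory)`, which for a string haystack is a plain substring test, so `POETRY_LIBS` acts as a lightweight allow-list. A rough Python analogue:

```python
# Rough analogue of the contains() dispatch above: substring membership.
POETRY_LIBS = '("libs/partners/aws")'

def uses_poetry(working_directory: str) -> bool:
    return working_directory in POETRY_LIBS  # substring test, like contains()

print(uses_poetry("libs/partners/aws"))     # True  -> poetry install
print(uses_poetry("libs/partners/openai"))  # False -> uv sync
```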

View File

@@ -27,10 +27,10 @@
# * release — prepare a new release
#
# Allowed Scopes (optional):
# core, cli, langchain, langchain_v1, langchain-classic, standard-tests,
# core, cli, langchain, langchain_v1, langchain_legacy, standard-tests,
# text-splitters, docs, anthropic, chroma, deepseek, exa, fireworks, groq,
# huggingface, mistralai, nomic, ollama, openai, perplexity, prompty, qdrant,
# xai, infra, deps
# xai, infra
#
# Rules:
# 1. The 'Type' must start with a lowercase letter.
@@ -79,8 +79,8 @@ jobs:
core
cli
langchain
langchain-classic
model-profiles
langchain_v1
langchain_legacy
standard-tests
text-splitters
docs

.gitignore
View File

@@ -1,8 +1,6 @@
.vs/
.claude/
.idea/
#Emacs backup
*~
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

View File

@@ -1,8 +0,0 @@
{
"mcpServers": {
"docs-langchain": {
"type": "http",
"url": "https://docs.langchain.com/mcp"
}
}
}

View File

@@ -149,25 +149,23 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
Args:
to: The email address of the recipient.
msg: The message body to send.
priority: Email priority level (`'low'`, `'normal'`, `'high'`).
priority: Email priority level (`'low'`, ``'normal'``, `'high'`).
Returns:
`True` if email was sent successfully, `False` otherwise.
True if email was sent successfully, False otherwise.
Raises:
`InvalidEmailError`: If the email address format is invalid.
`SMTPConnectionError`: If unable to connect to email server.
InvalidEmailError: If the email address format is invalid.
SMTPConnectionError: If unable to connect to email server.
"""
```
**Documentation Guidelines:**
- Types go in function signatures, NOT in docstrings
- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
- Focus on "why" rather than "what" in descriptions
- Document all parameters, return values, and exceptions
- Keep descriptions concise but clear
- Ensure American English spelling (e.g., "behavior", not "behaviour")
📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
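Put together, a docstring that satisfies the bullet list above might look like this sketch (the function is hypothetical; types stay in the signature and the default is not repeated):

```python
def fetch_page(url: str, *, timeout: float = 5.0) -> str:
    """Fetch a page and return its body as text.

    Args:
        url: Address of the page to retrieve.
        timeout: How long to wait for a response, in seconds.

    Returns:
        The decoded response body.

    Raises:
        TimeoutError: If the server does not respond in time.
    """
    ...
```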

View File

@@ -149,25 +149,23 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
Args:
to: The email address of the recipient.
msg: The message body to send.
priority: Email priority level (`'low'`, `'normal'`, `'high'`).
priority: Email priority level (`'low'`, ``'normal'``, `'high'`).
Returns:
`True` if email was sent successfully, `False` otherwise.
True if email was sent successfully, False otherwise.
Raises:
`InvalidEmailError`: If the email address format is invalid.
`SMTPConnectionError`: If unable to connect to email server.
InvalidEmailError: If the email address format is invalid.
SMTPConnectionError: If unable to connect to email server.
"""
```
**Documentation Guidelines:**
- Types go in function signatures, NOT in docstrings
- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
- Focus on "why" rather than "what" in descriptions
- Document all parameters, return values, and exceptions
- Keep descriptions concise but clear
- Ensure American English spelling (e.g., "behavior", not "behaviour")
📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.

View File

@@ -2,7 +2,6 @@
Please see the following guides for migrating LangChain code:
* Migrate to [LangChain v1.0](https://docs.langchain.com/oss/python/migrate/langchain-v1)
* Migrate to [LangChain v0.3](https://python.langchain.com/docs/versions/v0_3/)
* Migrate to [LangChain v0.2](https://python.langchain.com/docs/versions/v0_2/)
* Migrating from [LangChain 0.0.x Chains](https://python.langchain.com/docs/versions/migrating_chains/)

View File

@@ -1,43 +1,47 @@
<div align="center">
<a href="https://www.langchain.com/">
<picture>
<source media="(prefers-color-scheme: light)" srcset=".github/images/logo-dark.svg">
<source media="(prefers-color-scheme: dark)" srcset=".github/images/logo-light.svg">
<img alt="LangChain Logo" src=".github/images/logo-dark.svg" width="80%">
</picture>
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset=".github/images/logo-dark.svg">
<source media="(prefers-color-scheme: dark)" srcset=".github/images/logo-light.svg">
<img alt="LangChain Logo" src=".github/images/logo-dark.svg" width="80%">
</picture>
</p>
<p align="center">
The platform for reliable agents.
</p>
<p align="center">
<a href="https://opensource.org/licenses/MIT" target="_blank">
<img src="https://img.shields.io/pypi/l/langchain-core?style=flat-square" alt="PyPI - License">
</a>
</div>
<a href="https://pypistats.org/packages/langchain-core" target="_blank">
<img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads">
</a>
<a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank">
<img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square" alt="Open in Dev Containers">
</a>
<a href="https://codespaces.new/langchain-ai/langchain" target="_blank">
<img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20">
</a>
<a href="https://codspeed.io/langchain-ai/langchain" target="_blank">
<img src="https://img.shields.io/endpoint?url=https://codspeed.io/badge.json" alt="CodSpeed Badge">
</a>
<a href="https://twitter.com/langchainai" target="_blank">
<img src="https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI" alt="Twitter / X">
</a>
</p>
<div align="center">
<h3>The platform for reliable agents.</h3>
</div>
<div align="center">
<a href="https://opensource.org/licenses/MIT" target="_blank"><img src="https://img.shields.io/pypi/l/langchain" alt="PyPI - License"></a>
<a href="https://pypistats.org/packages/langchain" target="_blank"><img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads"></a>
<a href="https://pypi.org/project/langchain/#history" target="_blank"><img src="https://img.shields.io/pypi/v/langchain?label=%20" alt="Version"></a>
<a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank"><img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode" alt="Open in Dev Containers"></a>
<a href="https://codespaces.new/langchain-ai/langchain" target="_blank"><img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20"></a>
<a href="https://codspeed.io/langchain-ai/langchain" target="_blank"><img src="https://img.shields.io/endpoint?url=https://codspeed.io/badge.json" alt="CodSpeed Badge"></a>
<a href="https://twitter.com/langchainai" target="_blank"><img src="https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI" alt="Twitter / X"></a>
</div>
LangChain is a framework for building agents and LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development all while future-proofing decisions as the underlying technology evolves.
LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.
```bash
pip install langchain
pip install -U langchain
```
If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.
---
**Documentation**:
**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
- [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) - Comprehensive documentation, including conceptual overviews and guides
- [reference.langchain.com/python](https://reference.langchain.com/python) - API reference docs for LangChain packages
**Discussions**: Visit the [LangChain Forum](https://forum.langchain.com) to connect with the community and share all of your technical questions, ideas, and feedback.
If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
> [!NOTE]
> Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -48,27 +52,26 @@ LangChain helps developers build applications powered by LLMs through a standard
Use LangChain for:
- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and external/internal systems, drawing from LangChain's vast library of integrations with model providers, tools, vector stores, retrievers, and more.
- **Model interoperability**. Swap models in and out as your engineering team experiments to find the best choice for your application's needs. As the industry frontier evolves, adapt quickly: LangChain's abstractions keep you moving without losing momentum.
- **Rapid prototyping**. Quickly build and iterate on LLM applications with LangChain's modular, component-based architecture. Test different approaches and workflows without rebuilding from scratch, accelerating your development cycle.
- **Production-ready features**. Deploy reliable applications with built-in support for monitoring, evaluation, and debugging through integrations like LangSmith. Scale with confidence using battle-tested patterns and best practices.
- **Vibrant community and ecosystem**. Leverage a rich ecosystem of integrations, templates, and community-contributed components. Benefit from continuous improvements and stay up-to-date with the latest AI developments through an active open-source community.
- **Flexible abstraction layers**. Work at the level of abstraction that suits your needs - from high-level chains for quick starts to low-level components for fine-grained control. LangChain grows with your application's complexity.
- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and external/internal systems, drawing from LangChain's vast library of integrations with model providers, tools, vector stores, retrievers, and more.
- **Model interoperability**. Swap models in and out as your engineering team experiments to find the best choice for your application's needs. As the industry frontier evolves, adapt quickly: LangChain's abstractions keep you moving without losing momentum.
## LangChain ecosystem
## LangChain's ecosystem
While the LangChain framework can be used standalone, it also integrates seamlessly with any LangChain product, giving developers a full suite of tools when building LLM applications.
To improve your LLM application development, pair LangChain with:
- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows, and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
- [Integrations](https://docs.langchain.com/oss/python/integrations/providers/overview) - List of LangChain integrations, including chat & embedding models, tools & toolkits, and more
- [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
- [LangSmith Deployment](https://docs.langchain.com/langsmith/deployments) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams, and iterate quickly with visual prototyping in [LangSmith Studio](https://docs.langchain.com/langsmith/studio).
- [Deep Agents](https://github.com/langchain-ai/deepagents) *(new!)* - Build agents that can plan, use subagents, and leverage file systems for complex tasks
- [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
## Additional resources
- [API Reference](https://reference.langchain.com/python) - Detailed reference on navigating base packages and integrations for LangChain.
- [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview) - Learn how to contribute to LangChain projects and find good first issues.
- [Code of Conduct](https://github.com/langchain-ai/langchain/blob/master/.github/CODE_OF_CONDUCT.md) - Our community guidelines and standards for participation.
- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key
concepts behind the LangChain framework.
- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with
guided examples on getting started with LangChain.
- [API Reference](https://reference.langchain.com/python/): Detailed reference on
navigating base packages and integrations for LangChain.
- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.

View File

@@ -55,10 +55,10 @@ All out of scope targets defined by huntr as well as:
* **langchain-experimental**: This repository is for experimental code and is not
eligible for bug bounties (see [package warning](https://pypi.org/project/langchain-experimental/)), bug reports to it will be marked as interesting or waste of
time and published with no bounty attached.
* **tools**: Tools in either `langchain` or `langchain-community` are not eligible for bug
* **tools**: Tools in either langchain or langchain-community are not eligible for bug
bounties. This includes the following directories
* `libs/langchain/langchain/tools`
* `libs/community/langchain_community/tools`
* libs/langchain/langchain/tools
* libs/community/langchain_community/tools
* Please review the [Best Practices](#best-practices)
for more details, but generally tools interact with the real world. Developers are
expected to understand the security implications of their code and are responsible

View File

@@ -1,30 +1,6 @@
# langchain-cli
[![PyPI - Version](https://img.shields.io/pypi/v/langchain-cli?label=%20)](https://pypi.org/project/langchain-cli/#history)
[![PyPI - License](https://img.shields.io/pypi/l/langchain-cli)](https://opensource.org/licenses/MIT)
[![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain-cli)](https://pypistats.org/packages/langchain-cli)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
## Quick Install
```bash
pip install langchain-cli
```
## 🤔 What is this?
This package implements the official CLI for LangChain. Right now, it is most useful for getting started with LangChain Templates!
## 📖 Documentation
This package implements the official CLI for LangChain. Right now, it is most useful
for getting started with LangChain Templates!
[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md)
## 📕 Releases & Versioning
See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
## 💁 Contributing
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).

View File

@@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab
```python
from __module_name__ import Chat__ModuleName__
model = Chat__ModuleName__()
model.invoke("Sing a ballad of LangChain.")
llm = Chat__ModuleName__()
llm.invoke("Sing a ballad of LangChain.")
```
## Embeddings
@@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?")
```python
from __module_name__ import __ModuleName__LLM
model = __ModuleName__LLM()
model.invoke("The meaning of life is")
llm = __ModuleName__LLM()
llm.invoke("The meaning of life is")
```

View File

@@ -1,264 +1,262 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# Chat__ModuleName__\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import Chat__ModuleName__\n",
"\n",
"model = Chat__ModuleName__(\n",
" model=\"model-name\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = model.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | model\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this model provider\n",
"\n",
"E.g. creating/using finetuned models via this provider. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
"nbformat": 4,
"nbformat_minor": 5
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# Chat__ModuleName__\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import Chat__ModuleName__\n",
"\n",
"llm = Chat__ModuleName__(\n",
" model=\"model-name\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this model provider\n",
"\n",
"E.g. creating/using finetuned models via this provider. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,238 +1,236 @@
{
"cells": [
{
"cell_type": "raw",
"id": "67db2992",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# __ModuleName__LLM\n",
"\n",
"- [ ] TODO: Make sure API reference link is correct\n",
"\n",
"This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__&label=%20) |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc51e756",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "4b6e1ca6",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "196c2b41",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "809c6577",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59c710c4",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "0a760037",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0562a13",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__LLM\n",
"\n",
"model = __ModuleName__LLM(\n",
" model=\"model-name\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0ee90032",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"- [ ] TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "035dea0f",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"input_text = \"__ModuleName__ is an AI company that \"\n",
"\n",
"completion = model.invoke(input_text)\n",
"completion"
]
},
{
"cell_type": "markdown",
"id": "add38532",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "078e9db2",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
"\n",
"chain = prompt | model\n",
"chain.invoke(\n",
" {\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e99eef30",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this model provider\n",
"\n",
"E.g. creating/using finetuned models via this provider. Delete if not relevant"
]
},
{
"cell_type": "markdown",
"id": "e9bdfcef",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.1 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"vscode": {
"interpreter": {
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
}
}
"cells": [
{
"cell_type": "raw",
"id": "67db2992",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
"nbformat": 4,
"nbformat_minor": 5
{
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# __ModuleName__LLM\n",
"\n",
"- [ ] TODO: Make sure API reference link is correct\n",
"\n",
"This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__?style=flat-square&label=%20) |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc51e756",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "4b6e1ca6",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "196c2b41",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "809c6577",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59c710c4",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "0a760037",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0562a13",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__LLM\n",
"\n",
"llm = __ModuleName__LLM(\n",
" model=\"model-name\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0ee90032",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"- [ ] TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "035dea0f",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"input_text = \"__ModuleName__ is an AI company that \"\n",
"\n",
"completion = llm.invoke(input_text)\n",
"completion"
]
},
{
"cell_type": "markdown",
"id": "add38532",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "078e9db2",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e99eef30",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this model provider\n",
"\n",
"E.g. creating/using finetuned models via this provider. Delete if not relevant"
]
},
{
"cell_type": "markdown",
"id": "e9bdfcef",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.1 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"vscode": {
"interpreter": {
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}
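The chaining cell in this template hides a subtlety worth checking: anything implementing the Runnable interface can sit in the `prompt | llm` pipe. A minimal runnable sketch of the same pattern, substituting langchain-core's `FakeListLLM` for the provider placeholder (an assumption purely for illustration; a real `__ModuleName__LLM` slots in the same way):

```python
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate

# Stand-in for __ModuleName__LLM: returns canned responses in order.
llm = FakeListLLM(responses=["Ich liebe das Programmieren."])

prompt = PromptTemplate.from_template("How to say {input} in {output_language}:\n")

chain = prompt | llm
print(chain.invoke({"output_language": "German", "input": "I love programming."}))
# -> Ich liebe das Programmieren.
```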

View File

@@ -155,7 +155,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
@@ -185,7 +185,7 @@
"chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | model\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]

View File

@@ -1,204 +1,204 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: __ModuleName__ByteStore\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# __ModuleName__ByteStore\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
"\n",
"## Overview\n",
"\n",
"- TODO: (Optional) A short introduction to the underlying technology/API.\n",
"\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
"| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__&label=%20) |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our byte store:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__ByteStore\n",
"\n",
"kv_store = __ModuleName__ByteStore(\n",
" # params...\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage\n",
"\n",
"- TODO: Run cells so output can be seen.\n",
"\n",
"You can set data under keys like this using the `mset` method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"kv_store.mset(\n",
" [\n",
" [\"key1\", b\"value1\"],\n",
" [\"key2\", b\"value2\"],\n",
" ]\n",
")\n",
"\n",
"kv_store.mget(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And you can delete data using the `mdelete` method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"kv_store.mdelete(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")\n",
"\n",
"kv_store.mget(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this key-value store provider\n",
"\n",
"E.g. extra initialization. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.10.5"
"cells": [
{
"cell_type": "raw",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: __ModuleName__ByteStore\n",
"---"
]
},
"nbformat": 4,
"nbformat_minor": 2
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# __ModuleName__ByteStore\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
"\n",
"## Overview\n",
"\n",
"- TODO: (Optional) A short introduction to the underlying technology/API.\n",
"\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
"| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__?style=flat-square&label=%20) |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our byte store:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__ByteStore\n",
"\n",
"kv_store = __ModuleName__ByteStore(\n",
" # params...\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage\n",
"\n",
"- TODO: Run cells so output can be seen.\n",
"\n",
"You can set data under keys like this using the `mset` method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"kv_store.mset(\n",
" [\n",
" [\"key1\", b\"value1\"],\n",
" [\"key2\", b\"value2\"],\n",
" ]\n",
")\n",
"\n",
"kv_store.mget(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And you can delete data using the `mdelete` method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"kv_store.mdelete(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")\n",
"\n",
"kv_store.mget(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this key-value store provider\n",
"\n",
"E.g. extra initialization. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
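The `mset`/`mget`/`mdelete` contract the template documents can be exercised against langchain-core's `InMemoryByteStore`, used here only as a stand-in for the provider-specific store:

```python
from langchain_core.stores import InMemoryByteStore

kv_store = InMemoryByteStore()

# mset takes (key, value) pairs; values are raw bytes.
kv_store.mset([("key1", b"value1"), ("key2", b"value2")])
print(kv_store.mget(["key1", "key2"]))   # [b'value1', b'value2']

# mdelete removes keys; missing keys come back as None from mget.
kv_store.mdelete(["key1"])
print(kv_store.mget(["key1", "key2"]))   # [None, b'value2']
```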

View File

@@ -1,271 +1,271 @@
{
"cells": [
{
"cell_type": "raw",
"id": "10238e62-3465-4973-9279-606cbb7ccf16",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "a6f91f20",
"metadata": {},
"source": [
"# __ModuleName__\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about underlying API, etc.\n",
"\n",
"## Overview\n",
"\n",
"### Integration details\n",
"\n",
"- TODO: Make sure links and features are correct\n",
"\n",
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-community&label=%20) |\n",
"\n",
"### Tool features\n",
"\n",
"- TODO: Add feature table if it makes sense\n",
"\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Add any additional deps\n",
"\n",
"The integration lives in the `langchain-community` package."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f85b4089",
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet -U langchain-community"
]
},
{
"cell_type": "markdown",
"id": "b15e9266",
"metadata": {},
"source": [
"### Credentials\n",
"\n",
"- TODO: Add any credentials that are needed"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
"# os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
]
},
{
"cell_type": "markdown",
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
"metadata": {},
"source": [
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"- TODO: Fill in instantiation params\n",
"\n",
"Here we show how to instantiate an instance of the __ModuleName__ tool, with "
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.tools import __ModuleName__\n",
"\n",
"\n",
"tool = __ModuleName__(...)"
]
},
{
"cell_type": "markdown",
"id": "74147a1a",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
"\n",
"- TODO: Describe what the tool args are, fill them in, run cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
"metadata": {},
"outputs": [],
"source": [
"tool.invoke({...})"
]
},
{
"cell_type": "markdown",
"id": "d6e73897",
"metadata": {},
"source": [
"### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
"\n",
"We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
"\n",
"- TODO: Fill in tool args and run cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f90e33a7",
"metadata": {},
"outputs": [],
"source": [
"# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
"model_generated_tool_call = {\n",
" \"args\": {...}, # TODO: FILL IN\n",
" \"id\": \"1\",\n",
" \"name\": tool.name,\n",
" \"type\": \"tool_call\",\n",
"}\n",
"tool.invoke(model_generated_tool_call)"
]
},
{
"cell_type": "markdown",
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
"metadata": {},
"source": [
"## Use within an agent\n",
"\n",
"- TODO: Add user question and run cells\n",
"\n",
"We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"# !pip install -qU langchain langchain-openai\n",
"from langchain.chat_models import init_chat_model\n",
"\n",
"model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bea35fa1",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"tools = [tool]\n",
"agent = create_react_agent(model, tools)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
"metadata": {},
"outputs": [],
"source": [
"example_query = \"...\"\n",
"\n",
"events = agent.stream(\n",
" {\"messages\": [(\"user\", example_query)]},\n",
" stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "4ac8146c",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
"cells": [
{
"cell_type": "raw",
"id": "10238e62-3465-4973-9279-606cbb7ccf16",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
"nbformat": 4,
"nbformat_minor": 5
{
"cell_type": "markdown",
"id": "a6f91f20",
"metadata": {},
"source": [
"# __ModuleName__\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about underlying API, etc.\n",
"\n",
"## Overview\n",
"\n",
"### Integration details\n",
"\n",
"- TODO: Make sure links and features are correct\n",
"\n",
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-community?style=flat-square&label=%20) |\n",
"\n",
"### Tool features\n",
"\n",
"- TODO: Add feature table if it makes sense\n",
"\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Add any additional deps\n",
"\n",
"The integration lives in the `langchain-community` package."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f85b4089",
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet -U langchain-community"
]
},
{
"cell_type": "markdown",
"id": "b15e9266",
"metadata": {},
"source": [
"### Credentials\n",
"\n",
"- TODO: Add any credentials that are needed"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
"# os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
]
},
{
"cell_type": "markdown",
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
"metadata": {},
"source": [
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"- TODO: Fill in instantiation params\n",
"\n",
"Here we show how to instantiate an instance of the __ModuleName__ tool, with "
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.tools import __ModuleName__\n",
"\n",
"\n",
"tool = __ModuleName__(...)"
]
},
{
"cell_type": "markdown",
"id": "74147a1a",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
"\n",
"- TODO: Describe what the tool args are, fill them in, run cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
"metadata": {},
"outputs": [],
"source": [
"tool.invoke({...})"
]
},
{
"cell_type": "markdown",
"id": "d6e73897",
"metadata": {},
"source": [
"### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
"\n",
"We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
"\n",
"- TODO: Fill in tool args and run cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f90e33a7",
"metadata": {},
"outputs": [],
"source": [
"# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
"model_generated_tool_call = {\n",
" \"args\": {...}, # TODO: FILL IN\n",
" \"id\": \"1\",\n",
" \"name\": tool.name,\n",
" \"type\": \"tool_call\",\n",
"}\n",
"tool.invoke(model_generated_tool_call)"
]
},
{
"cell_type": "markdown",
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
"metadata": {},
"source": [
"## Use within an agent\n",
"\n",
"- TODO: Add user question and run cells\n",
"\n",
"We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"# !pip install -qU langchain langchain-openai\n",
"from langchain.chat_models import init_chat_model\n",
"\n",
"llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bea35fa1",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"tools = [tool]\n",
"agent = create_react_agent(llm, tools)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
"metadata": {},
"outputs": [],
"source": [
"example_query = \"...\"\n",
"\n",
"events = agent.stream(\n",
" {\"messages\": [(\"user\", example_query)]},\n",
" stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "4ac8146c",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
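The ToolCall invocation path in this template is easiest to verify with a concrete tool. A small sketch using a `@tool`-decorated function from langchain-core (the `multiply` tool is invented for illustration):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# Direct invocation with args returns the raw result.
print(multiply.invoke({"a": 6, "b": 7}))  # 42

# Invocation with a model-generated-style ToolCall returns a ToolMessage.
tool_call = {"args": {"a": 6, "b": 7}, "id": "1", "name": multiply.name, "type": "tool_call"}
print(multiply.invoke(tool_call))  # ToolMessage(content='42', tool_call_id='1', ...)
```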

View File

@@ -295,7 +295,7 @@
"source": [
"## TODO: Any functionality specific to this vector store\n",
"\n",
"E.g. creating a persistent database to save to your disk, etc."
"E.g. creating a persisten database to save to your disk, etc."
]
},
{

View File

@@ -26,30 +26,30 @@ class Chat__ModuleName__(BaseChatModel):
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable
``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
# TODO: Populate with relevant params.
Key init args — completion params:
model:
model: str
Name of __ModuleName__ model to use.
temperature:
temperature: float
Sampling temperature.
max_tokens:
max_tokens: int | None
Max number of tokens to generate.
# TODO: Populate with relevant params.
Key init args — client params:
timeout:
timeout: float | None
Timeout for requests.
max_retries:
max_retries: int
Max number of retries.
api_key:
api_key: str | None
__ModuleName__ API key. If not passed in will be read from env var
__MODULE_NAME___API_KEY.
@@ -57,214 +57,216 @@ class Chat__ModuleName__(BaseChatModel):
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__ import Chat__ModuleName__
.. code-block:: python
model = Chat__ModuleName__(
model="...",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# api_key="...",
# other params...
)
```
from __module_name__ import Chat__ModuleName__
llm = Chat__ModuleName__(
model="...",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# api_key="...",
# other params...
)
Invoke:
```python
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
model.invoke(messages)
```
.. code-block:: python
```python
# TODO: Example output.
```
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
llm.invoke(messages)
.. code-block:: python
# TODO: Example output.
# TODO: Delete if token-level streaming isn't supported.
Stream:
```python
for chunk in model.stream(messages):
print(chunk.text, end="")
```
.. code-block:: python
```python
# TODO: Example output.
```
for chunk in llm.stream(messages):
print(chunk.text, end="")
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
```
.. code-block:: python
```python
# TODO: Example output.
```
# TODO: Example output.
.. code-block:: python
stream = llm.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block:: python
# TODO: Example output.
# TODO: Delete if native async isn't supported.
Async:
```python
await model.ainvoke(messages)
.. code-block:: python
# stream:
# async for chunk in model.astream(messages)
await llm.ainvoke(messages)
# batch:
# await model.abatch([messages])
```
# stream:
# async for chunk in llm.astream(messages)
# batch:
# await llm.abatch([messages])
.. code-block:: python
# TODO: Example output.
```python
# TODO: Example output.
```
# TODO: Delete if .bind_tools() isn't supported.
Tool calling:
```python
from pydantic import BaseModel, Field
.. code-block:: python
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
from pydantic import BaseModel, Field
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
ai_msg.tool_calls
```
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
```python
# TODO: Example output.
```
llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
ai_msg.tool_calls
See `Chat__ModuleName__.bind_tools()` method for more.
.. code-block:: python
# TODO: Example output.
See ``Chat__ModuleName__.bind_tools()`` method for more.
# TODO: Delete if .with_structured_output() isn't supported.
Structured output:
```python
from typing import Optional
.. code-block:: python
from pydantic import BaseModel, Field
from typing import Optional
class Joke(BaseModel):
'''Joke to tell user.'''
from pydantic import BaseModel, Field
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(description="How funny the joke is, from 1 to 10")
class Joke(BaseModel):
'''Joke to tell user.'''
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(description="How funny the joke is, from 1 to 10")
```python
# TODO: Example output.
```
structured_llm = llm.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats")
See `Chat__ModuleName__.with_structured_output()` for more.
.. code-block:: python
# TODO: Example output.
See ``Chat__ModuleName__.with_structured_output()`` for more.
# TODO: Delete if JSON mode response format isn't supported.
JSON mode:
```python
# TODO: Replace with appropriate bind arg.
json_model = model.bind(response_format={"type": "json_object"})
ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
ai_msg.content
```
.. code-block:: python
```python
# TODO: Example output.
```
# TODO: Replace with appropriate bind arg.
json_llm = llm.bind(response_format={"type": "json_object"})
ai_msg = json_llm.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
ai_msg.content
.. code-block:: python
# TODO: Example output.
# TODO: Delete if image inputs aren't supported.
Image input:
```python
import base64
import httpx
from langchain_core.messages import HumanMessage
.. code-block:: python
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
# TODO: Replace with appropriate message content format.
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
],
)
ai_msg = model.invoke([message])
ai_msg.content
```
import base64
import httpx
from langchain_core.messages import HumanMessage
```python
# TODO: Example output.
```
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
# TODO: Replace with appropriate message content format.
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
],
)
ai_msg = llm.invoke([message])
ai_msg.content
.. code-block:: python
# TODO: Example output.
# TODO: Delete if audio inputs aren't supported.
Audio input:
```python
# TODO: Example input
```
.. code-block:: python
```python
# TODO: Example output
```
# TODO: Example input
.. code-block:: python
# TODO: Example output
# TODO: Delete if video inputs aren't supported.
Video input:
```python
# TODO: Example input
```
.. code-block:: python
```python
# TODO: Example output
```
# TODO: Example input
.. code-block:: python
# TODO: Example output
# TODO: Delete if token usage metadata isn't supported.
Token usage:
```python
ai_msg = model.invoke(messages)
ai_msg.usage_metadata
```
.. code-block:: python
```python
{'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
```
ai_msg = llm.invoke(messages)
ai_msg.usage_metadata
.. code-block:: python
{'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
# TODO: Delete if logprobs aren't supported.
Logprobs:
```python
# TODO: Replace with appropriate bind arg.
logprobs_model = model.bind(logprobs=True)
ai_msg = logprobs_model.invoke(messages)
ai_msg.response_metadata["logprobs"]
```
.. code-block:: python
# TODO: Replace with appropriate bind arg.
logprobs_llm = llm.bind(logprobs=True)
ai_msg = logprobs_llm.invoke(messages)
ai_msg.response_metadata["logprobs"]
.. code-block:: python
# TODO: Example output.
```python
# TODO: Example output.
```
Response metadata
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
.. code-block:: python
```python
# TODO: Example output.
ai_msg = llm.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
# TODO: Example output.
```
""" # noqa: E501
model_name: str = Field(alias="model")
@@ -312,11 +314,11 @@ class Chat__ModuleName__(BaseChatModel):
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
# Replace this with actual logic to generate a response from a list
@@ -360,11 +362,11 @@ class Chat__ModuleName__(BaseChatModel):
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
last_message = messages[-1]
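A minimal sketch of the stop-token behavior the `_generate`/`_stream` docstrings above ask implementers to follow: truncate at the earliest stop string, but keep the stop string itself in the output so downstream parsing can see why generation ended. The helper below is hypothetical, not part of the template:

```python
def truncate_at_stop(text: str, stop: list[str] | None) -> str:
    """Cut `text` at the earliest stop string, keeping the stop string itself."""
    if not stop:
        return text
    cut = len(text)
    for s in stop:
        idx = text.find(s)
        if idx != -1:
            cut = min(cut, idx + len(s))  # include the stop token in the output
    return text[:cut]


assert truncate_at_stop("Hello world. Goodbye.", ["."]) == "Hello world."
```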

View File

@@ -14,55 +14,55 @@ class __ModuleName__Loader(BaseLoader):
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable
``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
# TODO: Replace with relevant init params.
Instantiate:
```python
from langchain_community.document_loaders import __ModuleName__Loader
.. code-block:: python
loader = __ModuleName__Loader(
# required params = ...
# other params = ...
)
```
from langchain_community.document_loaders import __ModuleName__Loader
loader = __ModuleName__Loader(
# required params = ...
# other params = ...
)
Lazy load:
```python
docs = []
docs_lazy = loader.lazy_load()
.. code-block:: python
# async variant:
# docs_lazy = await loader.alazy_load()
docs = []
docs_lazy = loader.lazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
# async variant:
# docs_lazy = await loader.alazy_load()
```python
TODO: Example output
```
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
TODO: Example output
# TODO: Delete if async load is not implemented
Async load:
```python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
.. code-block:: python
```python
TODO: Example output
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
TODO: Example output
```
"""
# TODO: This method must be implemented to load documents.
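To make the lazy-loading contract concrete, here is a toy in-memory loader (hypothetical; a real implementation would page through the provider's API). Implementing `lazy_load` as a generator is all that is required, since the base class derives `load()` from it:

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class InMemoryLoader(BaseLoader):
    """Toy loader that yields one Document per input string."""

    def __init__(self, texts: list[str]) -> None:
        self.texts = texts

    def lazy_load(self) -> Iterator[Document]:
        # Yielding keeps memory flat for large corpora; load() builds on this.
        for i, text in enumerate(self.texts):
            yield Document(page_content=text, metadata={"index": i})


docs = list(InMemoryLoader(["foo", "bar"]).lazy_load())
print(docs[0].page_content, docs[0].metadata)  # foo {'index': 0}
```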

View File

@@ -8,13 +8,13 @@ class __ModuleName__Embeddings(Embeddings):
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable
``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
# TODO: Populate with relevant params.
Key init args — completion params:
@@ -25,50 +25,50 @@ class __ModuleName__Embeddings(Embeddings):
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__ import __ModuleName__Embeddings
.. code-block:: python
embed = __ModuleName__Embeddings(
model="...",
# api_key="...",
# other params...
)
```
from __module_name__ import __ModuleName__Embeddings
embed = __ModuleName__Embeddings(
model="...",
# api_key="...",
# other params...
)
Embed single text:
```python
input_text = "The meaning of life is 42"
embed.embed_query(input_text)
```
.. code-block:: python
```python
# TODO: Example output.
```
input_text = "The meaning of life is 42"
embed.embed_query(input_text)
.. code-block:: python
# TODO: Example output.
# TODO: Delete if token-level streaming isn't supported.
Embed multiple text:
```python
input_texts = ["Document 1...", "Document 2..."]
embed.embed_documents(input_texts)
```
.. code-block:: python
```python
# TODO: Example output.
```
input_texts = ["Document 1...", "Document 2..."]
embed.embed_documents(input_texts)
.. code-block:: python
# TODO: Example output.
# TODO: Delete if native async isn't supported.
Async:
```python
await embed.aembed_query(input_text)
.. code-block:: python
# multiple:
# await embed.aembed_documents(input_texts)
```
await embed.aembed_query(input_text)
```python
# TODO: Example output.
# multiple:
# await embed.aembed_documents(input_texts)
.. code-block:: python
# TODO: Example output.
```
"""
def __init__(self, model: str):
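A runnable sketch of the two methods the embeddings template requires, using a deterministic hash-based toy embedding (invented for illustration; real implementations call the provider's API):

```python
import hashlib

from langchain_core.embeddings import Embeddings


class ToyEmbeddings(Embeddings):
    """Deterministic 4-dimensional embeddings derived from a SHA-256 hash."""

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        return [self.embed_query(t) for t in texts]

    def embed_query(self, text: str) -> list[float]:
        digest = hashlib.sha256(text.encode()).digest()
        return [b / 255 for b in digest[:4]]


embed = ToyEmbeddings()
print(embed.embed_query("The meaning of life is 42"))
print(len(embed.embed_documents(["Document 1...", "Document 2..."])))  # 2
```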

View File

@@ -14,13 +14,13 @@ class __ModuleName__Retriever(BaseRetriever):
# TODO: Replace with relevant packages, env vars, etc.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable
``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
# TODO: Populate with relevant params.
Key init args:
@@ -31,58 +31,58 @@ class __ModuleName__Retriever(BaseRetriever):
# TODO: Replace with relevant init params.
Instantiate:
```python
from __package_name__ import __ModuleName__Retriever
.. code-block:: python
retriever = __ModuleName__Retriever(
# ...
)
```
from __package_name__ import __ModuleName__Retriever
retriever = __ModuleName__Retriever(
# ...
)
Usage:
```python
query = "..."
.. code-block:: python
retriever.invoke(query)
```
query = "..."
```txt
# TODO: Example output.
```
retriever.invoke(query)
.. code-block::
# TODO: Example output.
Use within a chain:
```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
.. code-block:: python
prompt = ChatPromptTemplate.from_template(
\"\"\"Answer the question based only on the context provided.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
Context: {context}
prompt = ChatPromptTemplate.from_template(
\"\"\"Answer the question based only on the context provided.
Question: {question}\"\"\"
)
Context: {context}
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
Question: {question}\"\"\"
)
def format_docs(docs):
return "\\n\\n".join(doc.page_content for doc in docs)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
def format_docs(docs):
return "\\n\\n".join(doc.page_content for doc in docs)
chain.invoke("...")
```
chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
```
# TODO: Example output.
```
chain.invoke("...")
.. code-block::
# TODO: Example output.
"""

View File

@@ -12,13 +12,13 @@ class __ModuleName__Toolkit(BaseToolkit):
# TODO: Replace with relevant packages, env vars, etc.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable
``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
# TODO: Populate with relevant params.
Key init args:
@@ -29,42 +29,42 @@ class __ModuleName__Toolkit(BaseToolkit):
# TODO: Replace with relevant init params.
Instantiate:
```python
from __package_name__ import __ModuleName__Toolkit
.. code-block:: python
toolkit = __ModuleName__Toolkit(
# ...
)
```
from __package_name__ import __ModuleName__Toolkit
toolkit = __ModuleName__Toolkit(
# ...
)
Tools:
```python
toolkit.get_tools()
```
.. code-block:: python
```txt
# TODO: Example output.
```
toolkit.get_tools()
.. code-block::
# TODO: Example output.
Use within an agent:
```python
from langgraph.prebuilt import create_react_agent
.. code-block:: python
agent_executor = create_react_agent(llm, tools)
from langgraph.prebuilt import create_react_agent
example_query = "..."
agent_executor = create_react_agent(llm, tools)
events = agent_executor.stream(
{"messages": [("user", example_query)]},
stream_mode="values",
)
for event in events:
event["messages"][-1].pretty_print()
```
example_query = "..."
```txt
# TODO: Example output.
```
events = agent_executor.stream(
{"messages": [("user", example_query)]},
stream_mode="values",
)
for event in events:
event["messages"][-1].pretty_print()
.. code-block::
# TODO: Example output.
"""

View File

@@ -27,42 +27,42 @@ class __ModuleName__Tool(BaseTool): # type: ignore[override]
Setup:
# TODO: Replace with relevant packages, env vars.
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable
``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
Instantiation:
```python
tool = __ModuleName__Tool(
# TODO: init params
)
```
.. code-block:: python
tool = __ModuleName__Tool(
# TODO: init params
)
Invocation with args:
```python
# TODO: invoke args
tool.invoke({...})
```
.. code-block:: python
```python
# TODO: output of invocation
```
# TODO: invoke args
tool.invoke({...})
.. code-block:: python
# TODO: output of invocation
Invocation with ToolCall:
```python
# TODO: invoke args
tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
```
.. code-block:: python
```python
# TODO: output of invocation
# TODO: invoke args
tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
# TODO: output of invocation
```
""" # noqa: E501
# TODO: Set tool name and description
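Both invocation styles shown in the docstring, direct args and ToolCall, can be checked against a toy `BaseTool` subclass (the `reverse` tool and its schema are hypothetical):

```python
from pydantic import BaseModel, Field

from langchain_core.tools import BaseTool


class ReverseInput(BaseModel):
    """Input schema for the toy tool."""

    text: str = Field(description="Text to reverse")


class ReverseTool(BaseTool):
    name: str = "reverse"
    description: str = "Reverse the input text."
    args_schema: type[BaseModel] = ReverseInput

    def _run(self, text: str) -> str:
        return text[::-1]


tool = ReverseTool()
print(tool.invoke({"text": "hello"}))  # olleh
print(tool.invoke({"args": {"text": "hello"}, "id": "1", "name": tool.name, "type": "tool_call"}))
```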

View File

@@ -28,133 +28,133 @@ class __ModuleName__VectorStore(VectorStore):
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable `__MODULE_NAME___API_KEY`.
Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
# TODO: Populate with relevant params.
Key init args — indexing params:
collection_name:
collection_name: str
Name of the collection.
embedding_function:
embedding_function: Embeddings
Embedding function to use.
# TODO: Populate with relevant params.
Key init args — client params:
client:
client: Client | None
Client to use.
connection_args:
connection_args: dict | None
Connection arguments.
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__.vectorstores import __ModuleName__VectorStore
from langchain_openai import OpenAIEmbeddings
.. code-block:: python
vector_store = __ModuleName__VectorStore(
collection_name="foo",
embedding_function=OpenAIEmbeddings(),
connection_args={"uri": "./foo.db"},
# other params...
)
```
from __module_name__.vectorstores import __ModuleName__VectorStore
from langchain_openai import OpenAIEmbeddings
vector_store = __ModuleName__VectorStore(
collection_name="foo",
embedding_function=OpenAIEmbeddings(),
connection_args={"uri": "./foo.db"},
# other params...
)
# TODO: Populate with relevant variables.
Add Documents:
```python
from langchain_core.documents import Document
.. code-block:: python
document_1 = Document(page_content="foo", metadata={"baz": "bar"})
document_2 = Document(page_content="thud", metadata={"bar": "baz"})
document_3 = Document(page_content="i will be deleted :(")
from langchain_core.documents import Document
documents = [document_1, document_2, document_3]
ids = ["1", "2", "3"]
vector_store.add_documents(documents=documents, ids=ids)
```
document_1 = Document(page_content="foo", metadata={"baz": "bar"})
document_2 = Document(page_content="thud", metadata={"bar": "baz"})
document_3 = Document(page_content="i will be deleted :(")
documents = [document_1, document_2, document_3]
ids = ["1", "2", "3"]
vector_store.add_documents(documents=documents, ids=ids)
# TODO: Populate with relevant variables.
Delete Documents:
```python
vector_store.delete(ids=["3"])
```
.. code-block:: python
vector_store.delete(ids=["3"])
# TODO: Fill out with relevant variables and example output.
Search:
```python
results = vector_store.similarity_search(query="thud",k=1)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
```
.. code-block:: python
```python
# TODO: Example output
```
results = vector_store.similarity_search(query="thud",k=1)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
# TODO: Example output
# TODO: Fill out with relevant variables and example output.
Search with filter:
```python
results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
```
.. code-block:: python
```python
# TODO: Example output
```
results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
# TODO: Example output
# TODO: Fill out with relevant variables and example output.
Search with score:
```python
results = vector_store.similarity_search_with_score(query="qux",k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
```
.. code-block:: python
```python
# TODO: Example output
```
results = vector_store.similarity_search_with_score(query="qux",k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
# TODO: Example output
# TODO: Fill out with relevant variables and example output.
Async:
```python
# add documents
# await vector_store.aadd_documents(documents=documents, ids=ids)
.. code-block:: python
# delete documents
# await vector_store.adelete(ids=["3"])
# add documents
# await vector_store.aadd_documents(documents=documents, ids=ids)
# search
# results = vector_store.asimilarity_search(query="thud",k=1)
# delete documents
# await vector_store.adelete(ids=["3"])
# search with score
results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
for doc,score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
```
# search
# results = vector_store.asimilarity_search(query="thud",k=1)
```python
# TODO: Example output
```
# search with score
results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
for doc,score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
# TODO: Example output
# TODO: Fill out with relevant variables and example output.
Use as Retriever:
```python
retriever = vector_store.as_retriever(
search_type="mmr",
search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
)
retriever.invoke("thud")
```
.. code-block:: python
```python
# TODO: Example output
retriever = vector_store.as_retriever(
search_type="mmr",
search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
)
retriever.invoke("thud")
.. code-block:: python
# TODO: Example output
```
""" # noqa: E501
def __init__(self, embedding: Embeddings) -> None:
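The add/search/delete flow the vector store template documents can be exercised with langchain-core's `InMemoryVectorStore` and deterministic fake embeddings, standing in for the provider's database:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# Deterministic fake embeddings keep the example self-contained.
vector_store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=16))

documents = [
    Document(page_content="foo", metadata={"baz": "bar"}),
    Document(page_content="thud", metadata={"bar": "baz"}),
]
vector_store.add_documents(documents=documents, ids=["1", "2"])

for doc in vector_store.similarity_search("thud", k=1):
    print(f"* {doc.page_content} [{doc.metadata}]")

vector_store.delete(ids=["1"])
```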

View File

@@ -24,7 +24,7 @@ def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
This code works
Args:
pkg_name: The name of the partner package.
pkg_name (str): The name of the partner package.
Returns:
List of 2-tuples containing old and new import paths.

View File

@@ -65,7 +65,7 @@ def is_subclass(class_obj: type, classes_: list[type]) -> bool:
classes_: A list of classes to check against.
Returns:
True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
True if `class_obj` is a subclass of any class in `classes_`, False otherwise.
"""
return any(
issubclass(class_obj, kls)

View File

@@ -182,7 +182,7 @@ def parse_dependencies(
inner_branches = _list_arg_to_length(branch, num_deps)
return list(
map( # type: ignore[call-overload, unused-ignore]
map( # type: ignore[call-overload]
parse_dependency_string,
inner_deps,
inner_repos,

View File

@@ -20,13 +20,12 @@ description = "CLI for interacting with LangChain"
readme = "README.md"
[project.urls]
Homepage = "https://docs.langchain.com/"
Documentation = "https://docs.langchain.com/"
Source = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
Changelog = "https://github.com/langchain-ai/langchain/releases?q=%22langchain-cli%3D%3D1%22"
Twitter = "https://x.com/LangChainAI"
Slack = "https://www.langchain.com/join-community"
Reddit = "https://www.reddit.com/r/LangChain/"
homepage = "https://docs.langchain.com/"
repository = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
changelog = "https://github.com/langchain-ai/langchain/releases?q=%22langchain-cli%3D%3D1%22"
twitter = "https://x.com/LangChainAI"
slack = "https://www.langchain.com/join-community"
reddit = "https://www.reddit.com/r/LangChain/"
[project.scripts]
langchain = "langchain_cli.cli:app"
@@ -43,14 +42,14 @@ lint = [
]
test = [
"langchain-core",
"langchain-classic"
"langchain"
]
typing = ["langchain-classic"]
typing = ["langchain"]
test_integration = []
[tool.uv.sources]
langchain-core = { path = "../core", editable = true }
langchain-classic = { path = "../langchain", editable = true }
langchain = { path = "../langchain", editable = true }
[tool.ruff.format]
docstring-code-format = true

View File

@@ -1,5 +1,5 @@
import pytest
from langchain_classic._api import suppress_langchain_deprecation_warning as sup2
from langchain._api import suppress_langchain_deprecation_warning as sup2
from langchain_core._api import suppress_langchain_deprecation_warning as sup1
from langchain_cli.namespaces.migrate.generate.generic import (

libs/cli/uv.lock generated
View File

@@ -327,21 +327,7 @@ wheels = [
[[package]]
name = "langchain"
version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "langchain-core" },
{ name = "langgraph" },
{ name = "pydantic" },
]
sdist = { url = "https://files.pythonhosted.org/packages/7d/b8/36078257ba52351608129ee983079a4d77ee69eb1470ee248cd8f5728a31/langchain-1.0.0.tar.gz", hash = "sha256:56bf90d935ac1dda864519372d195ca58757b755dd4c44b87840b67d069085b7", size = 466932, upload-time = "2025-10-17T20:53:20.319Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c4/4d/2758a16ad01716c0fb3fe9ec205fd530eae4528b35a27ff44837c399e032/langchain-1.0.0-py3-none-any.whl", hash = "sha256:8c95e41250fc86d09a978fbdf999f86c18d50a28a2addc5da88546af00a1ad15", size = 106202, upload-time = "2025-10-17T20:53:18.685Z" },
]
[[package]]
name = "langchain-classic"
version = "1.0.0"
version = "0.3.27"
source = { editable = "../langchain" }
dependencies = [
{ name = "async-timeout", marker = "python_full_version < '3.11'" },
@@ -358,28 +344,20 @@ dependencies = [
requires-dist = [
{ name = "async-timeout", marker = "python_full_version < '3.11'", specifier = ">=4.0.0,<5.0.0" },
{ name = "langchain-anthropic", marker = "extra == 'anthropic'" },
{ name = "langchain-aws", marker = "extra == 'aws'" },
{ name = "langchain-community", marker = "extra == 'community'" },
{ name = "langchain-core", editable = "../core" },
{ name = "langchain-deepseek", marker = "extra == 'deepseek'" },
{ name = "langchain-fireworks", marker = "extra == 'fireworks'" },
{ name = "langchain-google-genai", marker = "extra == 'google-genai'" },
{ name = "langchain-google-vertexai", marker = "extra == 'google-vertexai'" },
{ name = "langchain-groq", marker = "extra == 'groq'" },
{ name = "langchain-huggingface", marker = "extra == 'huggingface'" },
{ name = "langchain-mistralai", marker = "extra == 'mistralai'" },
{ name = "langchain-ollama", marker = "extra == 'ollama'" },
{ name = "langchain-openai", marker = "extra == 'openai'", editable = "../partners/openai" },
{ name = "langchain-perplexity", marker = "extra == 'perplexity'" },
{ name = "langchain-text-splitters", editable = "../text-splitters" },
{ name = "langchain-together", marker = "extra == 'together'" },
{ name = "langchain-xai", marker = "extra == 'xai'" },
{ name = "langsmith", specifier = ">=0.1.17,<1.0.0" },
{ name = "pydantic", specifier = ">=2.7.4,<3.0.0" },
{ name = "pyyaml", specifier = ">=5.3.0,<7.0.0" },
{ name = "requests", specifier = ">=2.0.0,<3.0.0" },
{ name = "sqlalchemy", specifier = ">=1.4.0,<3.0.0" },
]
provides-extras = ["anthropic", "openai", "google-vertexai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "aws", "deepseek", "xai", "perplexity"]
provides-extras = ["community", "anthropic", "openai", "google-vertexai", "google-genai", "together"]
[package.metadata.requires-dev]
dev = [
@@ -398,6 +376,7 @@ test = [
{ name = "blockbuster", specifier = ">=1.5.18,<1.6.0" },
{ name = "cffi", marker = "python_full_version < '3.10'", specifier = "<1.17.1" },
{ name = "cffi", marker = "python_full_version >= '3.10'" },
{ name = "duckdb-engine", specifier = ">=0.9.2,<1.0.0" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
{ name = "langchain-core", editable = "../core" },
{ name = "langchain-openai", editable = "../partners/openai" },
@@ -432,10 +411,9 @@ test-integration = [
{ name = "wrapt", specifier = ">=1.15.0,<2.0.0" },
]
typing = [
{ name = "fastapi", specifier = ">=0.116.1,<1.0.0" },
{ name = "langchain-core", editable = "../core" },
{ name = "langchain-text-splitters", editable = "../text-splitters" },
{ name = "mypy", specifier = ">=1.18.2,<1.19.0" },
{ name = "mypy", specifier = ">=1.15.0,<1.16.0" },
{ name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
{ name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.4" },
{ name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
@@ -470,11 +448,11 @@ lint = [
{ name = "ruff" },
]
test = [
{ name = "langchain-classic" },
{ name = "langchain" },
{ name = "langchain-core" },
]
typing = [
{ name = "langchain-classic" },
{ name = "langchain" },
]
[package.metadata]
@@ -497,15 +475,15 @@ lint = [
{ name = "ruff", specifier = ">=0.13.1,<0.14" },
]
test = [
{ name = "langchain-classic", editable = "../langchain" },
{ name = "langchain", editable = "../langchain" },
{ name = "langchain-core", editable = "../core" },
]
test-integration = []
typing = [{ name = "langchain-classic", editable = "../langchain" }]
typing = [{ name = "langchain", editable = "../langchain" }]
[[package]]
name = "langchain-core"
version = "1.0.0"
version = "1.0.0a6"
source = { editable = "../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -563,7 +541,7 @@ typing = [
[[package]]
name = "langchain-text-splitters"
version = "1.0.0"
version = "1.0.0a1"
source = { editable = "../text-splitters" }
dependencies = [
{ name = "langchain-core" },
@@ -596,8 +574,8 @@ test-integration = [
{ name = "nltk", specifier = ">=3.9.1,<4.0.0" },
{ name = "scipy", marker = "python_full_version == '3.12.*'", specifier = ">=1.7.0,<2.0.0" },
{ name = "scipy", marker = "python_full_version >= '3.13'", specifier = ">=1.14.1,<2.0.0" },
{ name = "sentence-transformers", marker = "python_full_version < '3.14'", specifier = ">=3.0.1,<4.0.0" },
{ name = "spacy", marker = "python_full_version < '3.14'", specifier = ">=3.8.7,<4.0.0" },
{ name = "sentence-transformers", specifier = ">=3.0.1,<4.0.0" },
{ name = "spacy", specifier = ">=3.8.7,<4.0.0" },
{ name = "thinc", specifier = ">=8.3.6,<9.0.0" },
{ name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
{ name = "transformers", specifier = ">=4.51.3,<5.0.0" },
@@ -610,62 +588,6 @@ typing = [
{ name = "types-requests", specifier = ">=2.31.0.20240218,<3.0.0.0" },
]
[[package]]
name = "langgraph"
version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "langchain-core" },
{ name = "langgraph-checkpoint" },
{ name = "langgraph-prebuilt" },
{ name = "langgraph-sdk" },
{ name = "pydantic" },
{ name = "xxhash" },
]
sdist = { url = "https://files.pythonhosted.org/packages/57/f7/7ae10f1832ab1a6a402f451e54d6dab277e28e7d4e4204e070c7897ca71c/langgraph-1.0.0.tar.gz", hash = "sha256:5f83ed0e9bbcc37635bc49cbc9b3d9306605fa07504f955b7a871ed715f9964c", size = 472835, upload-time = "2025-10-17T20:23:38.263Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/07/42/6f6d0fe4eb661b06da8e6c59e58044e9e4221fdbffdcacae864557de961e/langgraph-1.0.0-py3-none-any.whl", hash = "sha256:4d478781832a1bc67e06c3eb571412ec47d7c57a5467d1f3775adf0e9dd4042c", size = 155416, upload-time = "2025-10-17T20:23:36.978Z" },
]
[[package]]
name = "langgraph-checkpoint"
version = "2.1.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "langchain-core" },
{ name = "ormsgpack" },
]
sdist = { url = "https://files.pythonhosted.org/packages/29/83/6404f6ed23a91d7bc63d7df902d144548434237d017820ceaa8d014035f2/langgraph_checkpoint-2.1.2.tar.gz", hash = "sha256:112e9d067a6eff8937caf198421b1ffba8d9207193f14ac6f89930c1260c06f9", size = 142420, upload-time = "2025-10-07T17:45:17.129Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c4/f2/06bf5addf8ee664291e1b9ffa1f28fc9d97e59806dc7de5aea9844cbf335/langgraph_checkpoint-2.1.2-py3-none-any.whl", hash = "sha256:911ebffb069fd01775d4b5184c04aaafc2962fcdf50cf49d524cd4367c4d0c60", size = 45763, upload-time = "2025-10-07T17:45:16.19Z" },
]
[[package]]
name = "langgraph-prebuilt"
version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "langchain-core" },
{ name = "langgraph-checkpoint" },
]
sdist = { url = "https://files.pythonhosted.org/packages/02/2d/934b1129e217216a0dfaf0f7df0a10cedf2dfafe6cc8e1ee238cafaaa4a7/langgraph_prebuilt-1.0.0.tar.gz", hash = "sha256:eb75dad9aca0137451ca0395aa8541a665b3f60979480b0431d626fd195dcda2", size = 119927, upload-time = "2025-10-17T20:15:21.429Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/33/2e/ffa698eedc4c355168a9207ee598b2cc74ede92ce2b55c3469ea06978b6e/langgraph_prebuilt-1.0.0-py3-none-any.whl", hash = "sha256:ceaae4c5cee8c1f9b6468f76c114cafebb748aed0c93483b7c450e5a89de9c61", size = 28455, upload-time = "2025-10-17T20:15:20.043Z" },
]
[[package]]
name = "langgraph-sdk"
version = "0.2.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "httpx" },
{ name = "orjson" },
]
sdist = { url = "https://files.pythonhosted.org/packages/23/d8/40e01190a73c564a4744e29a6c902f78d34d43dad9b652a363a92a67059c/langgraph_sdk-0.2.9.tar.gz", hash = "sha256:b3bd04c6be4fa382996cd2be8fbc1e7cc94857d2bc6b6f4599a7f2a245975303", size = 99802, upload-time = "2025-09-20T18:49:14.734Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/66/05/b2d34e16638241e6f27a6946d28160d4b8b641383787646d41a3727e0896/langgraph_sdk-0.2.9-py3-none-any.whl", hash = "sha256:fbf302edadbf0fb343596f91c597794e936ef68eebc0d3e1d358b6f9f72a1429", size = 56752, upload-time = "2025-09-20T18:49:13.346Z" },
]
[[package]]
name = "langserve"
version = "0.0.51"
@@ -858,61 +780,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/28/01/d6b274a0635be0468d4dbd9cafe80c47105937a0d42434e805e67cd2ed8b/orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc", size = 125985, upload-time = "2025-08-26T17:46:16.67Z" },
]
[[package]]
name = "ormsgpack"
version = "1.11.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/65/f8/224c342c0e03e131aaa1a1f19aa2244e167001783a433f4eed10eedd834b/ormsgpack-1.11.0.tar.gz", hash = "sha256:7c9988e78fedba3292541eb3bb274fa63044ef4da2ddb47259ea70c05dee4206", size = 49357, upload-time = "2025-10-08T17:29:15.621Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ff/3d/6996193cb2babc47fc92456223bef7d141065357ad4204eccf313f47a7b3/ormsgpack-1.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:03d4e658dd6e1882a552ce1d13cc7b49157414e7d56a4091fbe7823225b08cba", size = 367965, upload-time = "2025-10-08T17:28:06.736Z" },
{ url = "https://files.pythonhosted.org/packages/35/89/c83b805dd9caebb046f4ceeed3706d0902ed2dbbcf08b8464e89f2c52e05/ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bb67eb913c2b703f0ed39607fc56e50724dd41f92ce080a586b4d6149eb3fe4", size = 195209, upload-time = "2025-10-08T17:28:08.395Z" },
{ url = "https://files.pythonhosted.org/packages/3a/17/427d9c4f77b120f0af01d7a71d8144771c9388c2a81f712048320e31353b/ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e54175b92411f73a238e5653a998627f6660de3def37d9dd7213e0fd264ca56", size = 205868, upload-time = "2025-10-08T17:28:09.688Z" },
{ url = "https://files.pythonhosted.org/packages/82/32/a9ce218478bdbf3fee954159900e24b314ab3064f7b6a217ccb1e3464324/ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca2b197f4556e1823d1319869d4c5dc278be335286d2308b0ed88b59a5afcc25", size = 207391, upload-time = "2025-10-08T17:28:11.031Z" },
{ url = "https://files.pythonhosted.org/packages/7a/d3/4413fe7454711596fdf08adabdfa686580e4656702015108e4975f00a022/ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bc62388262f58c792fe1e450e1d9dbcc174ed2fb0b43db1675dd7c5ff2319d6a", size = 377078, upload-time = "2025-10-08T17:28:12.39Z" },
{ url = "https://files.pythonhosted.org/packages/f0/ad/13fae555a45e35ca1ca929a27c9ee0a3ecada931b9d44454658c543f9b9c/ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c48bc10af74adfbc9113f3fb160dc07c61ad9239ef264c17e449eba3de343dc2", size = 470776, upload-time = "2025-10-08T17:28:13.484Z" },
{ url = "https://files.pythonhosted.org/packages/36/60/51178b093ffc4e2ef3381013a67223e7d56224434fba80047249f4a84b26/ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a608d3a1d4fa4acdc5082168a54513cff91f47764cef435e81a483452f5f7647", size = 380862, upload-time = "2025-10-08T17:28:14.747Z" },
{ url = "https://files.pythonhosted.org/packages/a6/e3/1cb6c161335e2ae7d711ecfb007a31a3936603626e347c13e5e53b7c7cf8/ormsgpack-1.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:97217b4f7f599ba45916b9c4c4b1d5656e8e2a4d91e2e191d72a7569d3c30923", size = 112058, upload-time = "2025-10-08T17:28:15.777Z" },
{ url = "https://files.pythonhosted.org/packages/a4/7c/90164d00e8e94b48eff8a17bc2f4be6b71ae356a00904bc69d5e8afe80fb/ormsgpack-1.11.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:c7be823f47d8e36648d4bc90634b93f02b7d7cc7480081195f34767e86f181fb", size = 367964, upload-time = "2025-10-08T17:28:16.778Z" },
{ url = "https://files.pythonhosted.org/packages/7b/c2/fb6331e880a3446c1341e72c77bd5a46da3e92a8e2edf7ea84a4c6c14fff/ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68accf15d1b013812755c0eb7a30e1fc2f81eb603a1a143bf0cda1b301cfa797", size = 195209, upload-time = "2025-10-08T17:28:17.796Z" },
{ url = "https://files.pythonhosted.org/packages/18/50/4943fb5df8cc02da6b7b1ee2c2a7fb13aebc9f963d69280b1bb02b1fb178/ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:805d06fb277d9a4e503c0c707545b49cde66cbb2f84e5cf7c58d81dfc20d8658", size = 205869, upload-time = "2025-10-08T17:28:19.01Z" },
{ url = "https://files.pythonhosted.org/packages/1c/fa/e7e06835bfea9adeef43915143ce818098aecab0cbd3df584815adf3e399/ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1e57cdf003e77acc43643bda151dc01f97147a64b11cdee1380bb9698a7601c", size = 207391, upload-time = "2025-10-08T17:28:20.352Z" },
{ url = "https://files.pythonhosted.org/packages/33/f0/f28a19e938a14ec223396e94f4782fbcc023f8c91f2ab6881839d3550f32/ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:37fc05bdaabd994097c62e2f3e08f66b03f856a640ede6dc5ea340bd15b77f4d", size = 377081, upload-time = "2025-10-08T17:28:21.926Z" },
{ url = "https://files.pythonhosted.org/packages/4f/e3/73d1d7287637401b0b6637e30ba9121e1aa1d9f5ea185ed9834ca15d512c/ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a6e9db6c73eb46b2e4d97bdffd1368a66f54e6806b563a997b19c004ef165e1d", size = 470779, upload-time = "2025-10-08T17:28:22.993Z" },
{ url = "https://files.pythonhosted.org/packages/9c/46/7ba7f9721e766dd0dfe4cedf444439447212abffe2d2f4538edeeec8ccbd/ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e9c44eae5ac0196ffc8b5ed497c75511056508f2303fa4d36b208eb820cf209e", size = 380865, upload-time = "2025-10-08T17:28:24.012Z" },
{ url = "https://files.pythonhosted.org/packages/a7/7d/bb92a0782bbe0626c072c0320001410cf3f6743ede7dc18f034b1a18edef/ormsgpack-1.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:11d0dfaf40ae7c6de4f7dbd1e4892e2e6a55d911ab1774357c481158d17371e4", size = 112058, upload-time = "2025-10-08T17:28:25.015Z" },
{ url = "https://files.pythonhosted.org/packages/28/1a/f07c6f74142815d67e1d9d98c5b2960007100408ade8242edac96d5d1c73/ormsgpack-1.11.0-cp311-cp311-win_arm64.whl", hash = "sha256:0c63a3f7199a3099c90398a1bdf0cb577b06651a442dc5efe67f2882665e5b02", size = 105894, upload-time = "2025-10-08T17:28:25.93Z" },
{ url = "https://files.pythonhosted.org/packages/1e/16/2805ebfb3d2cbb6c661b5fae053960fc90a2611d0d93e2207e753e836117/ormsgpack-1.11.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3434d0c8d67de27d9010222de07fb6810fb9af3bb7372354ffa19257ac0eb83b", size = 368474, upload-time = "2025-10-08T17:28:27.532Z" },
{ url = "https://files.pythonhosted.org/packages/6f/39/6afae47822dca0ce4465d894c0bbb860a850ce29c157882dbdf77a5dd26e/ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2da5bd097e8dbfa4eb0d4ccfe79acd6f538dee4493579e2debfe4fc8f4ca89b", size = 195321, upload-time = "2025-10-08T17:28:28.573Z" },
{ url = "https://files.pythonhosted.org/packages/f6/54/11eda6b59f696d2f16de469bfbe539c9f469c4b9eef5a513996b5879c6e9/ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fdbaa0a5a8606a486960b60c24f2d5235d30ac7a8b98eeaea9854bffef14dc3d", size = 206036, upload-time = "2025-10-08T17:28:29.785Z" },
{ url = "https://files.pythonhosted.org/packages/1e/86/890430f704f84c4699ddad61c595d171ea2fd77a51fbc106f83981e83939/ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3682f24f800c1837017ee90ce321086b2cbaef88db7d4cdbbda1582aa6508159", size = 207615, upload-time = "2025-10-08T17:28:31.076Z" },
{ url = "https://files.pythonhosted.org/packages/b6/b9/77383e16c991c0ecb772205b966fc68d9c519e0b5f9c3913283cbed30ffe/ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcca21202bb05ccbf3e0e92f560ee59b9331182e4c09c965a28155efbb134993", size = 377195, upload-time = "2025-10-08T17:28:32.436Z" },
{ url = "https://files.pythonhosted.org/packages/20/e2/15f9f045d4947f3c8a5e0535259fddf027b17b1215367488b3565c573b9d/ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c30e5c4655ba46152d722ec7468e8302195e6db362ec1ae2c206bc64f6030e43", size = 470960, upload-time = "2025-10-08T17:28:33.556Z" },
{ url = "https://files.pythonhosted.org/packages/b8/61/403ce188c4c495bc99dff921a0ad3d9d352dd6d3c4b629f3638b7f0cf79b/ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7138a341f9e2c08c59368f03d3be25e8b87b3baaf10d30fb1f6f6b52f3d47944", size = 381174, upload-time = "2025-10-08T17:28:34.781Z" },
{ url = "https://files.pythonhosted.org/packages/14/a8/94c94bc48c68da4374870a851eea03fc5a45eb041182ad4c5ed9acfc05a4/ormsgpack-1.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:d4bd8589b78a11026d47f4edf13c1ceab9088bb12451f34396afe6497db28a27", size = 112314, upload-time = "2025-10-08T17:28:36.259Z" },
{ url = "https://files.pythonhosted.org/packages/19/d0/aa4cf04f04e4cc180ce7a8d8ddb5a7f3af883329cbc59645d94d3ba157a5/ormsgpack-1.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:e5e746a1223e70f111d4001dab9585ac8639eee8979ca0c8db37f646bf2961da", size = 106072, upload-time = "2025-10-08T17:28:37.518Z" },
{ url = "https://files.pythonhosted.org/packages/8b/35/e34722edb701d053cf2240f55974f17b7dbfd11fdef72bd2f1835bcebf26/ormsgpack-1.11.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e7b36ab7b45cb95217ae1f05f1318b14a3e5ef73cb00804c0f06233f81a14e8", size = 368502, upload-time = "2025-10-08T17:28:38.547Z" },
{ url = "https://files.pythonhosted.org/packages/2f/6a/c2fc369a79d6aba2aa28c8763856c95337ac7fcc0b2742185cd19397212a/ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43402d67e03a9a35cc147c8c03f0c377cad016624479e1ee5b879b8425551484", size = 195344, upload-time = "2025-10-08T17:28:39.554Z" },
{ url = "https://files.pythonhosted.org/packages/8b/6a/0f8e24b7489885534c1a93bdba7c7c434b9b8638713a68098867db9f254c/ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:64fd992f932764d6306b70ddc755c1bc3405c4c6a69f77a36acf7af1c8f5ada4", size = 206045, upload-time = "2025-10-08T17:28:40.561Z" },
{ url = "https://files.pythonhosted.org/packages/99/71/8b460ba264f3c6f82ef5b1920335720094e2bd943057964ce5287d6df83a/ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0362fb7fe4a29c046c8ea799303079a09372653a1ce5a5a588f3bbb8088368d0", size = 207641, upload-time = "2025-10-08T17:28:41.736Z" },
{ url = "https://files.pythonhosted.org/packages/50/cf/f369446abaf65972424ed2651f2df2b7b5c3b735c93fc7fa6cfb81e34419/ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:de2f7a65a9d178ed57be49eba3d0fc9b833c32beaa19dbd4ba56014d3c20b152", size = 377211, upload-time = "2025-10-08T17:28:43.12Z" },
{ url = "https://files.pythonhosted.org/packages/2f/3f/948bb0047ce0f37c2efc3b9bb2bcfdccc61c63e0b9ce8088d4903ba39dcf/ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:f38cfae95461466055af966fc922d06db4e1654966385cda2828653096db34da", size = 470973, upload-time = "2025-10-08T17:28:44.465Z" },
{ url = "https://files.pythonhosted.org/packages/31/a4/92a8114d1d017c14aaa403445060f345df9130ca532d538094f38e535988/ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c88396189d238f183cea7831b07a305ab5c90d6d29b53288ae11200bd956357b", size = 381161, upload-time = "2025-10-08T17:28:46.063Z" },
{ url = "https://files.pythonhosted.org/packages/d0/64/5b76447da654798bfcfdfd64ea29447ff2b7f33fe19d0e911a83ad5107fc/ormsgpack-1.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:5403d1a945dd7c81044cebeca3f00a28a0f4248b33242a5d2d82111628043725", size = 112321, upload-time = "2025-10-08T17:28:47.393Z" },
{ url = "https://files.pythonhosted.org/packages/46/5e/89900d06db9ab81e7ec1fd56a07c62dfbdcda398c435718f4252e1dc52a0/ormsgpack-1.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:c57357b8d43b49722b876edf317bdad9e6d52071b523fdd7394c30cd1c67d5a0", size = 106084, upload-time = "2025-10-08T17:28:48.305Z" },
{ url = "https://files.pythonhosted.org/packages/4c/0b/c659e8657085c8c13f6a0224789f422620cef506e26573b5434defe68483/ormsgpack-1.11.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d390907d90fd0c908211592c485054d7a80990697ef4dff4e436ac18e1aab98a", size = 368497, upload-time = "2025-10-08T17:28:49.297Z" },
{ url = "https://files.pythonhosted.org/packages/1b/0e/451e5848c7ed56bd287e8a2b5cb5926e54466f60936e05aec6cb299f9143/ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6153c2e92e789509098e04c9aa116b16673bd88ec78fbe0031deeb34ab642d10", size = 195385, upload-time = "2025-10-08T17:28:50.314Z" },
{ url = "https://files.pythonhosted.org/packages/4c/28/90f78cbbe494959f2439c2ec571f08cd3464c05a6a380b0d621c622122a9/ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2b2c2a065a94d742212b2018e1fecd8f8d72f3c50b53a97d1f407418093446d", size = 206114, upload-time = "2025-10-08T17:28:51.336Z" },
{ url = "https://files.pythonhosted.org/packages/fb/db/34163f4c0923bea32dafe42cd878dcc66795a3e85669bc4b01c1e2b92a7b/ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:110e65b5340f3d7ef8b0009deae3c6b169437e6b43ad5a57fd1748085d29d2ac", size = 207679, upload-time = "2025-10-08T17:28:53.627Z" },
{ url = "https://files.pythonhosted.org/packages/b6/14/04ee741249b16f380a9b4a0cc19d4134d0b7c74bab27a2117da09e525eb9/ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c27e186fca96ab34662723e65b420919910acbbc50fc8e1a44e08f26268cb0e0", size = 377237, upload-time = "2025-10-08T17:28:56.12Z" },
{ url = "https://files.pythonhosted.org/packages/89/ff/53e588a6aaa833237471caec679582c2950f0e7e1a8ba28c1511b465c1f4/ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d56b1f877c13d499052d37a3db2378a97d5e1588d264f5040b3412aee23d742c", size = 471021, upload-time = "2025-10-08T17:28:57.299Z" },
{ url = "https://files.pythonhosted.org/packages/a6/f9/f20a6d9ef2be04da3aad05e8f5699957e9a30c6d5c043a10a296afa7e890/ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c88e28cd567c0a3269f624b4ade28142d5e502c8e826115093c572007af5be0a", size = 381205, upload-time = "2025-10-08T17:28:58.872Z" },
{ url = "https://files.pythonhosted.org/packages/f8/64/96c07d084b479ac8b7821a77ffc8d3f29d8b5c95ebfdf8db1c03dff02762/ormsgpack-1.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:8811160573dc0a65f62f7e0792c4ca6b7108dfa50771edb93f9b84e2d45a08ae", size = 112374, upload-time = "2025-10-08T17:29:00Z" },
{ url = "https://files.pythonhosted.org/packages/88/a5/5dcc18b818d50213a3cadfe336bb6163a102677d9ce87f3d2f1a1bee0f8c/ormsgpack-1.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:23e30a8d3c17484cf74e75e6134322255bd08bc2b5b295cc9c442f4bae5f3c2d", size = 106056, upload-time = "2025-10-08T17:29:01.29Z" },
{ url = "https://files.pythonhosted.org/packages/19/2b/776d1b411d2be50f77a6e6e94a25825cca55dcacfe7415fd691a144db71b/ormsgpack-1.11.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2905816502adfaf8386a01dd85f936cd378d243f4f5ee2ff46f67f6298dc90d5", size = 368661, upload-time = "2025-10-08T17:29:02.382Z" },
{ url = "https://files.pythonhosted.org/packages/a9/0c/81a19e6115b15764db3d241788f9fac093122878aaabf872cc545b0c4650/ormsgpack-1.11.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c04402fb9a0a9b9f18fbafd6d5f8398ee99b3ec619fb63952d3a954bc9d47daa", size = 195539, upload-time = "2025-10-08T17:29:03.472Z" },
{ url = "https://files.pythonhosted.org/packages/97/86/e5b50247a61caec5718122feb2719ea9d451d30ac0516c288c1dbc6408e8/ormsgpack-1.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a025ec07ac52056ecfd9e57b5cbc6fff163f62cb9805012b56cda599157f8ef2", size = 207718, upload-time = "2025-10-08T17:29:04.545Z" },
]
[[package]]
name = "packaging"
version = "25.0"
@@ -942,7 +809,7 @@ wheels = [
[[package]]
name = "pydantic"
version = "2.12.3"
version = "2.11.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
@@ -950,123 +817,96 @@ dependencies = [
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f3/1e/4f0a3233767010308f2fd6bd0814597e3f63f1dc98304a9112b8759df4ff/pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74", size = 819383, upload-time = "2025-10-17T15:04:21.222Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ff/5d/09a551ba512d7ca404d785072700d3f6727a02f6f3c24ecfd081c7cf0aa8/pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2", size = 788495, upload-time = "2025-09-13T11:26:39.325Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a1/6b/83661fa77dcefa195ad5f8cd9af3d1a7450fd57cc883ad04d65446ac2029/pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf", size = 462431, upload-time = "2025-10-17T15:04:19.346Z" },
{ url = "https://files.pythonhosted.org/packages/3e/d3/108f2006987c58e76691d5ae5d200dd3e0f532cb4e5fa3560751c3a1feba/pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2", size = 444855, upload-time = "2025-09-13T11:26:36.909Z" },
]
[[package]]
name = "pydantic-core"
version = "2.41.4"
version = "2.33.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/df/18/d0944e8eaaa3efd0a91b0f1fc537d3be55ad35091b6a87638211ba691964/pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5", size = 457557, upload-time = "2025-10-14T10:23:47.909Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/3d/9b8ca77b0f76fcdbf8bc6b72474e264283f461284ca84ac3fde570c6c49a/pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e", size = 2111197, upload-time = "2025-10-14T10:19:43.303Z" },
{ url = "https://files.pythonhosted.org/packages/59/92/b7b0fe6ed4781642232755cb7e56a86e2041e1292f16d9ae410a0ccee5ac/pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b", size = 1917909, upload-time = "2025-10-14T10:19:45.194Z" },
{ url = "https://files.pythonhosted.org/packages/52/8c/3eb872009274ffa4fb6a9585114e161aa1a0915af2896e2d441642929fe4/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd", size = 1969905, upload-time = "2025-10-14T10:19:46.567Z" },
{ url = "https://files.pythonhosted.org/packages/f4/21/35adf4a753bcfaea22d925214a0c5b880792e3244731b3f3e6fec0d124f7/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945", size = 2051938, upload-time = "2025-10-14T10:19:48.237Z" },
{ url = "https://files.pythonhosted.org/packages/7d/d0/cdf7d126825e36d6e3f1eccf257da8954452934ede275a8f390eac775e89/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706", size = 2250710, upload-time = "2025-10-14T10:19:49.619Z" },
{ url = "https://files.pythonhosted.org/packages/2e/1c/af1e6fd5ea596327308f9c8d1654e1285cc3d8de0d584a3c9d7705bf8a7c/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba", size = 2367445, upload-time = "2025-10-14T10:19:51.269Z" },
{ url = "https://files.pythonhosted.org/packages/d3/81/8cece29a6ef1b3a92f956ea6da6250d5b2d2e7e4d513dd3b4f0c7a83dfea/pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b", size = 2072875, upload-time = "2025-10-14T10:19:52.671Z" },
{ url = "https://files.pythonhosted.org/packages/e3/37/a6a579f5fc2cd4d5521284a0ab6a426cc6463a7b3897aeb95b12f1ba607b/pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d", size = 2191329, upload-time = "2025-10-14T10:19:54.214Z" },
{ url = "https://files.pythonhosted.org/packages/ae/03/505020dc5c54ec75ecba9f41119fd1e48f9e41e4629942494c4a8734ded1/pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700", size = 2151658, upload-time = "2025-10-14T10:19:55.843Z" },
{ url = "https://files.pythonhosted.org/packages/cb/5d/2c0d09fb53aa03bbd2a214d89ebfa6304be7df9ed86ee3dc7770257f41ee/pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6", size = 2316777, upload-time = "2025-10-14T10:19:57.607Z" },
{ url = "https://files.pythonhosted.org/packages/ea/4b/c2c9c8f5e1f9c864b57d08539d9d3db160e00491c9f5ee90e1bfd905e644/pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9", size = 2320705, upload-time = "2025-10-14T10:19:59.016Z" },
{ url = "https://files.pythonhosted.org/packages/28/c3/a74c1c37f49c0a02c89c7340fafc0ba816b29bd495d1a31ce1bdeacc6085/pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57", size = 1975464, upload-time = "2025-10-14T10:20:00.581Z" },
{ url = "https://files.pythonhosted.org/packages/d6/23/5dd5c1324ba80303368f7569e2e2e1a721c7d9eb16acb7eb7b7f85cb1be2/pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc", size = 2024497, upload-time = "2025-10-14T10:20:03.018Z" },
{ url = "https://files.pythonhosted.org/packages/62/4c/f6cbfa1e8efacd00b846764e8484fe173d25b8dab881e277a619177f3384/pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80", size = 2109062, upload-time = "2025-10-14T10:20:04.486Z" },
{ url = "https://files.pythonhosted.org/packages/21/f8/40b72d3868896bfcd410e1bd7e516e762d326201c48e5b4a06446f6cf9e8/pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae", size = 1916301, upload-time = "2025-10-14T10:20:06.857Z" },
{ url = "https://files.pythonhosted.org/packages/94/4d/d203dce8bee7faeca791671c88519969d98d3b4e8f225da5b96dad226fc8/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827", size = 1968728, upload-time = "2025-10-14T10:20:08.353Z" },
{ url = "https://files.pythonhosted.org/packages/65/f5/6a66187775df87c24d526985b3a5d78d861580ca466fbd9d4d0e792fcf6c/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f", size = 2050238, upload-time = "2025-10-14T10:20:09.766Z" },
{ url = "https://files.pythonhosted.org/packages/5e/b9/78336345de97298cf53236b2f271912ce11f32c1e59de25a374ce12f9cce/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def", size = 2249424, upload-time = "2025-10-14T10:20:11.732Z" },
{ url = "https://files.pythonhosted.org/packages/99/bb/a4584888b70ee594c3d374a71af5075a68654d6c780369df269118af7402/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2", size = 2366047, upload-time = "2025-10-14T10:20:13.647Z" },
{ url = "https://files.pythonhosted.org/packages/5f/8d/17fc5de9d6418e4d2ae8c675f905cdafdc59d3bf3bf9c946b7ab796a992a/pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8", size = 2071163, upload-time = "2025-10-14T10:20:15.307Z" },
{ url = "https://files.pythonhosted.org/packages/54/e7/03d2c5c0b8ed37a4617430db68ec5e7dbba66358b629cd69e11b4d564367/pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265", size = 2190585, upload-time = "2025-10-14T10:20:17.3Z" },
{ url = "https://files.pythonhosted.org/packages/be/fc/15d1c9fe5ad9266a5897d9b932b7f53d7e5cfc800573917a2c5d6eea56ec/pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c", size = 2150109, upload-time = "2025-10-14T10:20:19.143Z" },
{ url = "https://files.pythonhosted.org/packages/26/ef/e735dd008808226c83ba56972566138665b71477ad580fa5a21f0851df48/pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a", size = 2315078, upload-time = "2025-10-14T10:20:20.742Z" },
{ url = "https://files.pythonhosted.org/packages/90/00/806efdcf35ff2ac0f938362350cd9827b8afb116cc814b6b75cf23738c7c/pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e", size = 2318737, upload-time = "2025-10-14T10:20:22.306Z" },
{ url = "https://files.pythonhosted.org/packages/41/7e/6ac90673fe6cb36621a2283552897838c020db343fa86e513d3f563b196f/pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03", size = 1974160, upload-time = "2025-10-14T10:20:23.817Z" },
{ url = "https://files.pythonhosted.org/packages/e0/9d/7c5e24ee585c1f8b6356e1d11d40ab807ffde44d2db3b7dfd6d20b09720e/pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e", size = 2021883, upload-time = "2025-10-14T10:20:25.48Z" },
{ url = "https://files.pythonhosted.org/packages/33/90/5c172357460fc28b2871eb4a0fb3843b136b429c6fa827e4b588877bf115/pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db", size = 1968026, upload-time = "2025-10-14T10:20:27.039Z" },
{ url = "https://files.pythonhosted.org/packages/e9/81/d3b3e95929c4369d30b2a66a91db63c8ed0a98381ae55a45da2cd1cc1288/pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887", size = 2099043, upload-time = "2025-10-14T10:20:28.561Z" },
{ url = "https://files.pythonhosted.org/packages/58/da/46fdac49e6717e3a94fc9201403e08d9d61aa7a770fab6190b8740749047/pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2", size = 1910699, upload-time = "2025-10-14T10:20:30.217Z" },
{ url = "https://files.pythonhosted.org/packages/1e/63/4d948f1b9dd8e991a5a98b77dd66c74641f5f2e5225fee37994b2e07d391/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999", size = 1952121, upload-time = "2025-10-14T10:20:32.246Z" },
{ url = "https://files.pythonhosted.org/packages/b2/a7/e5fc60a6f781fc634ecaa9ecc3c20171d238794cef69ae0af79ac11b89d7/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4", size = 2041590, upload-time = "2025-10-14T10:20:34.332Z" },
{ url = "https://files.pythonhosted.org/packages/70/69/dce747b1d21d59e85af433428978a1893c6f8a7068fa2bb4a927fba7a5ff/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f", size = 2219869, upload-time = "2025-10-14T10:20:35.965Z" },
{ url = "https://files.pythonhosted.org/packages/83/6a/c070e30e295403bf29c4df1cb781317b6a9bac7cd07b8d3acc94d501a63c/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b", size = 2345169, upload-time = "2025-10-14T10:20:37.627Z" },
{ url = "https://files.pythonhosted.org/packages/f0/83/06d001f8043c336baea7fd202a9ac7ad71f87e1c55d8112c50b745c40324/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47", size = 2070165, upload-time = "2025-10-14T10:20:39.246Z" },
{ url = "https://files.pythonhosted.org/packages/14/0a/e567c2883588dd12bcbc110232d892cf385356f7c8a9910311ac997ab715/pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970", size = 2189067, upload-time = "2025-10-14T10:20:41.015Z" },
{ url = "https://files.pythonhosted.org/packages/f4/1d/3d9fca34273ba03c9b1c5289f7618bc4bd09c3ad2289b5420481aa051a99/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed", size = 2132997, upload-time = "2025-10-14T10:20:43.106Z" },
{ url = "https://files.pythonhosted.org/packages/52/70/d702ef7a6cd41a8afc61f3554922b3ed8d19dd54c3bd4bdbfe332e610827/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8", size = 2307187, upload-time = "2025-10-14T10:20:44.849Z" },
{ url = "https://files.pythonhosted.org/packages/68/4c/c06be6e27545d08b802127914156f38d10ca287a9e8489342793de8aae3c/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431", size = 2305204, upload-time = "2025-10-14T10:20:46.781Z" },
{ url = "https://files.pythonhosted.org/packages/b0/e5/35ae4919bcd9f18603419e23c5eaf32750224a89d41a8df1a3704b69f77e/pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd", size = 1972536, upload-time = "2025-10-14T10:20:48.39Z" },
{ url = "https://files.pythonhosted.org/packages/1e/c2/49c5bb6d2a49eb2ee3647a93e3dae7080c6409a8a7558b075027644e879c/pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff", size = 2031132, upload-time = "2025-10-14T10:20:50.421Z" },
{ url = "https://files.pythonhosted.org/packages/06/23/936343dbcba6eec93f73e95eb346810fc732f71ba27967b287b66f7b7097/pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8", size = 1969483, upload-time = "2025-10-14T10:20:52.35Z" },
{ url = "https://files.pythonhosted.org/packages/13/d0/c20adabd181a029a970738dfe23710b52a31f1258f591874fcdec7359845/pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746", size = 2105688, upload-time = "2025-10-14T10:20:54.448Z" },
{ url = "https://files.pythonhosted.org/packages/00/b6/0ce5c03cec5ae94cca220dfecddc453c077d71363b98a4bbdb3c0b22c783/pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced", size = 1910807, upload-time = "2025-10-14T10:20:56.115Z" },
{ url = "https://files.pythonhosted.org/packages/68/3e/800d3d02c8beb0b5c069c870cbb83799d085debf43499c897bb4b4aaff0d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a", size = 1956669, upload-time = "2025-10-14T10:20:57.874Z" },
{ url = "https://files.pythonhosted.org/packages/60/a4/24271cc71a17f64589be49ab8bd0751f6a0a03046c690df60989f2f95c2c/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02", size = 2051629, upload-time = "2025-10-14T10:21:00.006Z" },
{ url = "https://files.pythonhosted.org/packages/68/de/45af3ca2f175d91b96bfb62e1f2d2f1f9f3b14a734afe0bfeff079f78181/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1", size = 2224049, upload-time = "2025-10-14T10:21:01.801Z" },
{ url = "https://files.pythonhosted.org/packages/af/8f/ae4e1ff84672bf869d0a77af24fd78387850e9497753c432875066b5d622/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2", size = 2342409, upload-time = "2025-10-14T10:21:03.556Z" },
{ url = "https://files.pythonhosted.org/packages/18/62/273dd70b0026a085c7b74b000394e1ef95719ea579c76ea2f0cc8893736d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84", size = 2069635, upload-time = "2025-10-14T10:21:05.385Z" },
{ url = "https://files.pythonhosted.org/packages/30/03/cf485fff699b4cdaea469bc481719d3e49f023241b4abb656f8d422189fc/pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d", size = 2194284, upload-time = "2025-10-14T10:21:07.122Z" },
{ url = "https://files.pythonhosted.org/packages/f9/7e/c8e713db32405dfd97211f2fc0a15d6bf8adb7640f3d18544c1f39526619/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d", size = 2137566, upload-time = "2025-10-14T10:21:08.981Z" },
{ url = "https://files.pythonhosted.org/packages/04/f7/db71fd4cdccc8b75990f79ccafbbd66757e19f6d5ee724a6252414483fb4/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2", size = 2316809, upload-time = "2025-10-14T10:21:10.805Z" },
{ url = "https://files.pythonhosted.org/packages/76/63/a54973ddb945f1bca56742b48b144d85c9fc22f819ddeb9f861c249d5464/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab", size = 2311119, upload-time = "2025-10-14T10:21:12.583Z" },
{ url = "https://files.pythonhosted.org/packages/f8/03/5d12891e93c19218af74843a27e32b94922195ded2386f7b55382f904d2f/pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c", size = 1981398, upload-time = "2025-10-14T10:21:14.584Z" },
{ url = "https://files.pythonhosted.org/packages/be/d8/fd0de71f39db91135b7a26996160de71c073d8635edfce8b3c3681be0d6d/pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4", size = 2030735, upload-time = "2025-10-14T10:21:16.432Z" },
{ url = "https://files.pythonhosted.org/packages/72/86/c99921c1cf6650023c08bfab6fe2d7057a5142628ef7ccfa9921f2dda1d5/pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564", size = 1973209, upload-time = "2025-10-14T10:21:18.213Z" },
{ url = "https://files.pythonhosted.org/packages/36/0d/b5706cacb70a8414396efdda3d72ae0542e050b591119e458e2490baf035/pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4", size = 1877324, upload-time = "2025-10-14T10:21:20.363Z" },
{ url = "https://files.pythonhosted.org/packages/de/2d/cba1fa02cfdea72dfb3a9babb067c83b9dff0bbcb198368e000a6b756ea7/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2", size = 1884515, upload-time = "2025-10-14T10:21:22.339Z" },
{ url = "https://files.pythonhosted.org/packages/07/ea/3df927c4384ed9b503c9cc2d076cf983b4f2adb0c754578dfb1245c51e46/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf", size = 2042819, upload-time = "2025-10-14T10:21:26.683Z" },
{ url = "https://files.pythonhosted.org/packages/6a/ee/df8e871f07074250270a3b1b82aad4cd0026b588acd5d7d3eb2fcb1471a3/pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2", size = 1995866, upload-time = "2025-10-14T10:21:28.951Z" },
{ url = "https://files.pythonhosted.org/packages/fc/de/b20f4ab954d6d399499c33ec4fafc46d9551e11dc1858fb7f5dca0748ceb/pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89", size = 1970034, upload-time = "2025-10-14T10:21:30.869Z" },
{ url = "https://files.pythonhosted.org/packages/54/28/d3325da57d413b9819365546eb9a6e8b7cbd9373d9380efd5f74326143e6/pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1", size = 2102022, upload-time = "2025-10-14T10:21:32.809Z" },
{ url = "https://files.pythonhosted.org/packages/9e/24/b58a1bc0d834bf1acc4361e61233ee217169a42efbdc15a60296e13ce438/pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac", size = 1905495, upload-time = "2025-10-14T10:21:34.812Z" },
{ url = "https://files.pythonhosted.org/packages/fb/a4/71f759cc41b7043e8ecdaab81b985a9b6cad7cec077e0b92cff8b71ecf6b/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554", size = 1956131, upload-time = "2025-10-14T10:21:36.924Z" },
{ url = "https://files.pythonhosted.org/packages/b0/64/1e79ac7aa51f1eec7c4cda8cbe456d5d09f05fdd68b32776d72168d54275/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e", size = 2052236, upload-time = "2025-10-14T10:21:38.927Z" },
{ url = "https://files.pythonhosted.org/packages/e9/e3/a3ffc363bd4287b80f1d43dc1c28ba64831f8dfc237d6fec8f2661138d48/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616", size = 2223573, upload-time = "2025-10-14T10:21:41.574Z" },
{ url = "https://files.pythonhosted.org/packages/28/27/78814089b4d2e684a9088ede3790763c64693c3d1408ddc0a248bc789126/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af", size = 2342467, upload-time = "2025-10-14T10:21:44.018Z" },
{ url = "https://files.pythonhosted.org/packages/92/97/4de0e2a1159cb85ad737e03306717637842c88c7fd6d97973172fb183149/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12", size = 2063754, upload-time = "2025-10-14T10:21:46.466Z" },
{ url = "https://files.pythonhosted.org/packages/0f/50/8cb90ce4b9efcf7ae78130afeb99fd1c86125ccdf9906ef64b9d42f37c25/pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d", size = 2196754, upload-time = "2025-10-14T10:21:48.486Z" },
{ url = "https://files.pythonhosted.org/packages/34/3b/ccdc77af9cd5082723574a1cc1bcae7a6acacc829d7c0a06201f7886a109/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad", size = 2137115, upload-time = "2025-10-14T10:21:50.63Z" },
{ url = "https://files.pythonhosted.org/packages/ca/ba/e7c7a02651a8f7c52dc2cff2b64a30c313e3b57c7d93703cecea76c09b71/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a", size = 2317400, upload-time = "2025-10-14T10:21:52.959Z" },
{ url = "https://files.pythonhosted.org/packages/2c/ba/6c533a4ee8aec6b812c643c49bb3bd88d3f01e3cebe451bb85512d37f00f/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025", size = 2312070, upload-time = "2025-10-14T10:21:55.419Z" },
{ url = "https://files.pythonhosted.org/packages/22/ae/f10524fcc0ab8d7f96cf9a74c880243576fd3e72bd8ce4f81e43d22bcab7/pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e", size = 1982277, upload-time = "2025-10-14T10:21:57.474Z" },
{ url = "https://files.pythonhosted.org/packages/b4/dc/e5aa27aea1ad4638f0c3fb41132f7eb583bd7420ee63204e2d4333a3bbf9/pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894", size = 2024608, upload-time = "2025-10-14T10:21:59.557Z" },
{ url = "https://files.pythonhosted.org/packages/3e/61/51d89cc2612bd147198e120a13f150afbf0bcb4615cddb049ab10b81b79e/pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d", size = 1967614, upload-time = "2025-10-14T10:22:01.847Z" },
{ url = "https://files.pythonhosted.org/packages/0d/c2/472f2e31b95eff099961fa050c376ab7156a81da194f9edb9f710f68787b/pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da", size = 1876904, upload-time = "2025-10-14T10:22:04.062Z" },
{ url = "https://files.pythonhosted.org/packages/4a/07/ea8eeb91173807ecdae4f4a5f4b150a520085b35454350fc219ba79e66a3/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e", size = 1882538, upload-time = "2025-10-14T10:22:06.39Z" },
{ url = "https://files.pythonhosted.org/packages/1e/29/b53a9ca6cd366bfc928823679c6a76c7a4c69f8201c0ba7903ad18ebae2f/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa", size = 2041183, upload-time = "2025-10-14T10:22:08.812Z" },
{ url = "https://files.pythonhosted.org/packages/c7/3d/f8c1a371ceebcaf94d6dd2d77c6cf4b1c078e13a5837aee83f760b4f7cfd/pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d", size = 1993542, upload-time = "2025-10-14T10:22:11.332Z" },
{ url = "https://files.pythonhosted.org/packages/8a/ac/9fc61b4f9d079482a290afe8d206b8f490e9fd32d4fc03ed4fc698214e01/pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0", size = 1973897, upload-time = "2025-10-14T10:22:13.444Z" },
{ url = "https://files.pythonhosted.org/packages/b0/12/5ba58daa7f453454464f92b3ca7b9d7c657d8641c48e370c3ebc9a82dd78/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b", size = 2122139, upload-time = "2025-10-14T10:22:47.288Z" },
{ url = "https://files.pythonhosted.org/packages/21/fb/6860126a77725c3108baecd10fd3d75fec25191d6381b6eb2ac660228eac/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42", size = 1936674, upload-time = "2025-10-14T10:22:49.555Z" },
{ url = "https://files.pythonhosted.org/packages/de/be/57dcaa3ed595d81f8757e2b44a38240ac5d37628bce25fb20d02c7018776/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee", size = 1956398, upload-time = "2025-10-14T10:22:52.19Z" },
{ url = "https://files.pythonhosted.org/packages/2f/1d/679a344fadb9695f1a6a294d739fbd21d71fa023286daeea8c0ed49e7c2b/pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c", size = 2138674, upload-time = "2025-10-14T10:22:54.499Z" },
{ url = "https://files.pythonhosted.org/packages/c4/48/ae937e5a831b7c0dc646b2ef788c27cd003894882415300ed21927c21efa/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537", size = 2112087, upload-time = "2025-10-14T10:22:56.818Z" },
{ url = "https://files.pythonhosted.org/packages/5e/db/6db8073e3d32dae017da7e0d16a9ecb897d0a4d92e00634916e486097961/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94", size = 1920387, upload-time = "2025-10-14T10:22:59.342Z" },
{ url = "https://files.pythonhosted.org/packages/0d/c1/dd3542d072fcc336030d66834872f0328727e3b8de289c662faa04aa270e/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c", size = 1951495, upload-time = "2025-10-14T10:23:02.089Z" },
{ url = "https://files.pythonhosted.org/packages/2b/c6/db8d13a1f8ab3f1eb08c88bd00fd62d44311e3456d1e85c0e59e0a0376e7/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335", size = 2139008, upload-time = "2025-10-14T10:23:04.539Z" },
{ url = "https://files.pythonhosted.org/packages/5d/d4/912e976a2dd0b49f31c98a060ca90b353f3b73ee3ea2fd0030412f6ac5ec/pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00", size = 2106739, upload-time = "2025-10-14T10:23:06.934Z" },
{ url = "https://files.pythonhosted.org/packages/71/f0/66ec5a626c81eba326072d6ee2b127f8c139543f1bf609b4842978d37833/pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9", size = 1932549, upload-time = "2025-10-14T10:23:09.24Z" },
{ url = "https://files.pythonhosted.org/packages/c4/af/625626278ca801ea0a658c2dcf290dc9f21bb383098e99e7c6a029fccfc0/pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2", size = 2135093, upload-time = "2025-10-14T10:23:11.626Z" },
{ url = "https://files.pythonhosted.org/packages/20/f6/2fba049f54e0f4975fef66be654c597a1d005320fa141863699180c7697d/pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258", size = 2187971, upload-time = "2025-10-14T10:23:14.437Z" },
{ url = "https://files.pythonhosted.org/packages/0e/80/65ab839a2dfcd3b949202f9d920c34f9de5a537c3646662bdf2f7d999680/pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347", size = 2147939, upload-time = "2025-10-14T10:23:16.831Z" },
{ url = "https://files.pythonhosted.org/packages/44/58/627565d3d182ce6dfda18b8e1c841eede3629d59c9d7cbc1e12a03aeb328/pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa", size = 2311400, upload-time = "2025-10-14T10:23:19.234Z" },
{ url = "https://files.pythonhosted.org/packages/24/06/8a84711162ad5a5f19a88cead37cca81b4b1f294f46260ef7334ae4f24d3/pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a", size = 2316840, upload-time = "2025-10-14T10:23:21.738Z" },
{ url = "https://files.pythonhosted.org/packages/aa/8b/b7bb512a4682a2f7fbfae152a755d37351743900226d29bd953aaf870eaa/pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d", size = 2149135, upload-time = "2025-10-14T10:23:24.379Z" },
{ url = "https://files.pythonhosted.org/packages/7e/7d/138e902ed6399b866f7cfe4435d22445e16fff888a1c00560d9dc79a780f/pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5", size = 2104721, upload-time = "2025-10-14T10:23:26.906Z" },
{ url = "https://files.pythonhosted.org/packages/47/13/0525623cf94627f7b53b4c2034c81edc8491cbfc7c28d5447fa318791479/pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2", size = 1931608, upload-time = "2025-10-14T10:23:29.306Z" },
{ url = "https://files.pythonhosted.org/packages/d6/f9/744bc98137d6ef0a233f808bfc9b18cf94624bf30836a18d3b05d08bf418/pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd", size = 2132986, upload-time = "2025-10-14T10:23:32.057Z" },
{ url = "https://files.pythonhosted.org/packages/17/c8/629e88920171173f6049386cc71f893dff03209a9ef32b4d2f7e7c264bcf/pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c", size = 2187516, upload-time = "2025-10-14T10:23:34.871Z" },
{ url = "https://files.pythonhosted.org/packages/2e/0f/4f2734688d98488782218ca61bcc118329bf5de05bb7fe3adc7dd79b0b86/pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405", size = 2146146, upload-time = "2025-10-14T10:23:37.342Z" },
{ url = "https://files.pythonhosted.org/packages/ed/f2/ab385dbd94a052c62224b99cf99002eee99dbec40e10006c78575aead256/pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8", size = 2311296, upload-time = "2025-10-14T10:23:40.145Z" },
{ url = "https://files.pythonhosted.org/packages/fc/8e/e4f12afe1beeb9823bba5375f8f258df0cc61b056b0195fb1cf9f62a1a58/pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308", size = 2315386, upload-time = "2025-10-14T10:23:42.624Z" },
{ url = "https://files.pythonhosted.org/packages/48/f7/925f65d930802e3ea2eb4d5afa4cb8730c8dc0d2cb89a59dc4ed2fcb2d74/pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f", size = 2147775, upload-time = "2025-10-14T10:23:45.406Z" },
{ url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" },
{ url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" },
{ url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" },
{ url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" },
{ url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" },
{ url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" },
{ url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" },
{ url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" },
{ url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" },
{ url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" },
{ url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" },
{ url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" },
{ url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" },
{ url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" },
{ url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" },
{ url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" },
{ url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" },
{ url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" },
{ url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" },
{ url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" },
{ url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" },
{ url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" },
{ url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" },
{ url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" },
{ url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" },
{ url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" },
{ url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" },
{ url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" },
{ url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" },
{ url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" },
{ url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" },
{ url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" },
{ url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" },
{ url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" },
{ url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" },
{ url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" },
{ url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" },
{ url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" },
{ url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" },
{ url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" },
{ url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" },
{ url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" },
{ url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" },
{ url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" },
{ url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" },
{ url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" },
{ url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" },
{ url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" },
{ url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" },
{ url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" },
{ url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" },
{ url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" },
{ url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" },
{ url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" },
{ url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" },
{ url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" },
{ url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" },
{ url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
{ url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" },
{ url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" },
{ url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" },
{ url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" },
{ url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" },
{ url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" },
{ url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" },
{ url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" },
{ url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" },
{ url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" },
{ url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" },
{ url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" },
{ url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" },
{ url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" },
{ url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" },
{ url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" },
{ url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" },
{ url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" },
]

[[package]]
@@ -1487,124 +1327,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
]

[[package]]
name = "xxhash"
version = "3.6.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/02/84/30869e01909fb37a6cc7e18688ee8bf1e42d57e7e0777636bd47524c43c7/xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6", size = 85160, upload-time = "2025-10-02T14:37:08.097Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/ee/f9f1d656ad168681bb0f6b092372c1e533c4416b8069b1896a175c46e484/xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71", size = 32845, upload-time = "2025-10-02T14:33:51.573Z" },
{ url = "https://files.pythonhosted.org/packages/a3/b1/93508d9460b292c74a09b83d16750c52a0ead89c51eea9951cb97a60d959/xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d", size = 30807, upload-time = "2025-10-02T14:33:52.964Z" },
{ url = "https://files.pythonhosted.org/packages/07/55/28c93a3662f2d200c70704efe74aab9640e824f8ce330d8d3943bf7c9b3c/xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8", size = 193786, upload-time = "2025-10-02T14:33:54.272Z" },
{ url = "https://files.pythonhosted.org/packages/c1/96/fec0be9bb4b8f5d9c57d76380a366f31a1781fb802f76fc7cda6c84893c7/xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058", size = 212830, upload-time = "2025-10-02T14:33:55.706Z" },
{ url = "https://files.pythonhosted.org/packages/c4/a0/c706845ba77b9611f81fd2e93fad9859346b026e8445e76f8c6fd057cc6d/xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2", size = 211606, upload-time = "2025-10-02T14:33:57.133Z" },
{ url = "https://files.pythonhosted.org/packages/67/1e/164126a2999e5045f04a69257eea946c0dc3e86541b400d4385d646b53d7/xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc", size = 444872, upload-time = "2025-10-02T14:33:58.446Z" },
{ url = "https://files.pythonhosted.org/packages/2d/4b/55ab404c56cd70a2cf5ecfe484838865d0fea5627365c6c8ca156bd09c8f/xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc", size = 193217, upload-time = "2025-10-02T14:33:59.724Z" },
{ url = "https://files.pythonhosted.org/packages/45/e6/52abf06bac316db33aa269091ae7311bd53cfc6f4b120ae77bac1b348091/xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07", size = 210139, upload-time = "2025-10-02T14:34:02.041Z" },
{ url = "https://files.pythonhosted.org/packages/34/37/db94d490b8691236d356bc249c08819cbcef9273a1a30acf1254ff9ce157/xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4", size = 197669, upload-time = "2025-10-02T14:34:03.664Z" },
{ url = "https://files.pythonhosted.org/packages/b7/36/c4f219ef4a17a4f7a64ed3569bc2b5a9c8311abdb22249ac96093625b1a4/xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06", size = 210018, upload-time = "2025-10-02T14:34:05.325Z" },
{ url = "https://files.pythonhosted.org/packages/fd/06/bfac889a374fc2fc439a69223d1750eed2e18a7db8514737ab630534fa08/xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4", size = 413058, upload-time = "2025-10-02T14:34:06.925Z" },
{ url = "https://files.pythonhosted.org/packages/c9/d1/555d8447e0dd32ad0930a249a522bb2e289f0d08b6b16204cfa42c1f5a0c/xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b", size = 190628, upload-time = "2025-10-02T14:34:08.669Z" },
{ url = "https://files.pythonhosted.org/packages/d1/15/8751330b5186cedc4ed4b597989882ea05e0408b53fa47bcb46a6125bfc6/xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b", size = 30577, upload-time = "2025-10-02T14:34:10.234Z" },
{ url = "https://files.pythonhosted.org/packages/bb/cc/53f87e8b5871a6eb2ff7e89c48c66093bda2be52315a8161ddc54ea550c4/xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb", size = 31487, upload-time = "2025-10-02T14:34:11.618Z" },
{ url = "https://files.pythonhosted.org/packages/9f/00/60f9ea3bb697667a14314d7269956f58bf56bb73864f8f8d52a3c2535e9a/xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d", size = 27863, upload-time = "2025-10-02T14:34:12.619Z" },
{ url = "https://files.pythonhosted.org/packages/17/d4/cc2f0400e9154df4b9964249da78ebd72f318e35ccc425e9f403c392f22a/xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a", size = 32844, upload-time = "2025-10-02T14:34:14.037Z" },
{ url = "https://files.pythonhosted.org/packages/5e/ec/1cc11cd13e26ea8bc3cb4af4eaadd8d46d5014aebb67be3f71fb0b68802a/xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa", size = 30809, upload-time = "2025-10-02T14:34:15.484Z" },
{ url = "https://files.pythonhosted.org/packages/04/5f/19fe357ea348d98ca22f456f75a30ac0916b51c753e1f8b2e0e6fb884cce/xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248", size = 194665, upload-time = "2025-10-02T14:34:16.541Z" },
{ url = "https://files.pythonhosted.org/packages/90/3b/d1f1a8f5442a5fd8beedae110c5af7604dc37349a8e16519c13c19a9a2de/xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62", size = 213550, upload-time = "2025-10-02T14:34:17.878Z" },
{ url = "https://files.pythonhosted.org/packages/c4/ef/3a9b05eb527457d5db13a135a2ae1a26c80fecd624d20f3e8dcc4cb170f3/xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f", size = 212384, upload-time = "2025-10-02T14:34:19.182Z" },
{ url = "https://files.pythonhosted.org/packages/0f/18/ccc194ee698c6c623acbf0f8c2969811a8a4b6185af5e824cd27b9e4fd3e/xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e", size = 445749, upload-time = "2025-10-02T14:34:20.659Z" },
{ url = "https://files.pythonhosted.org/packages/a5/86/cf2c0321dc3940a7aa73076f4fd677a0fb3e405cb297ead7d864fd90847e/xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8", size = 193880, upload-time = "2025-10-02T14:34:22.431Z" },
{ url = "https://files.pythonhosted.org/packages/82/fb/96213c8560e6f948a1ecc9a7613f8032b19ee45f747f4fca4eb31bb6d6ed/xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0", size = 210912, upload-time = "2025-10-02T14:34:23.937Z" },
{ url = "https://files.pythonhosted.org/packages/40/aa/4395e669b0606a096d6788f40dbdf2b819d6773aa290c19e6e83cbfc312f/xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77", size = 198654, upload-time = "2025-10-02T14:34:25.644Z" },
{ url = "https://files.pythonhosted.org/packages/67/74/b044fcd6b3d89e9b1b665924d85d3f400636c23590226feb1eb09e1176ce/xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c", size = 210867, upload-time = "2025-10-02T14:34:27.203Z" },
{ url = "https://files.pythonhosted.org/packages/bc/fd/3ce73bf753b08cb19daee1eb14aa0d7fe331f8da9c02dd95316ddfe5275e/xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b", size = 414012, upload-time = "2025-10-02T14:34:28.409Z" },
{ url = "https://files.pythonhosted.org/packages/ba/b3/5a4241309217c5c876f156b10778f3ab3af7ba7e3259e6d5f5c7d0129eb2/xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3", size = 191409, upload-time = "2025-10-02T14:34:29.696Z" },
{ url = "https://files.pythonhosted.org/packages/c0/01/99bfbc15fb9abb9a72b088c1d95219fc4782b7d01fc835bd5744d66dd0b8/xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd", size = 30574, upload-time = "2025-10-02T14:34:31.028Z" },
{ url = "https://files.pythonhosted.org/packages/65/79/9d24d7f53819fe301b231044ea362ce64e86c74f6e8c8e51320de248b3e5/xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef", size = 31481, upload-time = "2025-10-02T14:34:32.062Z" },
{ url = "https://files.pythonhosted.org/packages/30/4e/15cd0e3e8772071344eab2961ce83f6e485111fed8beb491a3f1ce100270/xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7", size = 27861, upload-time = "2025-10-02T14:34:33.555Z" },
{ url = "https://files.pythonhosted.org/packages/9a/07/d9412f3d7d462347e4511181dea65e47e0d0e16e26fbee2ea86a2aefb657/xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c", size = 32744, upload-time = "2025-10-02T14:34:34.622Z" },
{ url = "https://files.pythonhosted.org/packages/79/35/0429ee11d035fc33abe32dca1b2b69e8c18d236547b9a9b72c1929189b9a/xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204", size = 30816, upload-time = "2025-10-02T14:34:36.043Z" },
{ url = "https://files.pythonhosted.org/packages/b7/f2/57eb99aa0f7d98624c0932c5b9a170e1806406cdbcdb510546634a1359e0/xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490", size = 194035, upload-time = "2025-10-02T14:34:37.354Z" },
{ url = "https://files.pythonhosted.org/packages/4c/ed/6224ba353690d73af7a3f1c7cdb1fc1b002e38f783cb991ae338e1eb3d79/xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2", size = 212914, upload-time = "2025-10-02T14:34:38.6Z" },
{ url = "https://files.pythonhosted.org/packages/38/86/fb6b6130d8dd6b8942cc17ab4d90e223653a89aa32ad2776f8af7064ed13/xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa", size = 212163, upload-time = "2025-10-02T14:34:39.872Z" },
{ url = "https://files.pythonhosted.org/packages/ee/dc/e84875682b0593e884ad73b2d40767b5790d417bde603cceb6878901d647/xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0", size = 445411, upload-time = "2025-10-02T14:34:41.569Z" },
{ url = "https://files.pythonhosted.org/packages/11/4f/426f91b96701ec2f37bb2b8cec664eff4f658a11f3fa9d94f0a887ea6d2b/xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2", size = 193883, upload-time = "2025-10-02T14:34:43.249Z" },
{ url = "https://files.pythonhosted.org/packages/53/5a/ddbb83eee8e28b778eacfc5a85c969673e4023cdeedcfcef61f36731610b/xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9", size = 210392, upload-time = "2025-10-02T14:34:45.042Z" },
{ url = "https://files.pythonhosted.org/packages/1e/c2/ff69efd07c8c074ccdf0a4f36fcdd3d27363665bcdf4ba399abebe643465/xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e", size = 197898, upload-time = "2025-10-02T14:34:46.302Z" },
{ url = "https://files.pythonhosted.org/packages/58/ca/faa05ac19b3b622c7c9317ac3e23954187516298a091eb02c976d0d3dd45/xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374", size = 210655, upload-time = "2025-10-02T14:34:47.571Z" },
{ url = "https://files.pythonhosted.org/packages/d4/7a/06aa7482345480cc0cb597f5c875b11a82c3953f534394f620b0be2f700c/xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d", size = 414001, upload-time = "2025-10-02T14:34:49.273Z" },
{ url = "https://files.pythonhosted.org/packages/23/07/63ffb386cd47029aa2916b3d2f454e6cc5b9f5c5ada3790377d5430084e7/xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae", size = 191431, upload-time = "2025-10-02T14:34:50.798Z" },
{ url = "https://files.pythonhosted.org/packages/0f/93/14fde614cadb4ddf5e7cebf8918b7e8fac5ae7861c1875964f17e678205c/xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb", size = 30617, upload-time = "2025-10-02T14:34:51.954Z" },
{ url = "https://files.pythonhosted.org/packages/13/5d/0d125536cbe7565a83d06e43783389ecae0c0f2ed037b48ede185de477c0/xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c", size = 31534, upload-time = "2025-10-02T14:34:53.276Z" },
{ url = "https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" },
{ url = "https://files.pythonhosted.org/packages/33/76/35d05267ac82f53ae9b0e554da7c5e281ee61f3cad44c743f0fcd354f211/xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec", size = 32738, upload-time = "2025-10-02T14:34:55.839Z" },
{ url = "https://files.pythonhosted.org/packages/31/a8/3fbce1cd96534a95e35d5120637bf29b0d7f5d8fa2f6374e31b4156dd419/xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1", size = 30821, upload-time = "2025-10-02T14:34:57.219Z" },
{ url = "https://files.pythonhosted.org/packages/0c/ea/d387530ca7ecfa183cb358027f1833297c6ac6098223fd14f9782cd0015c/xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6", size = 194127, upload-time = "2025-10-02T14:34:59.21Z" },
{ url = "https://files.pythonhosted.org/packages/ba/0c/71435dcb99874b09a43b8d7c54071e600a7481e42b3e3ce1eb5226a5711a/xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263", size = 212975, upload-time = "2025-10-02T14:35:00.816Z" },
{ url = "https://files.pythonhosted.org/packages/84/7a/c2b3d071e4bb4a90b7057228a99b10d51744878f4a8a6dd643c8bd897620/xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546", size = 212241, upload-time = "2025-10-02T14:35:02.207Z" },
{ url = "https://files.pythonhosted.org/packages/81/5f/640b6eac0128e215f177df99eadcd0f1b7c42c274ab6a394a05059694c5a/xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89", size = 445471, upload-time = "2025-10-02T14:35:03.61Z" },
{ url = "https://files.pythonhosted.org/packages/5e/1e/3c3d3ef071b051cc3abbe3721ffb8365033a172613c04af2da89d5548a87/xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d", size = 193936, upload-time = "2025-10-02T14:35:05.013Z" },
{ url = "https://files.pythonhosted.org/packages/2c/bd/4a5f68381939219abfe1c22a9e3a5854a4f6f6f3c4983a87d255f21f2e5d/xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7", size = 210440, upload-time = "2025-10-02T14:35:06.239Z" },
{ url = "https://files.pythonhosted.org/packages/eb/37/b80fe3d5cfb9faff01a02121a0f4d565eb7237e9e5fc66e73017e74dcd36/xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db", size = 197990, upload-time = "2025-10-02T14:35:07.735Z" },
{ url = "https://files.pythonhosted.org/packages/d7/fd/2c0a00c97b9e18f72e1f240ad4e8f8a90fd9d408289ba9c7c495ed7dc05c/xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42", size = 210689, upload-time = "2025-10-02T14:35:09.438Z" },
{ url = "https://files.pythonhosted.org/packages/93/86/5dd8076a926b9a95db3206aba20d89a7fc14dd5aac16e5c4de4b56033140/xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11", size = 414068, upload-time = "2025-10-02T14:35:11.162Z" },
{ url = "https://files.pythonhosted.org/packages/af/3c/0bb129170ee8f3650f08e993baee550a09593462a5cddd8e44d0011102b1/xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd", size = 191495, upload-time = "2025-10-02T14:35:12.971Z" },
{ url = "https://files.pythonhosted.org/packages/e9/3a/6797e0114c21d1725e2577508e24006fd7ff1d8c0c502d3b52e45c1771d8/xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799", size = 30620, upload-time = "2025-10-02T14:35:14.129Z" },
{ url = "https://files.pythonhosted.org/packages/86/15/9bc32671e9a38b413a76d24722a2bf8784a132c043063a8f5152d390b0f9/xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392", size = 31542, upload-time = "2025-10-02T14:35:15.21Z" },
{ url = "https://files.pythonhosted.org/packages/39/c5/cc01e4f6188656e56112d6a8e0dfe298a16934b8c47a247236549a3f7695/xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6", size = 27880, upload-time = "2025-10-02T14:35:16.315Z" },
{ url = "https://files.pythonhosted.org/packages/f3/30/25e5321c8732759e930c555176d37e24ab84365482d257c3b16362235212/xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702", size = 32956, upload-time = "2025-10-02T14:35:17.413Z" },
{ url = "https://files.pythonhosted.org/packages/9f/3c/0573299560d7d9f8ab1838f1efc021a280b5ae5ae2e849034ef3dee18810/xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db", size = 31072, upload-time = "2025-10-02T14:35:18.844Z" },
{ url = "https://files.pythonhosted.org/packages/7a/1c/52d83a06e417cd9d4137722693424885cc9878249beb3a7c829e74bf7ce9/xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54", size = 196409, upload-time = "2025-10-02T14:35:20.31Z" },
{ url = "https://files.pythonhosted.org/packages/e3/8e/c6d158d12a79bbd0b878f8355432075fc82759e356ab5a111463422a239b/xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f", size = 215736, upload-time = "2025-10-02T14:35:21.616Z" },
{ url = "https://files.pythonhosted.org/packages/bc/68/c4c80614716345d55071a396cf03d06e34b5f4917a467faf43083c995155/xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5", size = 214833, upload-time = "2025-10-02T14:35:23.32Z" },
{ url = "https://files.pythonhosted.org/packages/7e/e9/ae27c8ffec8b953efa84c7c4a6c6802c263d587b9fc0d6e7cea64e08c3af/xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1", size = 448348, upload-time = "2025-10-02T14:35:25.111Z" },
{ url = "https://files.pythonhosted.org/packages/d7/6b/33e21afb1b5b3f46b74b6bd1913639066af218d704cc0941404ca717fc57/xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee", size = 196070, upload-time = "2025-10-02T14:35:26.586Z" },
{ url = "https://files.pythonhosted.org/packages/96/b6/fcabd337bc5fa624e7203aa0fa7d0c49eed22f72e93229431752bddc83d9/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd", size = 212907, upload-time = "2025-10-02T14:35:28.087Z" },
{ url = "https://files.pythonhosted.org/packages/4b/d3/9ee6160e644d660fcf176c5825e61411c7f62648728f69c79ba237250143/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729", size = 200839, upload-time = "2025-10-02T14:35:29.857Z" },
{ url = "https://files.pythonhosted.org/packages/0d/98/e8de5baa5109394baf5118f5e72ab21a86387c4f89b0e77ef3e2f6b0327b/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292", size = 213304, upload-time = "2025-10-02T14:35:31.222Z" },
{ url = "https://files.pythonhosted.org/packages/7b/1d/71056535dec5c3177eeb53e38e3d367dd1d16e024e63b1cee208d572a033/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf", size = 416930, upload-time = "2025-10-02T14:35:32.517Z" },
{ url = "https://files.pythonhosted.org/packages/dc/6c/5cbde9de2cd967c322e651c65c543700b19e7ae3e0aae8ece3469bf9683d/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033", size = 193787, upload-time = "2025-10-02T14:35:33.827Z" },
{ url = "https://files.pythonhosted.org/packages/19/fa/0172e350361d61febcea941b0cc541d6e6c8d65d153e85f850a7b256ff8a/xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec", size = 30916, upload-time = "2025-10-02T14:35:35.107Z" },
{ url = "https://files.pythonhosted.org/packages/ad/e6/e8cf858a2b19d6d45820f072eff1bea413910592ff17157cabc5f1227a16/xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8", size = 31799, upload-time = "2025-10-02T14:35:36.165Z" },
{ url = "https://files.pythonhosted.org/packages/56/15/064b197e855bfb7b343210e82490ae672f8bc7cdf3ddb02e92f64304ee8a/xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746", size = 28044, upload-time = "2025-10-02T14:35:37.195Z" },
{ url = "https://files.pythonhosted.org/packages/7e/5e/0138bc4484ea9b897864d59fce9be9086030825bc778b76cb5a33a906d37/xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e", size = 32754, upload-time = "2025-10-02T14:35:38.245Z" },
{ url = "https://files.pythonhosted.org/packages/18/d7/5dac2eb2ec75fd771957a13e5dda560efb2176d5203f39502a5fc571f899/xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405", size = 30846, upload-time = "2025-10-02T14:35:39.6Z" },
{ url = "https://files.pythonhosted.org/packages/fe/71/8bc5be2bb00deb5682e92e8da955ebe5fa982da13a69da5a40a4c8db12fb/xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3", size = 194343, upload-time = "2025-10-02T14:35:40.69Z" },
{ url = "https://files.pythonhosted.org/packages/e7/3b/52badfb2aecec2c377ddf1ae75f55db3ba2d321c5e164f14461c90837ef3/xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6", size = 213074, upload-time = "2025-10-02T14:35:42.29Z" },
{ url = "https://files.pythonhosted.org/packages/a2/2b/ae46b4e9b92e537fa30d03dbc19cdae57ed407e9c26d163895e968e3de85/xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063", size = 212388, upload-time = "2025-10-02T14:35:43.929Z" },
{ url = "https://files.pythonhosted.org/packages/f5/80/49f88d3afc724b4ac7fbd664c8452d6db51b49915be48c6982659e0e7942/xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7", size = 445614, upload-time = "2025-10-02T14:35:45.216Z" },
{ url = "https://files.pythonhosted.org/packages/ed/ba/603ce3961e339413543d8cd44f21f2c80e2a7c5cfe692a7b1f2cccf58f3c/xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b", size = 194024, upload-time = "2025-10-02T14:35:46.959Z" },
{ url = "https://files.pythonhosted.org/packages/78/d1/8e225ff7113bf81545cfdcd79eef124a7b7064a0bba53605ff39590b95c2/xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd", size = 210541, upload-time = "2025-10-02T14:35:48.301Z" },
{ url = "https://files.pythonhosted.org/packages/6f/58/0f89d149f0bad89def1a8dd38feb50ccdeb643d9797ec84707091d4cb494/xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0", size = 198305, upload-time = "2025-10-02T14:35:49.584Z" },
{ url = "https://files.pythonhosted.org/packages/11/38/5eab81580703c4df93feb5f32ff8fa7fe1e2c51c1f183ee4e48d4bb9d3d7/xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152", size = 210848, upload-time = "2025-10-02T14:35:50.877Z" },
{ url = "https://files.pythonhosted.org/packages/5e/6b/953dc4b05c3ce678abca756416e4c130d2382f877a9c30a20d08ee6a77c0/xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11", size = 414142, upload-time = "2025-10-02T14:35:52.15Z" },
{ url = "https://files.pythonhosted.org/packages/08/a9/238ec0d4e81a10eb5026d4a6972677cbc898ba6c8b9dbaec12ae001b1b35/xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5", size = 191547, upload-time = "2025-10-02T14:35:53.547Z" },
{ url = "https://files.pythonhosted.org/packages/f1/ee/3cf8589e06c2164ac77c3bf0aa127012801128f1feebf2a079272da5737c/xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f", size = 31214, upload-time = "2025-10-02T14:35:54.746Z" },
{ url = "https://files.pythonhosted.org/packages/02/5d/a19552fbc6ad4cb54ff953c3908bbc095f4a921bc569433d791f755186f1/xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad", size = 32290, upload-time = "2025-10-02T14:35:55.791Z" },
{ url = "https://files.pythonhosted.org/packages/b1/11/dafa0643bc30442c887b55baf8e73353a344ee89c1901b5a5c54a6c17d39/xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679", size = 28795, upload-time = "2025-10-02T14:35:57.162Z" },
{ url = "https://files.pythonhosted.org/packages/2c/db/0e99732ed7f64182aef4a6fb145e1a295558deec2a746265dcdec12d191e/xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4", size = 32955, upload-time = "2025-10-02T14:35:58.267Z" },
{ url = "https://files.pythonhosted.org/packages/55/f4/2a7c3c68e564a099becfa44bb3d398810cc0ff6749b0d3cb8ccb93f23c14/xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67", size = 31072, upload-time = "2025-10-02T14:35:59.382Z" },
{ url = "https://files.pythonhosted.org/packages/c6/d9/72a29cddc7250e8a5819dad5d466facb5dc4c802ce120645630149127e73/xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad", size = 196579, upload-time = "2025-10-02T14:36:00.838Z" },
{ url = "https://files.pythonhosted.org/packages/63/93/b21590e1e381040e2ca305a884d89e1c345b347404f7780f07f2cdd47ef4/xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b", size = 215854, upload-time = "2025-10-02T14:36:02.207Z" },
{ url = "https://files.pythonhosted.org/packages/ce/b8/edab8a7d4fa14e924b29be877d54155dcbd8b80be85ea00d2be3413a9ed4/xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b", size = 214965, upload-time = "2025-10-02T14:36:03.507Z" },
{ url = "https://files.pythonhosted.org/packages/27/67/dfa980ac7f0d509d54ea0d5a486d2bb4b80c3f1bb22b66e6a05d3efaf6c0/xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca", size = 448484, upload-time = "2025-10-02T14:36:04.828Z" },
{ url = "https://files.pythonhosted.org/packages/8c/63/8ffc2cc97e811c0ca5d00ab36604b3ea6f4254f20b7bc658ca825ce6c954/xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a", size = 196162, upload-time = "2025-10-02T14:36:06.182Z" },
{ url = "https://files.pythonhosted.org/packages/4b/77/07f0e7a3edd11a6097e990f6e5b815b6592459cb16dae990d967693e6ea9/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99", size = 213007, upload-time = "2025-10-02T14:36:07.733Z" },
{ url = "https://files.pythonhosted.org/packages/ae/d8/bc5fa0d152837117eb0bef6f83f956c509332ce133c91c63ce07ee7c4873/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3", size = 200956, upload-time = "2025-10-02T14:36:09.106Z" },
{ url = "https://files.pythonhosted.org/packages/26/a5/d749334130de9411783873e9b98ecc46688dad5db64ca6e04b02acc8b473/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6", size = 213401, upload-time = "2025-10-02T14:36:10.585Z" },
{ url = "https://files.pythonhosted.org/packages/89/72/abed959c956a4bfc72b58c0384bb7940663c678127538634d896b1195c10/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93", size = 417083, upload-time = "2025-10-02T14:36:12.276Z" },
{ url = "https://files.pythonhosted.org/packages/0c/b3/62fd2b586283b7d7d665fb98e266decadf31f058f1cf6c478741f68af0cb/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518", size = 193913, upload-time = "2025-10-02T14:36:14.025Z" },
{ url = "https://files.pythonhosted.org/packages/9a/9a/c19c42c5b3f5a4aad748a6d5b4f23df3bed7ee5445accc65a0fb3ff03953/xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119", size = 31586, upload-time = "2025-10-02T14:36:15.603Z" },
{ url = "https://files.pythonhosted.org/packages/03/d6/4cc450345be9924fd5dc8c590ceda1db5b43a0a889587b0ae81a95511360/xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f", size = 32526, upload-time = "2025-10-02T14:36:16.708Z" },
{ url = "https://files.pythonhosted.org/packages/0f/c9/7243eb3f9eaabd1a88a5a5acadf06df2d83b100c62684b7425c6a11bcaa8/xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95", size = 28898, upload-time = "2025-10-02T14:36:17.843Z" },
{ url = "https://files.pythonhosted.org/packages/93/1e/8aec23647a34a249f62e2398c42955acd9b4c6ed5cf08cbea94dc46f78d2/xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0", size = 30662, upload-time = "2025-10-02T14:37:01.743Z" },
{ url = "https://files.pythonhosted.org/packages/b8/0b/b14510b38ba91caf43006209db846a696ceea6a847a0c9ba0a5b1adc53d6/xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296", size = 41056, upload-time = "2025-10-02T14:37:02.879Z" },
{ url = "https://files.pythonhosted.org/packages/50/55/15a7b8a56590e66ccd374bbfa3f9ffc45b810886c8c3b614e3f90bd2367c/xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13", size = 36251, upload-time = "2025-10-02T14:37:04.44Z" },
{ url = "https://files.pythonhosted.org/packages/62/b2/5ac99a041a29e58e95f907876b04f7067a0242cb85b5f39e726153981503/xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd", size = 32481, upload-time = "2025-10-02T14:37:05.869Z" },
{ url = "https://files.pythonhosted.org/packages/7b/d9/8d95e906764a386a3d3b596f3c68bb63687dfca806373509f51ce8eea81f/xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d", size = 31565, upload-time = "2025-10-02T14:37:06.966Z" },
]
[[package]]
name = "zstandard"
version = "0.25.0"

View File

@@ -1,14 +1,7 @@
# 🦜🍎️ LangChain Core
[![PyPI - Version](https://img.shields.io/pypi/v/langchain-core?label=%20)](https://pypi.org/project/langchain-core/#history)
[![PyPI - License](https://img.shields.io/pypi/l/langchain-core)](https://opensource.org/licenses/MIT)
[![PyPI - License](https://img.shields.io/pypi/l/langchain-core?style=flat-square)](https://opensource.org/licenses/MIT)
[![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain-core)](https://pypistats.org/packages/langchain-core)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
## Quick Install
@@ -16,14 +9,16 @@ To help you ship LangChain apps to production faster, check out [LangSmith](http
pip install langchain-core
```
## 🤔 What is this?
## What is it?
LangChain Core contains the base abstractions that power the LangChain ecosystem.
These abstractions are designed to be as modular and simple as possible.
The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
For full documentation see the [API reference](https://reference.langchain.com/python/).
## ⛰️ Why build on top of LangChain Core?
The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
@@ -32,16 +27,12 @@ The LangChain ecosystem is built on top of `langchain-core`. Some of the benefit
- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
- **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.
## 📖 Documentation
For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).
## 📕 Releases & Versioning
See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning Policy](https://docs.langchain.com/oss/python/versioning).
## 💁 Contributing
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing).
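For readers skimming this README diff, a minimal sketch of one such base abstraction, the prompt template, using only `langchain-core` (no provider package required; output shown as a comment):

```python
from langchain_core.prompts import ChatPromptTemplate

# Prompt templates are one of the provider-agnostic interfaces langchain-core defines.
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
messages = prompt.invoke({"topic": "bears"}).to_messages()
print(messages)  # [HumanMessage(content='Tell me a joke about bears')]
```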

View File

@@ -6,6 +6,7 @@ This module is only relevant for LangChain developers, not for users.
This module and its submodules are for internal use only. Do not use them in your
own code. We may change the API at any time with no warning.
"""
from typing import TYPE_CHECKING

View File

@@ -51,26 +51,29 @@ def beta(
own (annotation-emitting) `C.__init__`).
Args:
message:
message : str, optional
Override the default beta message. The %(since)s,
%(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
and %(removal)s format specifiers will be replaced by the
values of the respective arguments passed to this function.
name:
name : str, optional
The name of the beta object.
obj_type:
obj_type : str, optional
The object type being beta.
addendum:
addendum : str, optional
Additional text appended directly to the final message.
Returns:
A decorator which can be used to mark functions or classes as beta.
```python
@beta
def the_function_to_annotate():
pass
```
Examples:
.. code-block:: python
@beta
def the_function_to_annotate():
pass
"""
def beta(

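For context on the decorator whose docstring is being reworked above, a hedged usage sketch; `summarize` is a hypothetical function, and the `message` template relies on the format specifiers the docstring lists:

```python
from langchain_core._api import beta


@beta(message="%(name)s is in beta and may change without notice.")
def summarize(text: str) -> str:
    """Hypothetical helper used only to illustrate the decorator."""
    return text[:80]


summarize("hello")  # emits a beta warning when called
```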
View File

@@ -97,44 +97,47 @@ def deprecated(
property, and 'function' otherwise.
Args:
since:
since : str
The release at which this API became deprecated.
message:
message : str, optional
Override the default deprecation message. The %(since)s,
%(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
and %(removal)s format specifiers will be replaced by the
values of the respective arguments passed to this function.
name:
name : str, optional
The name of the deprecated object.
alternative:
alternative : str, optional
An alternative API that the user may use in place of the
deprecated API. The deprecation warning will tell the user
about this alternative if provided.
alternative_import:
alternative_import: str, optional
An alternative import that the user may use instead.
pending:
pending : bool, optional
If `True`, uses a `PendingDeprecationWarning` instead of a
DeprecationWarning. Cannot be used together with removal.
obj_type:
obj_type : str, optional
The object type being deprecated.
addendum:
addendum : str, optional
Additional text appended directly to the final message.
removal:
removal : str, optional
The expected removal version. With the default (an empty
string), a removal version is automatically computed from
since. Set to other Falsy values to not schedule a removal
date. Cannot be used together with pending.
package:
package: str, optional
The package of the deprecated object.
Returns:
A decorator to mark a function or class as deprecated.
```python
@deprecated("1.4.0")
def the_function_to_deprecate():
pass
```
Examples:
.. code-block:: python
@deprecated("1.4.0")
def the_function_to_deprecate():
pass
"""
_validate_deprecation_params(
removal, alternative, alternative_import, pending=pending
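A similar hedged sketch for `deprecated`; `old_helper` and the `new_helper` alternative are hypothetical names, while `since`, `alternative`, and `removal` are the parameters documented above:

```python
from langchain_core._api import deprecated


@deprecated(since="0.2.0", alternative="new_helper", removal="1.0.0")
def old_helper() -> None:
    """Hypothetical function kept only for backwards compatibility."""


old_helper()  # warns that old_helper is deprecated and suggests new_helper
```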
@@ -547,10 +550,12 @@ def rename_parameter(
A decorator indicating that a parameter was renamed.
Example:
```python
@_api.rename_parameter("3.1", "bad_name", "good_name")
def func(good_name): ...
```
.. code-block:: python
@_api.rename_parameter("3.1", "bad_name", "good_name")
def func(good_name): ...
"""
def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:

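And a sketch of `rename_parameter` itself, mirroring the positional usage in the docstring above (the import path is an assumption based on this module's location):

```python
from langchain_core._api.deprecation import rename_parameter


@rename_parameter("3.1", "bad_name", "good_name")
def func(good_name: str) -> str:
    return good_name


func(bad_name="hi")   # still accepted, but emits a deprecation warning
func(good_name="hi")  # preferred spelling
```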
View File

@@ -1,24 +1,25 @@
"""Schema definitions for representing agent actions, observations, and return values.
!!! warning
The schema definitions are provided for backwards compatibility.
**ATTENTION** The schema definitions are provided for backwards compatibility.
!!! warning
New agents should be built using the
[`langchain` library](https://pypi.org/project/langchain/), which provides a
[langgraph library](https://github.com/langchain-ai/langgraph), which provides a
simpler and more flexible way to define agents.
See docs on [building agents](https://docs.langchain.com/oss/python/langchain/agents).
Please see the
[migration guide](https://python.langchain.com/docs/how_to/migrate_agent/) for
information on how to migrate existing agents to modern langgraph agents.
Agents use language models to choose a sequence of actions to take.
A basic agent works in the following manner:
1. Given a prompt, an agent uses an LLM to request an action to take
(e.g., a tool to run).
(e.g., a tool to run).
2. The agent executes the action (e.g., runs the tool), and receives an observation.
3. The agent returns the observation to the LLM, which can then be used to generate
the next action.
the next action.
4. When the agent reaches a stopping condition, it returns a final return value.
The schemas for the agents themselves are defined in langchain.agents.agent.
@@ -52,42 +53,40 @@ class AgentAction(Serializable):
"""The input to pass in to the Tool."""
log: str
"""Additional information to log about the action.
This log can be used in a few ways. First, it can be used to audit what exactly the
LLM predicted to lead to this `(tool, tool_input)`.
Second, it can be used in future iterations to show the LLM's prior thoughts. This is
useful when `(tool, tool_input)` does not contain full information about the LLM
prediction (for example, any `thought` before the tool/tool_input).
"""
This log can be used in a few ways. First, it can be used to audit
what exactly the LLM predicted to lead to this (tool, tool_input).
Second, it can be used in future iterations to show the LLM's prior
thoughts. This is useful when (tool, tool_input) does not contain
full information about the LLM prediction (for example, any `thought`
before the tool/tool_input)."""
type: Literal["AgentAction"] = "AgentAction"
# Override init to support instantiation by position for backward compat.
def __init__(self, tool: str, tool_input: str | dict, log: str, **kwargs: Any):
"""Create an `AgentAction`.
"""Create an AgentAction.
Args:
tool: The name of the tool to execute.
tool_input: The input to pass in to the `Tool`.
tool_input: The input to pass in to the Tool.
log: Additional information to log about the action.
"""
super().__init__(tool=tool, tool_input=tool_input, log=log, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""`AgentAction` is serializable.
"""AgentAction is serializable.
Returns:
`True`
True
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
"""Get the namespace of the langchain object.
Returns:
`["langchain", "schema", "agent"]`
``["langchain", "schema", "agent"]``
"""
return ["langchain", "schema", "agent"]
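For reference, a small sketch of the legacy schema objects documented in this file; the tool name and values are illustrative:

```python
from langchain_core.agents import AgentAction, AgentFinish

# Positional instantiation is supported for backward compatibility (see __init__ above).
action = AgentAction("search", "weather in SF", "Thought: I should look this up.\n")
finish = AgentFinish(return_values={"output": "It is sunny."}, log="Final Answer: It is sunny.")

print(action.tool, action.tool_input)  # search weather in SF
print(finish.return_values["output"])  # It is sunny.
```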
@@ -100,23 +99,19 @@ class AgentAction(Serializable):
class AgentActionMessageLog(AgentAction):
"""Representation of an action to be executed by an agent.
This is similar to `AgentAction`, but includes a message log consisting of
chat messages.
This is useful when working with `ChatModels`, and is used to reconstruct
conversation history from the agent's perspective.
This is similar to AgentAction, but includes a message log consisting of
chat messages. This is useful when working with ChatModels, and is used
to reconstruct conversation history from the agent's perspective.
"""
message_log: Sequence[BaseMessage]
"""Similar to log, this can be used to pass along extra information about what exact
messages were predicted by the LLM before parsing out the `(tool, tool_input)`.
This is again useful if `(tool, tool_input)` cannot be used to fully recreate the
LLM prediction, and you need that LLM prediction (for future agent iteration).
"""Similar to log, this can be used to pass along extra
information about what exact messages were predicted by the LLM
before parsing out the (tool, tool_input). This is again useful
if (tool, tool_input) cannot be used to fully recreate the LLM
prediction, and you need that LLM prediction (for future agent iteration).
Compared to `log`, this is useful when the underlying LLM is a
chat model (and therefore returns messages rather than a string).
"""
ChatModel (and therefore returns messages rather than a string)."""
# Ignoring type because we're overriding the type from AgentAction.
# And this is the correct thing to do in this case.
# The type literal is used for serialization purposes.
@@ -124,12 +119,12 @@ class AgentActionMessageLog(AgentAction):
class AgentStep(Serializable):
"""Result of running an `AgentAction`."""
"""Result of running an AgentAction."""
action: AgentAction
"""The `AgentAction` that was executed."""
"""The AgentAction that was executed."""
observation: Any
"""The result of the `AgentAction`."""
"""The result of the AgentAction."""
@property
def messages(self) -> Sequence[BaseMessage]:
@@ -138,22 +133,19 @@ class AgentStep(Serializable):
class AgentFinish(Serializable):
"""Final return value of an `ActionAgent`.
"""Final return value of an ActionAgent.
Agents return an `AgentFinish` when they have reached a stopping condition.
Agents return an AgentFinish when they have reached a stopping condition.
"""
return_values: dict
"""Dictionary of return values."""
log: str
"""Additional information to log about the return value.
This is used to pass along the full LLM prediction, not just the parsed out
return value.
For example, if the full LLM prediction was `Final Answer: 2` you may want to just
return `2` as a return value, but pass along the full string as a `log` (for
debugging or observability purposes).
return value. For example, if the full LLM prediction was
`Final Answer: 2` you may want to just return `2` as a return value, but pass
along the full string as a `log` (for debugging or observability purposes).
"""
type: Literal["AgentFinish"] = "AgentFinish"
@@ -163,15 +155,15 @@ class AgentFinish(Serializable):
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
"""Return True as this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
"""Get the namespace of the langchain object.
Returns:
`["langchain", "schema", "agent"]`
``["langchain", "schema", "agent"]``
"""
return ["langchain", "schema", "agent"]
@@ -211,7 +203,7 @@ def _convert_agent_observation_to_messages(
observation: Observation to convert to a message.
Returns:
`AIMessage` that corresponds to the original tool invocation.
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, AgentActionMessageLog):
return [_create_function_message(agent_action, observation)]
@@ -234,7 +226,7 @@ def _create_function_message(
observation: the result of the tool invocation.
Returns:
`FunctionMessage` that corresponds to the original tool invocation.
FunctionMessage that corresponds to the original tool invocation.
"""
if not isinstance(observation, str):
try:

View File

@@ -1,17 +1,24 @@
"""Optional caching layer for language models.
"""Cache classes.
Distinct from provider-based [prompt caching](https://docs.langchain.com/oss/python/langchain/models#prompt-caching).
!!! warning
Beta Feature!
!!! warning "Beta feature"
This is a beta feature. Please be wary of deploying experimental code to production
unless you've taken appropriate precautions.
**Cache** provides an optional caching layer for LLMs.
A cache is useful for two reasons:
Cache is useful for two reasons:
1. It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
2. It can speed up your application by reducing the number of API calls you make to the
LLM provider.
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
to the LLM provider.
Cache directly competes with Memory. See documentation for Pros and Cons.
**Class hierarchy:**
.. code-block::
BaseCache --> <name>Cache # Examples: InMemoryCache, RedisCache, GPTCache
"""
from __future__ import annotations
@@ -33,8 +40,8 @@ class BaseCache(ABC):
The cache interface consists of the following methods:
- lookup: Look up a value based on a prompt and `llm_string`.
- update: Update the cache based on a prompt and `llm_string`.
- lookup: Look up a value based on a prompt and llm_string.
- update: Update the cache based on a prompt and llm_string.
- clear: Clear the cache.
In addition, the cache interface provides an async version of each method.
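A hedged sketch of a custom cache against that interface; the dict-backed storage and key scheme are illustrative, not prescribed (`RETURN_VAL_TYPE` is the alias defined in this module):

```python
from __future__ import annotations

from typing import Any

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache


class DictCache(BaseCache):
    """Toy cache keyed on the (prompt, llm_string) 2-tuple described above."""

    def __init__(self) -> None:
        self._store: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store.clear()
```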
@@ -46,46 +53,43 @@ class BaseCache(ABC):
@abstractmethod
def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Look up based on `prompt` and `llm_string`.
"""Look up based on prompt and llm_string.
A cache implementation is expected to generate a key from the 2-tuple
of `prompt` and `llm_string` (e.g., by concatenating them with a delimiter).
of prompt and llm_string (e.g., by concatenating them with a delimiter).
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
This is used to capture the invocation parameters of the LLM
(e.g., model name, temperature, stop tokens, max tokens, etc.).
These invocation parameters are serialized into a string representation.
These invocation parameters are serialized into a string
representation.
Returns:
On a cache miss, return `None`. On a cache hit, return the cached value.
The cached value is a list of `Generation` (or subclasses).
On a cache miss, return None. On a cache hit, return the cached value.
The cached value is a list of Generations (or subclasses).
"""
@abstractmethod
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on `prompt` and `llm_string`.
"""Update cache based on prompt and llm_string.
The prompt and llm_string are used to generate a key for the cache.
The key should match that of the lookup method.
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
This is used to capture the invocation parameters of the LLM
(e.g., model name, temperature, stop tokens, max tokens, etc.).
These invocation parameters are serialized into a string
representation.
return_val: The value to be cached. The value is a list of `Generation`
return_val: The value to be cached. The value is a list of Generations
(or subclasses).
"""
@@ -94,49 +98,45 @@ class BaseCache(ABC):
"""Clear cache that can take additional keyword arguments."""
async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Async look up based on `prompt` and `llm_string`.
"""Async look up based on prompt and llm_string.
A cache implementation is expected to generate a key from the 2-tuple
of `prompt` and `llm_string` (e.g., by concatenating them with a delimiter).
of prompt and llm_string (e.g., by concatenating them with a delimiter).
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
This is used to capture the invocation parameters of the LLM
(e.g., model name, temperature, stop tokens, max tokens, etc.).
These invocation parameters are serialized into a string
representation.
Returns:
On a cache miss, return `None`. On a cache hit, return the cached value.
The cached value is a list of `Generation` (or subclasses).
On a cache miss, return None. On a cache hit, return the cached value.
The cached value is a list of Generations (or subclasses).
"""
return await run_in_executor(None, self.lookup, prompt, llm_string)
async def aupdate(
self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
) -> None:
"""Async update cache based on `prompt` and `llm_string`.
"""Async update cache based on prompt and llm_string.
The prompt and llm_string are used to generate a key for the cache.
The key should match that of the look up method.
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
This is used to capture the invocation parameters of the LLM
(e.g., model name, temperature, stop tokens, max tokens, etc.).
These invocation parameters are serialized into a string
representation.
return_val: The value to be cached. The value is a list of `Generation`
return_val: The value to be cached. The value is a list of Generations
(or subclasses).
"""
return await run_in_executor(None, self.update, prompt, llm_string, return_val)
@@ -156,9 +156,10 @@ class InMemoryCache(BaseCache):
maxsize: The maximum number of items to store in the cache.
If `None`, the cache has no maximum size.
If the cache exceeds the maximum size, the oldest items are removed.
Default is None.
Raises:
ValueError: If `maxsize` is less than or equal to `0`.
ValueError: If maxsize is less than or equal to 0.
"""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
if maxsize is not None and maxsize <= 0:
@@ -167,28 +168,28 @@ class InMemoryCache(BaseCache):
self._maxsize = maxsize
def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Look up based on `prompt` and `llm_string`.
"""Look up based on prompt and llm_string.
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
Returns:
On a cache miss, return `None`. On a cache hit, return the cached value.
On a cache miss, return None. On a cache hit, return the cached value.
"""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on `prompt` and `llm_string`.
"""Update cache based on prompt and llm_string.
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
return_val: The value to be cached. The value is a list of `Generation`
return_val: The value to be cached. The value is a list of Generations
(or subclasses).
"""
if self._maxsize is not None and len(self._cache) == self._maxsize:
@@ -201,30 +202,30 @@ class InMemoryCache(BaseCache):
self._cache = {}
async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Async look up based on `prompt` and `llm_string`.
"""Async look up based on prompt and llm_string.
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
Returns:
On a cache miss, return `None`. On a cache hit, return the cached value.
On a cache miss, return None. On a cache hit, return the cached value.
"""
return self.lookup(prompt, llm_string)
async def aupdate(
self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
) -> None:
"""Async update cache based on `prompt` and `llm_string`.
"""Async update cache based on prompt and llm_string.
Args:
prompt: A string representation of the prompt.
In the case of a chat model, the prompt is a non-trivial
prompt: a string representation of the prompt.
In the case of a Chat model, the prompt is a non-trivial
serialization of the prompt into the language model.
llm_string: A string representation of the LLM configuration.
return_val: The value to be cached. The value is a list of `Generation`
return_val: The value to be cached. The value is a list of Generations
(or subclasses).
"""
self.update(prompt, llm_string, return_val)
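A usage sketch for the in-memory cache, assuming the standard `set_llm_cache` helper from `langchain_core.globals`:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Install the in-memory cache globally; with maxsize set, the oldest
# entries are evicted once 256 items are stored.
set_llm_cache(InMemoryCache(maxsize=256))
```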

View File

@@ -1,4 +1,11 @@
"""**Callback handlers** allow listening to events in LangChain."""
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from typing import TYPE_CHECKING

View File

@@ -35,10 +35,10 @@ class RetrieverManagerMixin:
"""Run when Retriever errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_retriever_end(
@@ -52,10 +52,10 @@ class RetrieverManagerMixin:
"""Run when Retriever ends running.
Args:
documents: The documents retrieved.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
documents (Sequence[Document]): The documents retrieved.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
@@ -76,11 +76,12 @@ class LLMManagerMixin:
For both chat models and non-chat models (legacy LLMs).
Args:
token: The new token.
chunk: The new generated chunk, containing content and other information.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
token (str): The new token.
chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
containing content and other information.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_llm_end(
@@ -94,10 +95,10 @@ class LLMManagerMixin:
"""Run when LLM ends running.
Args:
response: The response which was generated.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
response (LLMResult): The response which was generated.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_llm_error(
@@ -111,10 +112,10 @@ class LLMManagerMixin:
"""Run when LLM errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
@@ -132,10 +133,10 @@ class ChainManagerMixin:
"""Run when chain ends running.
Args:
outputs: The outputs of the chain.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
outputs (dict[str, Any]): The outputs of the chain.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_chain_error(
@@ -149,10 +150,10 @@ class ChainManagerMixin:
"""Run when chain errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_agent_action(
@@ -166,10 +167,10 @@ class ChainManagerMixin:
"""Run on agent action.
Args:
action: The agent action.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
action (AgentAction): The agent action.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_agent_finish(
@@ -183,10 +184,10 @@ class ChainManagerMixin:
"""Run on the agent end.
Args:
finish: The agent finish.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
finish (AgentFinish): The agent finish.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
@@ -204,10 +205,10 @@ class ToolManagerMixin:
"""Run when the tool ends running.
Args:
output: The output of the tool.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
output (Any): The output of the tool.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_tool_error(
@@ -221,10 +222,10 @@ class ToolManagerMixin:
"""Run when tool errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
@@ -247,16 +248,16 @@ class CallbackManagerMixin:
!!! warning
This method is called for non-chat models (regular LLMs). If you're
implementing a handler for a chat model, you should use
`on_chat_model_start` instead.
``on_chat_model_start`` instead.
Args:
serialized: The serialized LLM.
prompts: The prompts.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized LLM.
prompts (list[str]): The prompts.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
def on_chat_model_start(
@@ -274,16 +275,16 @@ class CallbackManagerMixin:
!!! warning
This method is called for chat models. If you're implementing a handler for
a non-chat model, you should use `on_llm_start` instead.
a non-chat model, you should use ``on_llm_start`` instead.
Args:
serialized: The serialized chat model.
messages: The messages.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized chat model.
messages (list[list[BaseMessage]]): The messages.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
# NotImplementedError is thrown intentionally
# Callback handler will fall back to on_llm_start if this exception is thrown
@@ -304,13 +305,13 @@ class CallbackManagerMixin:
"""Run when the Retriever starts running.
Args:
serialized: The serialized Retriever.
query: The query.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized Retriever.
query (str): The query.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
def on_chain_start(
@@ -327,13 +328,13 @@ class CallbackManagerMixin:
"""Run when a chain starts running.
Args:
serialized: The serialized chain.
inputs: The inputs.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
def on_tool_start(
@@ -351,14 +352,14 @@ class CallbackManagerMixin:
"""Run when the tool starts running.
Args:
serialized: The serialized chain.
input_str: The input string.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
inputs: The inputs.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized tool.
input_str (str): The input string.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
inputs (dict[str, Any] | None): The inputs.
kwargs (Any): Additional keyword arguments.
"""
@@ -376,10 +377,10 @@ class RunManagerMixin:
"""Run on an arbitrary text.
Args:
text: The text.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
text (str): The text.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_retry(
@@ -393,10 +394,10 @@ class RunManagerMixin:
"""Run on a retry event.
Args:
retry_state: The retry state.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
retry_state (RetryCallState): The retry state.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
def on_custom_event(
@@ -414,12 +415,14 @@ class RunManagerMixin:
Args:
name: The name of the custom event.
data: The data for the custom event. Format will match
the format specified by the user.
the format specified by the user.
run_id: The ID of the run.
tags: The tags associated with the custom event
(includes inherited tags).
metadata: The metadata associated with the custom event
(includes inherited metadata).
!!! version-added "Added in version 0.2.15"
"""
@@ -494,16 +497,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):
!!! warning
This method is called for non-chat models (regular LLMs). If you're
implementing a handler for a chat model, you should use
`on_chat_model_start` instead.
``on_chat_model_start`` instead.
Args:
serialized: The serialized LLM.
prompts: The prompts.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized LLM.
prompts (list[str]): The prompts.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
async def on_chat_model_start(
@@ -521,16 +524,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):
!!! warning
This method is called for chat models. If you're implementing a handler for
a non-chat model, you should use `on_llm_start` instead.
a non-chat model, you should use ``on_llm_start`` instead.
Args:
serialized: The serialized chat model.
messages: The messages.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized chat model.
messages (list[list[BaseMessage]]): The messages.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
# NotImplementedError is thrown intentionally
# Callback handler will fall back to on_llm_start if this exception is thrown
@@ -552,12 +555,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
For both chat models and non-chat models (legacy LLMs).
Args:
token: The new token.
chunk: The new generated chunk, containing content and other information.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
token (str): The new token.
chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
containing content and other information.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_llm_end(
@@ -572,11 +576,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when the model ends running.
Args:
response: The response which was generated.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
response (LLMResult): The response which was generated.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_llm_error(
@@ -595,7 +599,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
kwargs (Any): Additional keyword arguments.
- response (LLMResult): The response which was generated before
the error occurred.
"""
@@ -614,13 +618,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when a chain starts running.
Args:
serialized: The serialized chain.
inputs: The inputs.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
async def on_chain_end(
@@ -635,11 +639,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when a chain ends running.
Args:
outputs: The outputs of the chain.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
outputs (dict[str, Any]): The outputs of the chain.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_chain_error(
@@ -654,11 +658,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when chain errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_tool_start(
@@ -676,14 +680,14 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when the tool starts running.
Args:
serialized: The serialized tool.
input_str: The input string.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
inputs: The inputs.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized tool.
input_str (str): The input string.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
inputs (dict[str, Any] | None): The inputs.
kwargs (Any): Additional keyword arguments.
"""
async def on_tool_end(
@@ -698,11 +702,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when the tool ends running.
Args:
output: The output of the tool.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
output (Any): The output of the tool.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_tool_error(
@@ -717,11 +721,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when tool errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_text(
@@ -736,11 +740,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on an arbitrary text.
Args:
text: The text.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
text (str): The text.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_retry(
@@ -754,10 +758,10 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on a retry event.
Args:
retry_state: The retry state.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
retry_state (RetryCallState): The retry state.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
"""
async def on_agent_action(
@@ -772,11 +776,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on agent action.
Args:
action: The agent action.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
action (AgentAction): The agent action.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_agent_finish(
@@ -791,11 +795,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on the agent end.
Args:
finish: The agent finish.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
finish (AgentFinish): The agent finish.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_retriever_start(
@@ -812,13 +816,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on the retriever start.
Args:
serialized: The serialized retriever.
query: The query.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized retriever.
query (str): The query.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
metadata (dict[str, Any] | None): The metadata.
kwargs (Any): Additional keyword arguments.
"""
async def on_retriever_end(
@@ -833,11 +837,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on the retriever end.
Args:
documents: The documents retrieved.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
documents (Sequence[Document]): The documents retrieved.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_retriever_error(
@@ -852,11 +856,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on retriever error.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (list[str] | None): The tags.
kwargs (Any): Additional keyword arguments.
"""
async def on_custom_event(
@@ -874,12 +878,14 @@ class AsyncCallbackHandler(BaseCallbackHandler):
Args:
name: The name of the custom event.
data: The data for the custom event. Format will match
the format specified by the user.
the format specified by the user.
run_id: The ID of the run.
tags: The tags associated with the custom event
(includes inherited tags).
metadata: The metadata associated with the custom event
(includes inherited metadata).
!!! version-added "Added in version 0.2.15"
"""
@@ -937,29 +943,35 @@ class BaseCallbackManager(CallbackManagerMixin):
within merge_configs.
Returns:
The merged callback manager of the same type as the current object.
BaseCallbackManager: The merged callback manager of the same type
as the current object.
Example: Merging two callback managers.
```python
from langchain_core.callbacks.manager import (
CallbackManager,
trace_as_chain_group,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
.. code-block:: python
manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
merged_manager = group_manager.merge(manager)
print(merged_manager.handlers)
# [
# <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
# <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
# ]
from langchain_core.callbacks.manager import (
CallbackManager,
trace_as_chain_group,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
manager = CallbackManager(
handlers=[StdOutCallbackHandler()], tags=["tag2"]
)
with trace_as_chain_group(
"My Group Name", tags=["tag1"]
) as group_manager:
merged_manager = group_manager.merge(manager)
print(merged_manager.handlers)
# [
# <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
# <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
# ]
print(merged_manager.tags)
# ['tag2', 'tag1']
print(merged_manager.tags)
# ['tag2', 'tag1']
```
""" # noqa: E501
manager = self.__class__(
parent_run_id=self.parent_run_id or other.parent_run_id,
@@ -996,8 +1008,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Add a handler to the callback manager.
Args:
handler: The handler to add.
inherit: Whether to inherit the handler.
handler (BaseCallbackHandler): The handler to add.
inherit (bool): Whether to inherit the handler. Default is True.
"""
if handler not in self.handlers:
self.handlers.append(handler)
@@ -1008,7 +1020,7 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Remove a handler from the callback manager.
Args:
handler: The handler to remove.
handler (BaseCallbackHandler): The handler to remove.
"""
if handler in self.handlers:
self.handlers.remove(handler)
@@ -1023,8 +1035,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Set handlers as the only handlers on the callback manager.
Args:
handlers: The handlers to set.
inherit: Whether to inherit the handlers.
handlers (list[BaseCallbackHandler]): The handlers to set.
inherit (bool): Whether to inherit the handlers. Default is True.
"""
self.handlers = []
self.inheritable_handlers = []
@@ -1039,8 +1051,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Set handler as the only handler on the callback manager.
Args:
handler: The handler to set.
inherit: Whether to inherit the handler.
handler (BaseCallbackHandler): The handler to set.
inherit (bool): Whether to inherit the handler. Default is True.
"""
self.set_handlers([handler], inherit=inherit)
@@ -1052,8 +1064,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Add tags to the callback manager.
Args:
tags: The tags to add.
inherit: Whether to inherit the tags.
tags (list[str]): The tags to add.
inherit (bool): Whether to inherit the tags. Default is True.
"""
for tag in tags:
if tag in self.tags:
@@ -1066,7 +1078,7 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Remove tags from the callback manager.
Args:
tags: The tags to remove.
tags (list[str]): The tags to remove.
"""
for tag in tags:
if tag in self.tags:
@@ -1082,8 +1094,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Add metadata to the callback manager.
Args:
metadata: The metadata to add.
inherit: Whether to inherit the metadata.
metadata (dict[str, Any]): The metadata to add.
inherit (bool): Whether to inherit the metadata. Default is True.
"""
self.metadata.update(metadata)
if inherit:
@@ -1093,7 +1105,7 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Remove metadata from the callback manager.
Args:
keys: The keys to remove.
keys (list[str]): The keys to remove.
"""
for key in keys:
self.metadata.pop(key, None)
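Taken together, the mutators documented in the hunks above can be exercised like this; a minimal sketch assuming only the public `CallbackManager` API shown in this diff:

```python
from langchain_core.callbacks.manager import CallbackManager
from langchain_core.callbacks.stdout import StdOutCallbackHandler

manager = CallbackManager(handlers=[])

# add_handler / add_tags / add_metadata mutate the manager in place;
# inherit=True also records the values as inheritable for child managers.
manager.add_handler(StdOutCallbackHandler(), inherit=True)
manager.add_tags(["experiment-1"])
manager.add_metadata({"run": 1})

print(len(manager.handlers))  # 1
print(manager.tags)  # ['experiment-1']
print(manager.metadata)  # {'run': 1}

# remove_tags / remove_metadata undo the additions by value and key.
manager.remove_tags(["experiment-1"])
manager.remove_metadata(["run"])
```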

View File

@@ -27,27 +27,27 @@ class FileCallbackHandler(BaseCallbackHandler):
Examples:
Using as a context manager (recommended):
```python
with FileCallbackHandler("output.txt") as handler:
# Use handler with your chain/agent
chain.invoke(inputs, config={"callbacks": [handler]})
```
.. code-block:: python
with FileCallbackHandler("output.txt") as handler:
# Use handler with your chain/agent
chain.invoke(inputs, config={"callbacks": [handler]})
Direct instantiation (deprecated):
```python
handler = FileCallbackHandler("output.txt")
# File remains open until handler is garbage collected
try:
chain.invoke(inputs, config={"callbacks": [handler]})
finally:
handler.close() # Explicit cleanup recommended
```
.. code-block:: python
handler = FileCallbackHandler("output.txt")
# File remains open until handler is garbage collected
try:
chain.invoke(inputs, config={"callbacks": [handler]})
finally:
handler.close() # Explicit cleanup recommended
Args:
filename: The file path to write to.
mode: The file open mode. Defaults to `'a'` (append).
color: Default color for text output.
color: Default color for text output. Defaults to `None`.
!!! note
When not used as a context manager, a deprecation warning will be issued
@@ -64,7 +64,7 @@ class FileCallbackHandler(BaseCallbackHandler):
Args:
filename: Path to the output file.
mode: File open mode (e.g., `'w'`, `'a'`, `'x'`). Defaults to `'a'`.
color: Default text color for output.
color: Default text color for output. Defaults to `None`.
"""
self.filename = filename
@@ -132,7 +132,7 @@ class FileCallbackHandler(BaseCallbackHandler):
Args:
text: The text to write to the file.
color: Optional color for the text. Defaults to `self.color`.
end: String appended after the text.
end: String appended after the text. Defaults to `""`.
file: Optional file to write to. Defaults to `self.file`.
Raises:
@@ -239,7 +239,7 @@ class FileCallbackHandler(BaseCallbackHandler):
text: The text to write.
color: Color override for this specific output. If `None`, uses
`self.color`.
end: String appended after the text.
end: String appended after the text. Defaults to `""`.
**kwargs: Additional keyword arguments.
"""

File diff suppressed because it is too large

View File

@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Initialize callback handler.
Args:
color: The color to use for the text.
color: The color to use for the text. Defaults to `None`.
"""
self.color = color
@@ -31,9 +31,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Print out that we are entering a chain.
Args:
serialized: The serialized chain.
inputs: The inputs to the chain.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
@@ -48,8 +48,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Print out that we finished a chain.
Args:
outputs: The outputs of the chain.
**kwargs: Additional keyword arguments.
outputs (dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print("\n\033[1m> Finished chain.\033[0m") # noqa: T201
@@ -60,9 +60,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run on agent action.
Args:
action: The agent action.
color: The color to use for the text.
**kwargs: Additional keyword arguments.
action (AgentAction): The agent action.
color (str | None): The color to use for the text. Defaults to `None`.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color)
@@ -78,11 +78,12 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""If not the final action, print out observation.
Args:
output: The output to print.
color: The color to use for the text.
observation_prefix: The observation prefix.
llm_prefix: The LLM prefix.
**kwargs: Additional keyword arguments.
output (Any): The output to print.
color (str | None): The color to use for the text. Defaults to `None`.
observation_prefix (str | None): The observation prefix.
Defaults to `None`.
llm_prefix (str | None): The LLM prefix. Defaults to `None`.
**kwargs (Any): Additional keyword arguments.
"""
output = str(output)
if observation_prefix is not None:
@@ -102,10 +103,10 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run when the agent ends.
Args:
text: The text to print.
color: The color to use for the text.
end: The end character to use.
**kwargs: Additional keyword arguments.
text (str): The text to print.
color (str | None): The color to use for the text. Defaults to `None`.
end (str): The end character to use. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end)
@@ -116,8 +117,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run on the agent end.
Args:
finish: The agent finish.
color: The color to use for the text.
**kwargs: Additional keyword arguments.
finish (AgentFinish): The agent finish.
color (str | None): The color to use for the text. Defaults to `None`.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n")
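As a quick illustration of the hunks above, the handler can be passed per invocation via `config`; a sketch using a trivial runnable (the printed lines are indicative):

```python
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda text: text.upper())
result = chain.invoke("hello", config={"callbacks": [StdOutCallbackHandler()]})
# > Entering new RunnableLambda chain...
# > Finished chain.
print(result)  # HELLO
```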

View File

@@ -24,9 +24,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM starts running.
Args:
serialized: The serialized LLM.
prompts: The prompts to run.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized LLM.
prompts (list[str]): The prompts to run.
**kwargs (Any): Additional keyword arguments.
"""
def on_chat_model_start(
@@ -38,9 +38,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM starts running.
Args:
serialized: The serialized LLM.
messages: The messages to run.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized LLM.
messages (list[list[BaseMessage]]): The messages to run.
**kwargs (Any): Additional keyword arguments.
"""
@override
@@ -48,8 +48,8 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run on new LLM token. Only available when streaming is enabled.
Args:
token: The new token.
**kwargs: Additional keyword arguments.
token (str): The new token.
**kwargs (Any): Additional keyword arguments.
"""
sys.stdout.write(token)
sys.stdout.flush()
@@ -58,16 +58,16 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM ends running.
Args:
response: The response from the LLM.
**kwargs: Additional keyword arguments.
response (LLMResult): The response from the LLM.
**kwargs (Any): Additional keyword arguments.
"""
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors.
Args:
error: The error that occurred.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_start(
@@ -76,25 +76,25 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when a chain starts running.
Args:
serialized: The serialized chain.
inputs: The inputs to the chain.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Run when a chain ends running.
Args:
outputs: The outputs of the chain.
**kwargs: Additional keyword arguments.
outputs (dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors.
Args:
error: The error that occurred.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_start(
@@ -103,47 +103,47 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when the tool starts running.
Args:
serialized: The serialized tool.
input_str: The input string.
**kwargs: Additional keyword arguments.
serialized (dict[str, Any]): The serialized tool.
input_str (str): The input string.
**kwargs (Any): Additional keyword arguments.
"""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action.
Args:
action: The agent action.
**kwargs: Additional keyword arguments.
action (AgentAction): The agent action.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output: The output of the tool.
**kwargs: Additional keyword arguments.
output (Any): The output of the tool.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors.
Args:
error: The error that occurred.
**kwargs: Additional keyword arguments.
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on an arbitrary text.
Args:
text: The text to print.
**kwargs: Additional keyword arguments.
text (str): The text to print.
**kwargs (Any): Additional keyword arguments.
"""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on the agent end.
Args:
finish: The agent finish.
**kwargs: Additional keyword arguments.
finish (AgentFinish): The agent finish.
**kwargs (Any): Additional keyword arguments.
"""

View File

@@ -19,31 +19,32 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
.. code-block:: python
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-20241022")
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata
```
```txt
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
```
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
!!! version-added "Added in `langchain-core` 0.3.49"
callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata
.. code-block::
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
!!! version-added "Added in version 0.3.49"
"""
@@ -95,46 +96,42 @@ def get_usage_metadata_callback(
"""Get usage metadata callback.
Get context manager for tracking usage metadata across chat model calls using
`AIMessage.usage_metadata`.
``AIMessage.usage_metadata``.
Args:
name: The name of the context variable.
name (str): The name of the context variable. Defaults to
``'usage_metadata_callback'``.
Yields:
The usage metadata callback.
Example:
```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
.. code-block:: python
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-20241022")
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)
```
```txt
{
"gpt-4o-mini-2024-07-18": {
"input_tokens": 8,
"output_tokens": 10,
"total_tokens": 18,
"input_token_details": {"audio": 0, "cache_read": 0},
"output_token_details": {"audio": 0, "reasoning": 0},
},
"claude-3-5-haiku-20241022": {
"input_tokens": 8,
"output_tokens": 21,
"total_tokens": 29,
"input_token_details": {"cache_read": 0, "cache_creation": 0},
},
}
```
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
!!! version-added "Added in `langchain-core` 0.3.49"
with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)
.. code-block::
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
!!! version-added "Added in version 0.3.49"
"""
usage_metadata_callback_var: ContextVar[UsageMetadataCallbackHandler | None] = (

View File

@@ -1,4 +1,18 @@
"""**Chat message history** stores a history of the message interactions in a chat."""
"""**Chat message history** stores a history of the message interactions in a chat.
**Class hierarchy:**
.. code-block::
BaseChatMessageHistory --> <name>ChatMessageHistory # Examples: FileChatMessageHistory, PostgresChatMessageHistory
**Main helpers:**
.. code-block::
AIMessage, HumanMessage, BaseMessage
""" # noqa: E501
from __future__ import annotations
@@ -49,45 +63,46 @@ class BaseChatMessageHistory(ABC):
Example: Shows a default implementation.
```python
import json
import os
from langchain_core.messages import messages_from_dict, message_to_dict
.. code-block:: python
import json
import os
from langchain_core.messages import messages_from_dict, message_to_dict
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
@property
def messages(self) -> list[BaseMessage]:
try:
with open(
os.path.join(self.storage_path, self.session_id),
"r",
encoding="utf-8",
) as f:
messages_data = json.load(f)
return messages_from_dict(messages_data)
except FileNotFoundError:
return []
@property
def messages(self) -> list[BaseMessage]:
try:
with open(
os.path.join(self.storage_path, self.session_id),
"r",
encoding="utf-8",
) as f:
messages_data = json.load(f)
return messages_from_dict(messages_data)
except FileNotFoundError:
return []
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
all_messages = list(self.messages) # Existing messages
all_messages.extend(messages) # Add new messages
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
all_messages = list(self.messages) # Existing messages
all_messages.extend(messages) # Add new messages
serialized = [message_to_dict(message) for message in all_messages]
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump(serialized, f)
serialized = [message_to_dict(message) for message in all_messages]
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump(serialized, f)
def clear(self) -> None:
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump([], f)
def clear(self) -> None:
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump([], f)
```
"""
messages: list[BaseMessage]
@@ -115,13 +130,13 @@ class BaseChatMessageHistory(ABC):
"""Convenience method for adding a human message string to the store.
!!! note
This is a convenience method. Code should favor the bulk `add_messages`
This is a convenience method. Code should favor the bulk ``add_messages``
interface instead to save on round-trips to the persistence layer.
This method may be deprecated in a future release.
Args:
message: The `HumanMessage` to add to the store.
message: The human message to add to the store.
"""
if isinstance(message, HumanMessage):
self.add_message(message)
@@ -129,16 +144,16 @@ class BaseChatMessageHistory(ABC):
self.add_message(HumanMessage(content=message))
def add_ai_message(self, message: AIMessage | str) -> None:
"""Convenience method for adding an `AIMessage` string to the store.
"""Convenience method for adding an AI message string to the store.
!!! note
This is a convenience method. Code should favor the bulk `add_messages`
This is a convenience method. Code should favor the bulk ``add_messages``
interface instead to save on round-trips to the persistence layer.
This method may be deprecated in a future release.
Args:
message: The `AIMessage` to add.
message: The AI message to add.
"""
if isinstance(message, AIMessage):
self.add_message(message)
@@ -153,7 +168,7 @@ class BaseChatMessageHistory(ABC):
Raises:
NotImplementedError: If the sub-class has not implemented an efficient
`add_messages` method.
add_messages method.
"""
if type(self).add_messages != BaseChatMessageHistory.add_messages:
# This means that the sub-class has implemented an efficient add_messages
@@ -173,7 +188,7 @@ class BaseChatMessageHistory(ABC):
in an efficient manner to avoid unnecessary round-trips to the underlying store.
Args:
messages: A sequence of `BaseMessage` objects to store.
messages: A sequence of BaseMessage objects to store.
"""
for message in messages:
self.add_message(message)
@@ -182,7 +197,7 @@ class BaseChatMessageHistory(ABC):
"""Async add a list of messages.
Args:
messages: A sequence of `BaseMessage` objects to store.
messages: A sequence of BaseMessage objects to store.
"""
await run_in_executor(None, self.add_messages, messages)
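To make the round-trip trade-off above concrete, a small sketch using the in-memory implementation that ships alongside this base class:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()

# Convenience wrappers: one store round-trip per call.
history.add_user_message("Hi!")
history.add_ai_message("Hello! How can I help?")

# Preferred bulk interface: a single round-trip for persistent backends.
history.add_messages([HumanMessage("What is 2 + 2?"), AIMessage("4")])

print([type(m).__name__ for m in history.messages])
# ['HumanMessage', 'AIMessage', 'HumanMessage', 'AIMessage']
```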

View File

@@ -27,7 +27,7 @@ class BaseLoader(ABC): # noqa: B024
"""Interface for Document Loader.
Implementations should implement the lazy-loading method using generators
to avoid loading all documents into memory at once.
to avoid loading all Documents into memory at once.
`load` is provided just for user convenience and should not be overridden.
"""
@@ -35,40 +35,38 @@ class BaseLoader(ABC): # noqa: B024
# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
"""Load data into `Document` objects.
"""Load data into Document objects.
Returns:
The documents.
the documents.
"""
return list(self.lazy_load())
async def aload(self) -> list[Document]:
"""Load data into `Document` objects.
"""Load data into Document objects.
Returns:
The documents.
the documents.
"""
return [document async for document in self.alazy_load()]
def load_and_split(
self, text_splitter: TextSplitter | None = None
) -> list[Document]:
"""Load `Document` and split into chunks. Chunks are returned as `Document`.
"""Load Documents and split into chunks. Chunks are returned as Documents.
!!! danger
Do not override this method. It should be considered deprecated!
Do not override this method. It should be considered deprecated!
Args:
text_splitter: `TextSplitter` instance to use for splitting documents.
Defaults to `RecursiveCharacterTextSplitter`.
text_splitter: TextSplitter instance to use for splitting documents.
Defaults to RecursiveCharacterTextSplitter.
Raises:
ImportError: If `langchain-text-splitters` is not installed
and no `text_splitter` is provided.
ImportError: If langchain-text-splitters is not installed
and no text_splitter is provided.
Returns:
List of `Document`.
List of Documents.
"""
if text_splitter is None:
if not _HAS_TEXT_SPLITTERS:
@@ -88,10 +86,10 @@ class BaseLoader(ABC): # noqa: B024
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for `Document`.
"""A lazy loader for Documents.
Yields:
The `Document` objects.
the documents.
"""
if type(self).load != BaseLoader.load:
return iter(self.load())
@@ -99,10 +97,10 @@ class BaseLoader(ABC): # noqa: B024
raise NotImplementedError(msg)
async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for `Document`.
"""A lazy loader for Documents.
Yields:
The `Document` objects.
the documents.
"""
iterator = await run_in_executor(None, self.lazy_load)
done = object()
@@ -117,7 +115,7 @@ class BaseBlobParser(ABC):
"""Abstract interface for blob parsers.
A blob parser provides a way to parse raw data stored in a blob into one
or more `Document` objects.
or more documents.
The parser can be composed with blob loaders, making it easy to reuse
a parser independent of how the blob was originally loaded.
@@ -130,25 +128,25 @@ class BaseBlobParser(ABC):
Subclasses are required to implement this method.
Args:
blob: `Blob` instance
blob: Blob instance
Returns:
Generator of `Document` objects
Generator of documents
"""
def parse(self, blob: Blob) -> list[Document]:
"""Eagerly parse the blob into a `Document` or list of `Document` objects.
"""Eagerly parse the blob into a document or documents.
This is a convenience method for interactive development environment.
Production applications should favor the `lazy_parse` method instead.
Production applications should favor the lazy_parse method instead.
Subclasses should generally not override this parse method.
Args:
blob: `Blob` instance
blob: Blob instance
Returns:
List of `Document` objects
List of documents
"""
return list(self.lazy_parse(blob))
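To ground the loader contract above, here is a minimal hypothetical `BaseLoader` subclass that implements only `lazy_load`; the file path and per-line splitting are illustrative:

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineLoader(BaseLoader):
    """Yield one Document per line of a text file, lazily."""

    def __init__(self, path: str) -> None:
        self.path = path

    def lazy_load(self) -> Iterator[Document]:
        with open(self.path, encoding="utf-8") as f:
            for line_number, line in enumerate(f):
                yield Document(
                    page_content=line.rstrip("\n"),
                    metadata={"source": self.path, "line": line_number},
                )


loader = LineLoader("notes.txt")
for doc in loader.lazy_load():  # loader.load() would materialize the whole list
    print(doc.metadata["line"], doc.page_content)
```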

View File

@@ -28,7 +28,7 @@ class BlobLoader(ABC):
def yield_blobs(
self,
) -> Iterable[Blob]:
"""A lazy loader for raw data represented by LangChain's `Blob` object.
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs.
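A hedged sketch of the `yield_blobs` contract, pairing a hypothetical directory-walking loader with `Blob.from_path`:

```python
from collections.abc import Iterable
from pathlib import Path

from langchain_core.document_loaders import BlobLoader
from langchain_core.documents import Blob


class TxtBlobLoader(BlobLoader):
    """Lazily yield a Blob for every .txt file under a directory."""

    def __init__(self, root: str) -> None:
        self.root = Path(root)

    def yield_blobs(self) -> Iterable[Blob]:
        for path in sorted(self.root.glob("*.txt")):
            yield Blob.from_path(path)  # data stays on disk until read
```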

View File

@@ -14,27 +14,30 @@ from langchain_core.documents import Document
class LangSmithLoader(BaseLoader):
"""Load LangSmith Dataset examples as `Document` objects.
"""Load LangSmith Dataset examples as Documents.
Loads the example inputs as the `Document` page content and places the entire
example into the `Document` metadata. This allows you to easily create few-shot
example retrievers from the loaded documents.
Loads the example inputs as the Document page content and places the entire example
into the Document metadata. This allows you to easily create few-shot example
retrievers from the loaded documents.
??? note "Lazy loading example"
??? note "Lazy load"
```python
from langchain_core.document_loaders import LangSmithLoader
.. code-block:: python
loader = LangSmithLoader(dataset_id="...", limit=100)
docs = []
for doc in loader.lazy_load():
docs.append(doc)
```
from langchain_core.document_loaders import LangSmithLoader
```python
# -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
```
"""
loader = LangSmithLoader(dataset_id="...", limit=100)
docs = []
for doc in loader.lazy_load():
docs.append(doc)
.. code-block:: python
# -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
!!! version-added "Added in version 0.2.34"
""" # noqa: E501
def __init__(
self,
@@ -57,25 +60,26 @@ class LangSmithLoader(BaseLoader):
"""Create a LangSmith loader.
Args:
dataset_id: The ID of the dataset to filter by.
dataset_name: The name of the dataset to filter by.
dataset_id: The ID of the dataset to filter by. Defaults to `None`.
dataset_name: The name of the dataset to filter by. Defaults to `None`.
content_key: The inputs key to set as Document page content. `'.'` characters
are interpreted as nested keys. E.g. `content_key="first.second"` will
result in
`Document(page_content=format_content(example.inputs["first"]["second"]))`
format_content: Function for converting the content extracted from the example
inputs into a string. Defaults to JSON-encoding the contents.
example_ids: The IDs of the examples to filter by.
as_of: The dataset version tag or timestamp to retrieve the examples as of.
Response examples will only be those that were present at the time of
the tagged (or timestamped) version.
example_ids: The IDs of the examples to filter by. Defaults to `None`.
as_of: The dataset version tag OR
timestamp to retrieve the examples as of.
Response examples will only be those that were present at the time
of the tagged (or timestamped) version.
splits: A list of dataset splits, which are
divisions of your dataset such as `train`, `test`, or `validation`.
divisions of your dataset such as 'train', 'test', or 'validation'.
Returns examples only from the specified splits.
inline_s3_urls: Whether to inline S3 URLs.
offset: The offset to start from.
inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
offset: The offset to start from. Defaults to 0.
limit: The maximum number of examples to return.
metadata: Metadata to filter by.
metadata: Metadata to filter by. Defaults to `None`.
filter: A structured filter string to apply to the examples.
client: LangSmith Client. If not provided will be initialized from below args.
client_kwargs: Keyword args to pass to LangSmith client init. Should only be

View File

@@ -1,28 +1,8 @@
"""Documents module for data retrieval and processing workflows.
"""Documents module.
This module provides core abstractions for handling data in retrieval-augmented
generation (RAG) pipelines, vector stores, and document processing workflows.
**Document** module is a collection of classes that handle documents
and their transformations.
!!! warning "Documents vs. message content"
This module is distinct from `langchain_core.messages.content`, which provides
multimodal content blocks for **LLM chat I/O** (text, images, audio, etc. within
messages).
**Key distinction:**
- **Documents** (this module): For **data retrieval and processing workflows**
- Vector stores, retrievers, RAG pipelines
- Text chunking, embedding, and semantic search
- Example: Chunks of a PDF stored in a vector database
- **Content Blocks** (`messages.content`): For **LLM conversational I/O**
- Multimodal message content sent to/from models
- Tool calls, reasoning, citations within chat
- Example: An image sent to a vision model in a chat message (via
[`ImageContentBlock`][langchain.messages.ImageContentBlock])
While both can represent similar data types (text, files), they serve different
architectural purposes in LangChain applications.
"""
from typing import TYPE_CHECKING

View File

@@ -1,16 +1,4 @@
"""Base classes for media and documents.
This module contains core abstractions for **data retrieval and processing workflows**:
- `BaseMedia`: Base class providing `id` and `metadata` fields
- `Blob`: Raw data loading (files, binary data) - used by document loaders
- `Document`: Text content for retrieval (RAG, vector stores, semantic search)
!!! note "Not for LLM chat messages"
These classes are for data processing pipelines, not LLM I/O. For multimodal
content in chat messages (images, audio in conversations), see
`langchain.messages` content blocks instead.
"""
"""Base classes for media and documents."""
from __future__ import annotations
@@ -31,23 +19,27 @@ PathLike = str | PurePath
class BaseMedia(Serializable):
"""Base class for content used in retrieval and data processing workflows.
"""Use to represent media content.
Provides common fields for content that needs to be stored, indexed, or searched.
Media objects can be used to represent raw data, such as text or binary data.
!!! note
For multimodal content in **chat messages** (images, audio sent to/from LLMs),
use `langchain.messages` content blocks instead.
LangChain Media objects allow associating metadata and an optional identifier
with the content.
The presence of an ID and metadata make it easier to store, index, and search
over the content in a structured way.
"""
# The ID field is optional at the moment.
# It will likely become required in a future major release after
# it has been adopted by enough VectorStore implementations.
# it has been adopted by enough vectorstore implementations.
id: str | None = Field(default=None, coerce_numbers_to_str=True)
"""An optional identifier for the document.
Ideally this should be unique across the document collection and formatted
as a UUID, but this will not be enforced.
!!! version-added "Added in version 0.2.11"
"""
metadata: dict = Field(default_factory=dict)
@@ -55,70 +47,72 @@ class BaseMedia(Serializable):
class Blob(BaseMedia):
"""Raw data abstraction for document loading and file processing.
"""Blob represents raw data by either reference or value.
Represents raw bytes or text, either in-memory or by file reference. Used
primarily by document loaders to decouple data loading from parsing.
Provides an interface to materialize the blob in different representations, and
help to decouple the development of data loaders from the downstream parsing of
the raw data.
Inspired by [Mozilla's `Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob)
Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob
???+ example "Initialize a blob from in-memory data"
Example: Initialize a blob from in-memory data
```python
from langchain_core.documents import Blob
.. code-block:: python
blob = Blob.from_data("Hello, world!")
from langchain_core.documents import Blob
# Read the blob as a string
print(blob.as_string())
blob = Blob.from_data("Hello, world!")
# Read the blob as bytes
print(blob.as_bytes())
# Read the blob as a string
print(blob.as_string())
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```
# Read the blob as bytes
print(blob.as_bytes())
??? example "Load from memory and specify MIME type and metadata"
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```python
from langchain_core.documents import Blob
Example: Load from memory and specify mime-type and metadata
blob = Blob.from_data(
data="Hello, world!",
mime_type="text/plain",
metadata={"source": "https://example.com"},
)
```
.. code-block:: python
??? example "Load the blob from a file"
from langchain_core.documents import Blob
```python
from langchain_core.documents import Blob
blob = Blob.from_data(
data="Hello, world!",
mime_type="text/plain",
metadata={"source": "https://example.com"},
)
blob = Blob.from_path("path/to/file.txt")
Example: Load the blob from a file
# Read the blob as a string
print(blob.as_string())
.. code-block:: python
# Read the blob as bytes
print(blob.as_bytes())
from langchain_core.documents import Blob
blob = Blob.from_path("path/to/file.txt")
# Read the blob as a string
print(blob.as_string())
# Read the blob as bytes
print(blob.as_bytes())
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```
"""
data: bytes | str | None = None
"""Raw data associated with the `Blob`."""
"""Raw data associated with the blob."""
mimetype: str | None = None
"""MIME type, not to be confused with a file extension."""
"""MimeType not to be confused with a file extension."""
encoding: str = "utf-8"
"""Encoding to use if decoding the bytes into a string.
Uses `utf-8` as default encoding if decoding to string.
Use utf-8 as default encoding, if decoding to string.
"""
path: PathLike | None = None
"""Location where the original content was found."""
@@ -132,9 +126,9 @@ class Blob(BaseMedia):
def source(self) -> str | None:
The source location of the blob as a string if known, otherwise `None`.
If a path is associated with the `Blob`, it will default to the path location.
If a path is associated with the blob, it will default to the path location.
Unless explicitly set via a metadata field called `'source'`, in which
Unless explicitly set via a metadata field called "source", in which
case that value will be used instead.
"""
if self.metadata and "source" in self.metadata:
@@ -218,15 +212,15 @@ class Blob(BaseMedia):
"""Load the blob from a path like object.
Args:
path: Path-like object to file to be read
path: path like object to file to be read
encoding: Encoding to use if decoding the bytes into a string
mime_type: If provided, will be set as the MIME type of the data
guess_type: If `True`, the MIME type will be guessed from the file
extension, if a MIME type was not provided
metadata: Metadata to associate with the `Blob`
mime_type: if provided, will be set as the mime-type of the data
guess_type: If `True`, the mimetype will be guessed from the file extension,
if a mime-type was not provided
metadata: Metadata to associate with the blob
Returns:
`Blob` instance
Blob instance
"""
if mime_type is None and guess_type:
mimetype = mimetypes.guess_type(path)[0] if guess_type else None
@@ -252,17 +246,17 @@ class Blob(BaseMedia):
path: str | None = None,
metadata: dict | None = None,
) -> Blob:
"""Initialize the `Blob` from in-memory data.
"""Initialize the blob from in-memory data.
Args:
data: The in-memory data associated with the `Blob`
data: the in-memory data associated with the blob
encoding: Encoding to use if decoding the bytes into a string
mime_type: If provided, will be set as the MIME type of the data
path: If provided, will be set as the source from which the data came
metadata: Metadata to associate with the `Blob`
mime_type: if provided, will be set as the mime-type of the data
path: if provided, will be set as the source from which the data came
metadata: Metadata to associate with the blob
Returns:
`Blob` instance
Blob instance
"""
return cls(
data=data,
@@ -283,18 +277,16 @@ class Blob(BaseMedia):
class Document(BaseMedia):
"""Class for storing a piece of text and associated metadata.
!!! note
`Document` is for **retrieval workflows**, not chat I/O. For sending text
to an LLM in a conversation, use message types from `langchain.messages`.
Example:
```python
from langchain_core.documents import Document
document = Document(
page_content="Hello, world!", metadata={"source": "https://example.com"}
)
```
.. code-block:: python
from langchain_core.documents import Document
document = Document(
page_content="Hello, world!", metadata={"source": "https://example.com"}
)
"""
page_content: str
@@ -309,12 +301,12 @@ class Document(BaseMedia):
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
"""Return True as this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
"""Get the namespace of the langchain object.
Returns:
["langchain", "schema", "document"]
@@ -322,10 +314,10 @@ class Document(BaseMedia):
return ["langchain", "schema", "document"]
def __str__(self) -> str:
"""Override `__str__` to restrict it to page_content and metadata.
"""Override __str__ to restrict it to page_content and metadata.
Returns:
A string representation of the `Document`.
A string representation of the Document.
"""
# The format matches pydantic format for __str__.
#

View File

@@ -21,14 +21,14 @@ class BaseDocumentCompressor(BaseModel, ABC):
This abstraction is primarily used for post-processing of retrieved documents.
`Document` objects matching a given query are first retrieved.
Documents matching a given query are first retrieved.
Then the list of documents can be further processed.
For example, one could re-rank the retrieved documents using an LLM.
!!! note
Users should favor using a `RunnableLambda` instead of sub-classing from this
Users should favor using a RunnableLambda instead of sub-classing from this
interface.
"""
@@ -43,9 +43,9 @@ class BaseDocumentCompressor(BaseModel, ABC):
"""Compress retrieved documents given the query context.
Args:
documents: The retrieved `Document` objects.
documents: The retrieved documents.
query: The query context.
callbacks: Optional `Callbacks` to run during compression.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
@@ -61,9 +61,9 @@ class BaseDocumentCompressor(BaseModel, ABC):
"""Async compress retrieved documents given the query context.
Args:
documents: The retrieved `Document` objects.
documents: The retrieved documents.
query: The query context.
callbacks: Optional `Callbacks` to run during compression.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
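For illustration, a minimal hypothetical compressor that post-filters retrieved documents by keyword; real implementations would typically re-rank with a model instead. Because the base class is a pydantic model, this stateless filter needs no extra fields:

```python
from collections.abc import Sequence

from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.documents.compressor import BaseDocumentCompressor


class KeywordFilterCompressor(BaseDocumentCompressor):
    """Keep only documents whose content mentions the query string."""

    def compress_documents(
        self,
        documents: Sequence[Document],
        query: str,
        callbacks: Callbacks = None,
    ) -> Sequence[Document]:
        return [
            doc for doc in documents if query.lower() in doc.page_content.lower()
        ]
```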

View File

@@ -16,38 +16,39 @@ if TYPE_CHECKING:
class BaseDocumentTransformer(ABC):
"""Abstract base class for document transformation.
A document transformation takes a sequence of `Document` objects and returns a
sequence of transformed `Document` objects.
A document transformation takes a sequence of Documents and returns a
sequence of transformed Documents.
Example:
```python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
.. code-block:: python
class Config:
arbitrary_types_allowed = True
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents,
self.similarity_fn,
self.similarity_threshold,
)
return [stateful_documents[i] for i in sorted(included_idxs)]
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents,
self.similarity_fn,
self.similarity_threshold,
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
```
"""
@abstractmethod
@@ -57,10 +58,10 @@ class BaseDocumentTransformer(ABC):
"""Transform a list of documents.
Args:
documents: A sequence of `Document` objects to be transformed.
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed `Document` objects.
A sequence of transformed Documents.
"""
async def atransform_documents(
@@ -69,10 +70,10 @@ class BaseDocumentTransformer(ABC):
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of `Document` objects to be transformed.
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed `Document` objects.
A sequence of transformed Documents.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs

View File

@@ -18,38 +18,40 @@ class FakeEmbeddings(Embeddings, BaseModel):
This embedding model creates embeddings by sampling from a normal distribution.
!!! danger "Toy model"
Do not use this outside of testing, as it is not a real embedding model.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
```python
from langchain_core.embeddings import FakeEmbeddings
.. code-block:: python
embed = FakeEmbeddings(size=100)
```
from langchain_core.embeddings import FakeEmbeddings
embed = FakeEmbeddings(size=100)
Embed single text:
```python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
```
```python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
```python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
```
```python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
```
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
@@ -73,38 +75,40 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.
!!! danger "Toy model"
Do not use this outside of testing, as it is not a real embedding model.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
```python
from langchain_core.embeddings import DeterministicFakeEmbedding
.. code-block:: python
embed = DeterministicFakeEmbedding(size=100)
```
from langchain_core.embeddings import DeterministicFakeEmbedding
embed = DeterministicFakeEmbedding(size=100)
Embed single text:
```python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
```
```python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
```python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
```
```python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
```
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int

View File

@@ -29,7 +29,7 @@ class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
max_length: int = 2048
"""Max length for the prompt, beyond which examples are cut."""
example_text_lengths: list[int] = Field(default_factory=list)
example_text_lengths: list[int] = Field(default_factory=list) # :meta private:
"""Length of each example."""
def add_example(self, example: dict[str, str]) -> None:

View File

@@ -41,7 +41,7 @@ class _VectorStoreExampleSelector(BaseExampleSelector, BaseModel, ABC):
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
vectorstore_kwargs: dict[str, Any] | None = None
"""Extra arguments passed to similarity_search function of the `VectorStore`."""
"""Extra arguments passed to similarity_search function of the vectorstore."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
@@ -154,12 +154,12 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select.
k: Number of examples to select. Default is 4.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
vectorstore_kwargs: Extra arguments passed to similarity_search function
of the `VectorStore`.
of the vectorstore.
vectorstore_cls_kwargs: Optional kwargs containing URL for the vector store
Returns:
@@ -198,12 +198,12 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select.
k: Number of examples to select. Default is 4.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
vectorstore_kwargs: Extra arguments passed to similarity_search function
of the `VectorStore`.
of the vectorstore.
vectorstore_cls_kwargs: Optional kwargs containing URL for the vector store
Returns:
@@ -285,13 +285,14 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select.
fetch_k: Number of `Document` objects to fetch to pass to MMR algorithm.
k: Number of examples to select. Default is 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Default is 20.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
vectorstore_kwargs: Extra arguments passed to similarity_search function
of the `VectorStore`.
of the vectorstore.
vectorstore_cls_kwargs: Optional kwargs containing URL for the vector store
Returns:
@@ -332,13 +333,14 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select.
fetch_k: Number of `Document` objects to fetch to pass to MMR algorithm.
k: Number of examples to select. Default is 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Default is 20.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
vectorstore_kwargs: Extra arguments passed to similarity_search function
of the `VectorStore`.
of the vectorstore.
vectorstore_cls_kwargs: Optional kwargs containing URL for the vector store
Returns:
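For context, a runnable sketch of the `from_examples` flow documented in this hunk, using the in-memory vector store and deterministic fake embeddings (both ship with `langchain-core`) so it needs no external services:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.vectorstores import InMemoryVectorStore

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]

selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    DeterministicFakeEmbedding(size=16),
    InMemoryVectorStore,  # vectorstore_cls; from_texts is called internally
    k=1,
)
print(selector.select_examples({"input": "joyful"}))  # -> one example dict
```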

View File

@@ -16,10 +16,9 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser.
`OutputParserException` will be available to catch and handle in ways to fix the
parsing error, while other errors will be raised.
that also may arise inside the output parser. OutputParserExceptions will be
available to catch and handle in ways to fix the parsing error, while other
errors will be raised.
"""
def __init__(
@@ -29,24 +28,24 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
llm_output: str | None = None,
send_to_llm: bool = False, # noqa: FBT001,FBT002
):
"""Create an `OutputParserException`.
"""Create an OutputParserException.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a model to
try and remediate the issue.
observation: String explanation of error which can be passed to a
model to try and remediate the issue. Defaults to `None`.
llm_output: String model output which is error-ing.
Defaults to `None`.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an `OutputParserException` has been raised.
after an OutputParserException has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
Defaults to `False`.
Raises:
ValueError: If `send_to_llm` is `True` but either observation or
`llm_output` are not provided.
ValueError: If ``send_to_llm`` is True but either observation or
``llm_output`` are not provided.
"""
if isinstance(error, str):
error = create_message(
@@ -68,11 +67,11 @@ class ErrorCode(Enum):
"""Error codes."""
INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT"
INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS" # Used in JS; not Py (yet)
INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS"
MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE"
MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION" # Used in JS; not Py (yet)
MODEL_NOT_FOUND = "MODEL_NOT_FOUND" # Used in JS; not Py (yet)
MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT" # Used in JS; not Py (yet)
MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION"
MODEL_NOT_FOUND = "MODEL_NOT_FOUND"
MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT"
OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE"
@@ -88,6 +87,6 @@ def create_message(*, message: str, error_code: ErrorCode) -> str:
"""
return (
f"{message}\n"
"For troubleshooting, visit: https://docs.langchain.com/oss/python/langchain"
f"/errors/{error_code.value} "
"For troubleshooting, visit: https://python.langchain.com/docs/"
f"troubleshooting/errors/{error_code.value} "
)
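A small sketch of the intended usage: raise `OutputParserException` from a parser so callers can catch it separately from ordinary errors (the parser itself is hypothetical):

```python
from langchain_core.exceptions import OutputParserException


def parse_int(model_output: str) -> int:
    try:
        return int(model_output.strip())
    except ValueError as exc:
        raise OutputParserException(
            f"Expected an integer, got {model_output!r}",
            llm_output=model_output,
        ) from exc


try:
    parse_int("forty-two")
except OutputParserException as exc:
    print(f"Recoverable parsing error: {exc}")  # retry, re-prompt, etc.
```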

View File

@@ -1,7 +1,7 @@
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a `VectorStore` while avoiding duplicated content and over-writing content
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""

View File

@@ -298,58 +298,61 @@ def index(
For the time being, documents are indexed using their hashes, and users
are not able to specify the uid of the document.
!!! warning "Behavior changed in `langchain-core` 0.3.25"
!!! warning "Behavior changed in 0.3.25"
Added `scoped_full` cleanup mode.
!!! warning
* In full mode, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
* In incremental mode, if documents associated with a particular
source id appear across different batches, the indexing API
will do some redundant work. This will still result in the
correct end state of the index, but will unfortunately not be
100% efficient. For example, if a given document is split into 15
chunks, and we index them using a batch size of 5, we'll have 3 batches
all with the same source id. In general, to avoid doing too much
redundant work select as big a batch size as possible.
source id appear across different batches, the indexing API
will do some redundant work. This will still result in the
correct end state of the index, but will unfortunately not be
100% efficient. For example, if a given document is split into 15
chunks, and we index them using a batch size of 5, we'll have 3 batches
all with the same source id. In general, to avoid doing too much
redundant work select as big a batch size as possible.
* The `scoped_full` mode is suitable if determining an appropriate batch size
is challenging or if your data loader cannot return the entire dataset at
once. This mode keeps track of source IDs in memory, which should be fine
for most use cases. If your dataset is large (10M+ docs), you will likely
need to parallelize the indexing process regardless.
is challenging or if your data loader cannot return the entire dataset at
once. This mode keeps track of source IDs in memory, which should be fine
for most use cases. If your dataset is large (10M+ docs), you will likely
need to parallelize the indexing process regardless.
Args:
docs_source: Data loader or iterable of documents to index.
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: `VectorStore` or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
- incremental: Cleans up all documents that haven't been updated AND
that are associated with source IDs that were seen during indexing.
Clean up is done continuously during indexing helping to minimize the
probability of users seeing duplicated content.
that are associated with source ids that were seen during indexing.
Clean up is done continuously during indexing helping to minimize the
probability of users seeing duplicated content.
- full: Delete all documents that have not been returned by the loader
during this run of indexing.
Clean up runs after all documents have been indexed.
This means that users may see duplicated content during indexing.
during this run of indexing.
Clean up runs after all documents have been indexed.
This means that users may see duplicated content during indexing.
- scoped_full: Similar to Full, but only deletes all documents
that haven't been updated AND that are associated with
source IDs that were seen during indexing.
that haven't been updated AND that are associated with
source ids that were seen during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document.
of the document. Default is None.
cleanup_batch_size: Batch size to use when cleaning up documents.
Default is 1_000.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Options include "blake2b", "sha256", and "sha512".
metadata. Default is "sha1".
Other options include "blake2b", "sha256", and "sha512".
!!! version-added "Added in `langchain-core` 0.3.66"
!!! version-added "Added in version 0.3.66"
key_encoder: Hashing algorithm to use for hashing the document.
If not provided, a default encoder using SHA-1 will be used.
@@ -363,10 +366,10 @@ def index(
When changing the key encoder, you must change the
index as well to avoid duplicated documents in the cache.
upsert_kwargs: Additional keyword arguments to pass to the add_documents
method of the `VectorStore` or the upsert method of the DocumentIndex.
method of the VectorStore or the upsert method of the DocumentIndex.
For example, you can use this to specify a custom vector_field:
upsert_kwargs={"vector_field": "embedding"}
!!! version-added "Added in `langchain-core` 0.3.10"
!!! version-added "Added in version 0.3.10"
Returns:
Indexing result which contains information about how many documents
@@ -375,10 +378,10 @@ def index(
Raises:
ValueError: If cleanup mode is not one of 'incremental', 'full' or None
ValueError: If cleanup mode is incremental and source_id_key is None.
ValueError: If `VectorStore` does not have
ValueError: If vectorstore does not have
"delete" and "add_documents" required methods.
ValueError: If source_id_key is not None, but is not a string or callable.
TypeError: If `vectorstore` is not a `VectorStore` or a DocumentIndex.
TypeError: If `vectorstore` is not a VectorStore or a DocumentIndex.
AssertionError: If `source_id` is None when cleanup mode is incremental.
(should be unreachable code).
"""
@@ -415,7 +418,7 @@ def index(
raise ValueError(msg)
if type(destination).delete == VectorStore.delete:
# Checking if the VectorStore has overridden the default delete method
# Checking if the vectorstore has overridden the default delete method
# implementation which just raises a NotImplementedError
msg = "Vectorstore has not implemented the delete method"
raise ValueError(msg)
@@ -466,11 +469,11 @@ def index(
]
if cleanup in {"incremental", "scoped_full"}:
# Source IDs are required.
for source_id, hashed_doc in zip(source_ids, hashed_docs, strict=False):
if source_id is None:
msg = (
f"Source IDs are required when cleanup mode is "
f"Source ids are required when cleanup mode is "
f"incremental or scoped_full. "
f"Document that starts with "
f"content: {hashed_doc.page_content[:100]} "
@@ -479,7 +482,7 @@ def index(
raise ValueError(msg)
if cleanup == "scoped_full":
scoped_full_cleanup_source_ids.add(source_id)
# Source IDs cannot be None after for loop above.
source_ids = cast("Sequence[str]", source_ids)
exists_batch = record_manager.exists(
@@ -538,7 +541,7 @@ def index(
# If source IDs are provided, we can do the deletion incrementally!
if cleanup == "incremental":
# Get the uids of the documents that were not returned by the loader.
# mypy isn't good enough to determine that source IDs cannot be None
# here due to a check that's happening above, so we check again.
for source_id in source_ids:
if source_id is None:
@@ -636,58 +639,61 @@ async def aindex(
For the time being, documents are indexed using their hashes, and users
are not able to specify the uid of the document.
!!! warning "Behavior changed in `langchain-core` 0.3.25"
!!! warning "Behavior changed in 0.3.25"
Added `scoped_full` cleanup mode.
!!! warning
* In full mode, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
* In incremental mode, if documents associated with a particular
source id appear across different batches, the indexing API
will do some redundant work. This will still result in the
correct end state of the index, but will unfortunately not be
100% efficient. For example, if a given document is split into 15
chunks, and we index them using a batch size of 5, we'll have 3 batches
all with the same source id. In general, to avoid doing too much
redundant work select as big a batch size as possible.
* The `scoped_full` mode is suitable if determining an appropriate batch size
is challenging or if your data loader cannot return the entire dataset at
once. This mode keeps track of source IDs in memory, which should be fine
for most use cases. If your dataset is large (10M+ docs), you will likely
need to parallelize the indexing process regardless.
Args:
docs_source: Data loader or iterable of documents to index.
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: `VectorStore` or `DocumentIndex` to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
- incremental: Cleans up all documents that haven't been updated AND
that are associated with source IDs that were seen during indexing.
Clean up is done continuously during indexing helping to minimize the
probability of users seeing duplicated content.
- full: Delete all documents that have not been returned by the loader
during this run of indexing.
Clean up runs after all documents have been indexed.
This means that users may see duplicated content during indexing.
- scoped_full: Similar to full, but only deletes all documents
that haven't been updated AND that are associated with
source IDs that were seen during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document. Default is None.
cleanup_batch_size: Batch size to use when cleaning up documents.
Default is 1_000.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Default is "sha1". Other options include "blake2b", "sha256",
and "sha512".
!!! version-added "Added in `langchain-core` 0.3.66"
!!! version-added "Added in version 0.3.66"
key_encoder: Hashing algorithm to use for hashing the document.
If not provided, a default encoder using SHA-1 will be used.
@@ -701,10 +707,10 @@ async def aindex(
When changing the key encoder, you must change the
index as well to avoid duplicated documents in the cache.
upsert_kwargs: Additional keyword arguments to pass to the `add_documents`
method of the `VectorStore` or the `upsert` method of the `DocumentIndex`.
For example, you can use this to specify a custom vector_field:
upsert_kwargs={"vector_field": "embedding"}
!!! version-added "Added in `langchain-core` 0.3.10"
!!! version-added "Added in version 0.3.10"
Returns:
Indexing result which contains information about how many documents
@@ -713,10 +719,10 @@ async def aindex(
Raises:
ValueError: If cleanup mode is not one of 'incremental', 'full', 'scoped_full' or None
ValueError: If cleanup mode is incremental and source_id_key is None.
ValueError: If `VectorStore` does not have
"adelete" and "aadd_documents" required methods.
ValueError: If source_id_key is not None, but is not a string or callable.
TypeError: If `vector_store` is not a `VectorStore` or a `DocumentIndex`.
AssertionError: If `source_id_key` is None when cleanup mode is
incremental or `scoped_full` (should be unreachable).
"""
@@ -757,7 +763,7 @@ async def aindex(
type(destination).adelete == VectorStore.adelete
and type(destination).delete == VectorStore.delete
):
# Checking if the VectorStore has overridden the default adelete or delete
# methods implementation which just raises a NotImplementedError
msg = "Vectorstore has not implemented the adelete or delete method"
raise ValueError(msg)
@@ -815,11 +821,11 @@ async def aindex(
]
if cleanup in {"incremental", "scoped_full"}:
# If the cleanup mode is incremental, source IDs are required.
for source_id, hashed_doc in zip(source_ids, hashed_docs, strict=False):
if source_id is None:
msg = (
f"Source IDs are required when cleanup mode is "
f"Source ids are required when cleanup mode is "
f"incremental or scoped_full. "
f"Document that starts with "
f"content: {hashed_doc.page_content[:100]} "
@@ -828,7 +834,7 @@ async def aindex(
raise ValueError(msg)
if cleanup == "scoped_full":
scoped_full_cleanup_source_ids.add(source_id)
# Source IDs cannot be None after for loop above.
source_ids = cast("Sequence[str]", source_ids)
exists_batch = await record_manager.aexists(
@@ -888,7 +894,7 @@ async def aindex(
if cleanup == "incremental":
# Get the uids of the documents that were not returned by the loader.
# mypy isn't good enough to determine that source IDs cannot be None
# here due to a check that's happening above, so we check again.
for source_id in source_ids:
if source_id is None:

View File

@@ -25,7 +25,7 @@ class RecordManager(ABC):
The record manager abstraction is used by the langchain indexing API.
The record manager keeps track of which documents have been
written into a `VectorStore` and when they were written.
The indexing API computes hashes for each document and stores the hash
together with the write time and the source id in the record manager.
@@ -37,7 +37,7 @@ class RecordManager(ABC):
already been indexed, and to only index new documents.
The main benefit of this abstraction is that it works across many vectorstores.
To be supported, a `VectorStore` only needs to support the ability to add and
delete documents by ID. Using the record manager, the indexing API will
be able to delete outdated documents and avoid redundant indexing of documents
that have already been indexed.
@@ -45,13 +45,13 @@ class RecordManager(ABC):
The main constraints of this abstraction are:
1. It relies on timestamps to determine which documents have been
indexed and which have not. This means that the timestamps must be
monotonically increasing. The timestamp should be measured by the
server to minimize issues.
2. The record manager is currently implemented separately from the
vectorstore, which means that the overall system becomes distributed
and may create issues with consistency. For example, writing to the
record manager succeeds, but the corresponding write to the `VectorStore` fails.
"""
def __init__(
@@ -61,7 +61,7 @@ class RecordManager(ABC):
"""Initialize the record manager.
Args:
namespace: The namespace for the record manager.
"""
self.namespace = namespace
@@ -244,7 +244,7 @@ class InMemoryRecordManager(RecordManager):
"""Initialize the in-memory record manager.
Args:
namespace: The namespace for the record manager.
"""
super().__init__(namespace)
# Each key points to a dictionary
@@ -278,10 +278,10 @@ class InMemoryRecordManager(RecordManager):
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
Defaults to `None`.
time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores.
in the system that stores. Defaults to `None`.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.
@@ -315,10 +315,10 @@ class InMemoryRecordManager(RecordManager):
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
Defaults to `None`.
time_at_least: Optional timestamp. Implementations can use this
to optionally verify that the timestamp is at least this time
in the system that stores the data. Defaults to `None`.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp; if not,
raise an error.
@@ -361,13 +361,13 @@ class InMemoryRecordManager(RecordManager):
Args:
before: Filter to list records updated before this time.
Defaults to `None`.
after: Filter to list records updated after this time.
Defaults to `None`.
group_ids: Filter to list records with specific group IDs.
Defaults to `None`.
limit: Optional limit on the number of records to return.
Defaults to `None`.
Returns:
A list of keys for the matching records.
@@ -397,13 +397,13 @@ class InMemoryRecordManager(RecordManager):
Args:
before: Filter to list records updated before this time.
Defaults to `None`.
after: Filter to list records updated after this time.
Defaults to `None`.
group_ids: Filter to list records with specific group IDs.
Defaults to `None`.
limit: Optional limit on the number of records to return.
Defaults to `None`.
Returns:
A list of keys for the matching records.
@@ -460,7 +460,7 @@ class UpsertResponse(TypedDict):
class DeleteResponse(TypedDict, total=False):
"""A generic response for delete operation.
The fields in this response are optional and whether the `VectorStore`
The fields in this response are optional and whether the vectorstore
returns them or not is up to the implementation.
"""
@@ -508,6 +508,8 @@ class DocumentIndex(BaseRetriever):
1. Storing documents in the index.
2. Fetching documents by ID.
3. Searching for documents using a query.
!!! version-added "Added in version 0.2.29"
"""
@abc.abstractmethod
@@ -518,40 +520,40 @@ class DocumentIndex(BaseRetriever):
if it is provided. If the ID is not provided, the upsert method is free
to generate an ID for the content.
When an ID is specified and the content already exists in the `VectorStore`,
the upsert method should update the content with the new data. If the content
does not exist, the upsert method should add the item to the `VectorStore`.
Args:
items: Sequence of documents to add to the `VectorStore`.
**kwargs: Additional keyword arguments.
Returns:
A response object that contains the list of IDs that were
successfully added or updated in the `VectorStore` and the list of IDs that
failed to be added or updated.
"""
async def aupsert(
self, items: Sequence[Document], /, **kwargs: Any
) -> UpsertResponse:
"""Add or update documents in the `VectorStore`. Async version of `upsert`.
"""Add or update documents in the vectorstore. Async version of upsert.
The upsert functionality should utilize the ID field of the item
if it is provided. If the ID is not provided, the upsert method is free
to generate an ID for the item.
When an ID is specified and the item already exists in the `VectorStore`,
the upsert method should update the item with the new data. If the item
does not exist, the upsert method should add the item to the `VectorStore`.
Args:
items: Sequence of documents to add to the `VectorStore`.
**kwargs: Additional keyword arguments.
Returns:
A response object that contains the list of IDs that were
successfully added or updated in the `VectorStore` and the list of IDs that
failed to be added or updated.
"""
return await run_in_executor(
@@ -568,13 +570,13 @@ class DocumentIndex(BaseRetriever):
Calling delete without any input parameters should raise a ValueError!
Args:
ids: List of IDs to delete.
**kwargs: Additional keyword arguments. This is up to the implementation.
For example, can include an option to delete the entire index,
or else issue a non-blocking delete etc.
Returns:
A response object that contains the list of IDs that were
successfully deleted and the list of IDs that failed to be deleted.
"""
@@ -586,12 +588,12 @@ class DocumentIndex(BaseRetriever):
Calling adelete without any input parameters should raise a ValueError!
Args:
ids: List of IDs to delete.
**kwargs: Additional keyword arguments. This is up to the implementation.
For example, can include an option to delete the entire index.
Returns:
A response object that contains the list of IDs that were
successfully deleted and the list of IDs that failed to be deleted.
"""
return await run_in_executor(
@@ -622,10 +624,10 @@ class DocumentIndex(BaseRetriever):
Args:
ids: List of IDs to get.
**kwargs: Additional keyword arguments. These are up to the implementation.
Returns:
List of documents that were found.
"""
async def aget(
@@ -648,10 +650,10 @@ class DocumentIndex(BaseRetriever):
Args:
ids: List of IDs to get.
**kwargs: Additional keyword arguments. These are up to the implementation.
Returns:
List of documents that were found.
"""
return await run_in_executor(
None,

View File

@@ -23,6 +23,8 @@ class InMemoryDocumentIndex(DocumentIndex):
It provides a simple search API that returns documents ranked by the number of
times the given query appears in the document.
!!! version-added "Added in version 0.2.29"
"""
store: dict[str, Document] = Field(default_factory=dict)
@@ -62,10 +64,10 @@ class InMemoryDocumentIndex(DocumentIndex):
"""Delete by IDs.
Args:
ids: List of IDs to delete.
Raises:
ValueError: If `ids` is None.
Returns:
A response object that contains the list of IDs that were successfully

View File

@@ -1,30 +1,45 @@
"""Language models.
LangChain has two main classes to work with language models: chat models and
"old-fashioned" LLMs.
**Chat models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text).
Chat models support the assignment of distinct roles to conversation messages, helping
to distinguish messages from the AI, users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides with more
information on how to implement a custom chat model.
See existing [chat model integrations](https://docs.langchain.com/oss/python/integrations/chat).
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are chat models).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as chat models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
"""
from typing import TYPE_CHECKING

View File

@@ -35,7 +35,7 @@ def is_openai_data_block(
different type, this function will return False.
Returns:
`True` if the block is a valid OpenAI data block and matches the filter_
(if provided).
"""
@@ -89,20 +89,21 @@ class ParsedDataUri(TypedDict):
def _parse_data_uri(uri: str) -> ParsedDataUri | None:
"""Parse a data URI into its components.
If parsing fails, return `None`. If either MIME type or data is missing, return
`None`.
Example:
```python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
```
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
@@ -139,7 +140,7 @@ def _normalize_messages(
directly; this may change in the future
- LangChain v0 standard content blocks for backward compatibility
!!! warning "Behavior changed in `langchain-core` 1.0.0"
!!! warning "Behavior changed in 1.0.0"
In previous versions, this function returned messages in LangChain v0 format.
Now, it returns messages in LangChain v1 format, which upgraded chat models now
expect to receive when passing back in message history. For backward
@@ -149,48 +150,48 @@ def _normalize_messages(
`URLContentBlock`:
```python
{
mime_type: NotRequired[str]
type: Literal['image', 'audio', 'file'],
source_type: Literal['url'],
url: str,
}
```
`Base64ContentBlock`:
```python
{
mime_type: NotRequired[str]
type: Literal['image', 'audio', 'file'],
source_type: Literal['base64'],
data: str,
}
```
`IDContentBlock`:
(In practice, this was never used)
```python
{
type: Literal["image", "audio", "file"],
source_type: Literal["id"],
id: str,
}
```
`PlainTextContentBlock`:
```python
{
mime_type: NotRequired[str]
type: Literal['file'],
source_type: Literal['text'],
url: str,
}
```
If a v1 message is passed in, it will be returned as-is, meaning it is safe to
always pass in v1 messages to this function for assurance.

View File

@@ -96,16 +96,9 @@ def _get_token_ids_default_method(text: str) -> list[int]:
LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
"""Input to a language model."""
LanguageModelOutput = BaseMessage | str
"""Output from a language model."""
LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
"""Input/output interface for a language model."""
LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
"""Type variable for the output of a language model."""
def _get_verbosity() -> bool:
@@ -130,20 +123,16 @@ class BaseLanguageModel(
* If instance of `BaseCache`, will use the provided cache.
Caching is not currently supported for streaming methods of models.
"""
"""
verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to add to the run trace."""
tags: list[str] | None = Field(default=None, exclude=True)
"""Tags to add to the run trace."""
metadata: dict[str, Any] | None = Field(default=None, exclude=True)
"""Metadata to add to the run trace."""
custom_get_token_ids: Callable[[str], list[int]] | None = Field(
default=None, exclude=True
)
@@ -157,7 +146,7 @@ class BaseLanguageModel(
def set_verbose(cls, verbose: bool | None) -> bool: # noqa: FBT001
"""If verbose is `None`, set it.
This allows users to pass in `None` as verbose to access the global setting.
Args:
verbose: The verbosity setting to use.
@@ -197,29 +186,22 @@ class BaseLanguageModel(
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of `PromptValue` objects.
A `PromptValue` is an object that can be converted to match the format
of any language model (string for pure text generation models and
`BaseMessage` objects for chat models).
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
Returns:
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.
"""
@@ -241,29 +223,22 @@ class BaseLanguageModel(
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of `PromptValue` objects.
A `PromptValue` is an object that can be converted to match the format
of any language model (string for pure text generation models and
`BaseMessage` objects for chat models).
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
Returns:
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate Generations for each
input prompt and additional model provider-specific output.
"""
@@ -281,14 +256,15 @@ class BaseLanguageModel(
return self.lc_attributes
def get_token_ids(self, text: str) -> list[int]:
"""Return the ordered IDs of the tokens in a text.
"""Return the ordered ids of the tokens in a text.
Args:
text: The string input to tokenize.
Returns:
A list of IDs corresponding to the tokens in the text, in order they occur
in the text.
"""
if self.custom_get_token_ids is not None:
return self.custom_get_token_ids(text)
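A hedged sketch of overriding tokenization via `custom_get_token_ids`, using the fake LLM shipped in `langchain-core` and a toy whitespace tokenizer (the tokenizer itself is illustrative):

```python
from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(
    responses=["ok"],
    # Toy tokenizer: one token ID per whitespace-separated word.
    custom_get_token_ids=lambda text: list(range(len(text.split()))),
)
print(llm.get_num_tokens("one two three"))  # 3
```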

View File

@@ -15,7 +15,6 @@ from typing import TYPE_CHECKING, Any, Literal, cast
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import override
from langchain_core._api.beta_decorator import beta
from langchain_core.caches import BaseCache
from langchain_core.callbacks import (
AsyncCallbackManager,
@@ -76,8 +75,6 @@ from langchain_core.utils.utils import LC_ID_PREFIX, from_env
if TYPE_CHECKING:
import uuid
from langchain_model_profiles import ModelProfile # type: ignore[import-untyped]
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import BaseTool
@@ -111,7 +108,7 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
"""Format messages for tracing in `on_chat_model_start`.
"""Format messages for tracing in ``on_chat_model_start``.
- Update image content blocks to OpenAI Chat Completions format (backward
compatibility).
@@ -188,7 +185,7 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
ValueError: If no generations are found in the stream.
Returns:
Chat result.
"""
generation = next(stream, None)
@@ -216,7 +213,7 @@ async def agenerate_from_stream(
stream: Iterator of `ChatGenerationChunk`.
Returns:
Chat result.
"""
chunks = [chunk async for chunk in stream]
@@ -243,52 +240,79 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di
class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
r"""Base class for chat models.
"""Base class for chat models.
Key imperative methods:
Methods that actually call the underlying model.
This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.
| Method | Input | Output | Description |
| ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
| `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
| `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
| `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
| `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
| `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
| `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
| `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
| `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
| `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |
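To make the table concrete, a hedged sketch using the generic fake chat model shipped in `langchain-core` (the chunk boundaries shown in the comment are illustrative):

```python
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

# invoke() returns a single message.
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
print(model.invoke("hi").content)  # hello world

# stream() yields message chunks (fresh model, since the iterator is consumed).
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
for chunk in model.stream("hi"):
    print(chunk.content, end="|")
```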
Key declarative methods:
Methods for creating another `Runnable` using the chat model.
This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.
| Method | Description |
| ---------------------------- | ------------------------------------------------------------------------------------------ |
| `bind_tools` | Create chat model that can call tools. |
| `with_structured_output` | Create wrapper that structures model output using schema. |
| `with_retry` | Create wrapper that retries model calls on failure. |
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |
Creating custom chat model:
Custom chat model implementations should inherit from this class.
Please reference the table below for information about which
methods and properties are required or optional for implementations.
| Method/Property | Description | Required |
| -------------------------------- | ------------------------------------------------------------------ | ----------------- |
| `_generate` | Use to generate a chat result from a prompt | Required |
| `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
| `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
| `_stream` | Use to implement streaming | Optional |
| `_agenerate` | Use to implement a native async method | Optional |
| `_astream` | Use to implement async version of `_stream` | Optional |
Follow the guide for more information on how to implement a custom Chat Model:
[Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).
""" # noqa: E501
@@ -303,9 +327,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
- If `True`, will always bypass streaming case.
- If `'tool_calling'`, will bypass streaming case only when the model is called
with a `tools` keyword argument. In other words, LangChain will automatically
switch to non-streaming behavior (`invoke`) only when the tools argument is
provided. This offers the best of both worlds.
- If `False` (Default), will always use streaming case if available.
The main reason for this flag is that code might be written using `stream` and
@@ -318,21 +342,20 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
)
"""Version of `AIMessage` output format to store in message content.
`AIMessage.content_blocks` will lazily parse the contents of `content` into a
standard format. This flag can be used to additionally store the standard format
in message content, e.g., for serialization purposes.
Supported values:
- `'v0'`: provider-specific format in content (can lazily-parse with
`content_blocks`)
- `'v1'`: standardized format in content (consistent with `content_blocks`)
Partner packages (e.g.,
[`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
field to roll out new content formats in a backward-compatible way.
!!! version-added "Added in `langchain-core` 1.0"
!!! version-added "Added in version 1.0"
"""
@@ -841,29 +864,24 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
messages: List of list of messages.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
tags: The tags to apply.
metadata: The metadata to apply.
run_name: The name of the run.
run_id: The ID of the run.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
Returns:
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.
"""
ls_structured_output_format = kwargs.pop(
@@ -964,29 +982,24 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
messages: List of list of messages.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
tags: The tags to apply.
metadata: The metadata to apply.
run_name: The name of the run.
run_id: The ID of the run.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
Returns:
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.
"""
ls_structured_output_format = kwargs.pop(
@@ -1515,136 +1528,125 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
Args:
schema: The output schema. Can be passed in as:
- An OpenAI function/tool schema,
- A JSON Schema,
- A `TypedDict` class,
- Or a Pydantic class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
Raises:
ValueError: If there are any unsupported `kwargs`.
NotImplementedError: If the model does not implement
`with_structured_output()`.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
Example: Pydantic schema (`include_raw=False`):
```python
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
Example: Pydantic schema (`include_raw=True`):
```python
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, include_raw=True
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
Example: `dict` schema (`include_raw=False`):
```python
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
dict_schema = convert_to_openai_tool(AnswerWithJustification)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(dict_schema)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
!!! warning "Behavior changed in `langchain-core` 0.2.26"
Added support for `TypedDict` class.
!!! warning "Behavior changed in 0.2.26"
Added support for TypedDict class.
""" # noqa: E501
_ = kwargs.pop("method", None)
@@ -1685,47 +1687,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser
@property
@beta()
def profile(self) -> ModelProfile:
"""Return profiling information for the model.
This property relies on the `langchain-model-profiles` package to retrieve chat
model capabilities, such as context window sizes and supported features.
Raises:
ImportError: If `langchain-model-profiles` is not installed.
Returns:
A `ModelProfile` object containing profiling information for the model.
"""
try:
from langchain_model_profiles import get_model_profile # noqa: PLC0415
except ImportError as err:
informative_error_message = (
"To access model profiling information, please install the "
"`langchain-model-profiles` package: "
"`pip install langchain-model-profiles`."
)
raise ImportError(informative_error_message) from err
provider_id = self._llm_type
model_name = (
# Model name is not standardized across integrations. New integrations
# should prefer `model`.
getattr(self, "model", None)
or getattr(self, "model_name", None)
or getattr(self, "model_id", "")
)
return get_model_profile(provider_id, model_name) or {}
class SimpleChatModel(BaseChatModel):
"""Simplified implementation for a chat model to inherit from.
!!! note
This implementation is primarily here for backwards compatibility. For new
implementations, please use `BaseChatModel` directly.
implementations, please use ``BaseChatModel`` directly.
"""
@@ -1777,12 +1745,9 @@ def _gen_info_and_msg_metadata(
}
_MAX_CLEANUP_DEPTH = 100
def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
"""Remove non-serializable objects from a serialized object."""
if depth > _MAX_CLEANUP_DEPTH: # Don't cooperate for pathological cases
if depth > 100: # Don't cooperate for pathological cases
return
if not isinstance(serialized, dict):
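Sketched standalone, the guard pattern looks like this (illustrative only; the real function also recurses into lists and strips non-serializable values):

```python
_MAX_CLEANUP_DEPTH = 100


def _cleanup(serialized, depth=0):
    if depth > _MAX_CLEANUP_DEPTH:  # don't cooperate with pathological nesting
        return
    if isinstance(serialized, dict):
        for value in serialized.values():
            _cleanup(value, depth + 1)
```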
View File
@@ -1,4 +1,4 @@
"""Fake chat models for testing purposes."""
"""Fake ChatModel for testing purposes."""
import asyncio
import re
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig
class FakeMessagesListChatModel(BaseChatModel):
"""Fake chat model for testing purposes."""
"""Fake ``ChatModel`` for testing purposes."""
responses: list[BaseMessage]
"""List of responses to **cycle** through in order."""
@@ -57,7 +57,7 @@ class FakeListChatModelError(Exception):
class FakeListChatModel(SimpleChatModel):
"""Fake chat model for testing purposes."""
"""Fake ChatModel for testing purposes."""
responses: list[str]
"""List of responses to **cycle** through in order."""
@@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel):
"""Generic fake chat model that can be used to test the chat model interface.
* Chat model should be usable in both sync and async tests
* Invokes `on_llm_new_token` to allow for testing of callback related code for new
tokens.
* Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
tokens.
* Includes logic to break messages into message chunk to facilitate testing of
streaming.
streaming.
"""
@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
to make the interface more generic if needed.
!!! note
if you want to pass a list, you can use `iter` to convert it to an iterator.
if you want to pass a list, you can use ``iter`` to convert it to an iterator.
!!! warning
Streaming is not implemented yet. We should try to implement it in the future by
View File
@@ -1,7 +1,4 @@
"""Base interface for traditional large language models (LLMs) to expose.
These are traditionally older models (newer models generally are chat models).
"""
"""Base interface for large language models to expose."""
from __future__ import annotations
@@ -77,8 +74,8 @@ def create_base_retry_decorator(
Args:
error_types: List of error types to retry on.
max_retries: Number of retries.
run_manager: Callback manager for the run.
max_retries: Number of retries. Default is 1.
run_manager: Callback manager for the run. Default is None.
Returns:
A retry decorator.
@@ -94,17 +91,13 @@ def create_base_retry_decorator(
if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
coro = run_manager.on_retry(retry_state)
try:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
asyncio.run(coro)
loop = asyncio.get_event_loop()
if loop.is_running():
# TODO: Fix RUF006 - this task should have a reference
# and be awaited somewhere
loop.create_task(coro) # noqa: RUF006
else:
if loop.is_running():
# TODO: Fix RUF006 - this task should have a reference
# and be awaited somewhere
loop.create_task(coro) # noqa: RUF006
else:
asyncio.run(coro)
asyncio.run(coro)
except Exception as e:
_log_error_once(f"Error in on_retry: {e}")
else:
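A minimal usage sketch for `create_base_retry_decorator` (the decorated function and the error type are illustrative):

```python
from langchain_core.language_models.llms import create_base_retry_decorator

retrying = create_base_retry_decorator(
    error_types=[TimeoutError],  # retry only when these exceptions are raised
    max_retries=3,
)


@retrying
def flaky_call() -> str:
    ...  # e.g. a provider API call that may time out
```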
@@ -160,7 +153,7 @@ def get_prompts(
Args:
params: Dictionary of parameters.
prompts: List of prompts.
cache: Cache object.
cache: Cache object. Default is None.
Returns:
A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -196,7 +189,7 @@ async def aget_prompts(
Args:
params: Dictionary of parameters.
prompts: List of prompts.
cache: Cache object.
cache: Cache object. Default is None.
Returns:
A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -651,12 +644,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
Args:
prompts: The prompts to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
If stop tokens are not supported consider raising `NotImplementedError`.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
If stop tokens are not supported consider raising NotImplementedError.
run_manager: Callback manager for the run.
Returns:
@@ -674,12 +664,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
Args:
prompts: The prompts to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
If stop tokens are not supported consider raising `NotImplementedError`.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
If stop tokens are not supported consider raising NotImplementedError.
run_manager: Callback manager for the run.
Returns:
@@ -711,14 +698,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
run_manager: Callback manager for the run.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Yields:
Generation chunks.
@@ -740,14 +724,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
run_manager: Callback manager for the run.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Yields:
Generation chunks.
@@ -854,18 +835,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of string prompts.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: List of tags to associate with each prompt. If provided, the length
of the list must match the length of the prompts list.
metadata: List of metadata dictionaries to associate with each prompt. If
@@ -875,18 +852,17 @@ class BaseLLM(BaseLanguageModel[str], ABC):
length of the list must match the length of the prompts list.
run_id: List of run IDs to associate with each prompt. If provided, the
length of the list must match the length of the prompts list.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Raises:
ValueError: If prompts is not a list.
ValueError: If the length of `callbacks`, `tags`, `metadata`, or
`run_name` (if provided) does not match the length of prompts.
ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
``run_name`` (if provided) does not match the length of prompts.
Returns:
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
"""
if not isinstance(prompts, list):
msg = (
@@ -1129,18 +1105,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of string prompts.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: List of tags to associate with each prompt. If provided, the length
of the list must match the length of the prompts list.
metadata: List of metadata dictionaries to associate with each prompt. If
@@ -1150,17 +1122,16 @@ class BaseLLM(BaseLanguageModel[str], ABC):
length of the list must match the length of the prompts list.
run_id: List of run IDs to associate with each prompt. If provided, the
length of the list must match the length of the prompts list.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Raises:
ValueError: If the length of `callbacks`, `tags`, `metadata`, or
`run_name` (if provided) does not match the length of prompts.
ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
``run_name`` (if provided) does not match the length of prompts.
Returns:
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
"""
if isinstance(metadata, list):
metadata = [
@@ -1369,9 +1340,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
ValueError: If the file path is not a string or Path object.
Example:
```python
llm.save(file_path="path/llm.yaml")
```
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
save_path = Path(file_path)
@@ -1416,6 +1389,11 @@ class LLM(BaseLLM):
`astream` will use `_astream` if provided, otherwise it will implement
a fallback behavior that will use `_stream` if `_stream` is implemented,
and use `_acall` if `_stream` is not implemented.
Please see the following guide for more information on how to
implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
"""
@abstractmethod
@@ -1432,16 +1410,12 @@ class LLM(BaseLLM):
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
If stop tokens are not supported consider raising `NotImplementedError`.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
If stop tokens are not supported consider raising NotImplementedError.
run_manager: Callback manager for the run.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
The model output as a string. SHOULD NOT include the prompt.
@@ -1462,16 +1436,12 @@ class LLM(BaseLLM):
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
If stop tokens are not supported consider raising `NotImplementedError`.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
If stop tokens are not supported consider raising NotImplementedError.
run_manager: Callback manager for the run.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
The model output as a string. SHOULD NOT include the prompt.
View File
@@ -17,7 +17,7 @@ def default(obj: Any) -> Any:
obj: The object to serialize to json if it is a Serializable object.
Returns:
A JSON serializable object or a SerializedNotImplemented object.
A json serializable object or a SerializedNotImplemented object.
"""
if isinstance(obj, Serializable):
return obj.to_json()
@@ -38,16 +38,17 @@ def _dump_pydantic_models(obj: Any) -> Any:
def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a JSON string representation of an object.
"""Return a json string representation of an object.
Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If `True`, the json will be
indented with 2 spaces (if no indent is provided as part of `kwargs`).
**kwargs: Additional arguments to pass to `json.dumps`
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
Returns:
A JSON string representation of the object.
A json string representation of the object.
Raises:
ValueError: If `default` is passed as a kwarg.
@@ -71,12 +72,14 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
def dumpd(obj: Any) -> Any:
"""Return a dict representation of an object.
!!! note
Unfortunately this function is not as efficient as it could be because it first
dumps the object to a json string and then loads it back into a dictionary.
Args:
obj: The object to dump.
Returns:
Dictionary that can be serialized to json using `json.dumps`.
dictionary that can be serialized to json using json.dumps
"""
# Unfortunately this function is not as efficient as it could be because it first
# dumps the object to a json string and then loads it back into a dictionary.
return json.loads(dumps(obj))
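A short round-trip sketch for `dumps`/`dumpd` (assuming a serializable object such as a prompt template):

```python
from langchain_core.load import dumpd, dumps, loads
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([("human", "{question}")])
as_json = dumps(prompt, pretty=True)  # JSON string
as_dict = dumpd(prompt)  # plain dict, via json.loads(dumps(obj)) as shown above
revived = loads(as_json)  # reconstructs the object
```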
View File
@@ -63,13 +63,16 @@ class Reviver:
Args:
secrets_map: A map of secrets to load. If a secret is not found in
the map, it will be loaded from the environment if `secrets_from_env`
is True.
is True. Defaults to `None`.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
to allow to be deserialized. Defaults to `None`.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.
Defaults to `None`.
ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.
"""
self.secrets_from_env = secrets_from_env
self.secrets_map = secrets_map or {}
@@ -104,7 +107,7 @@ class Reviver:
ValueError: If trying to deserialize something that cannot
be deserialized in the current version of langchain-core.
NotImplementedError: If the object is not implemented and
`ignore_unserializable_fields` is False.
``ignore_unserializable_fields`` is False.
"""
if (
value.get("lc") == 1
@@ -197,13 +200,16 @@ def loads(
text: The string to load.
secrets_map: A map of secrets to load. If a secret is not found in
the map, it will be loaded from the environment if `secrets_from_env`
is True.
is True. Defaults to `None`.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
to allow to be deserialized. Defaults to `None`.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.
Defaults to `None`.
ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.
Returns:
Revived LangChain objects.
@@ -239,13 +245,16 @@ def load(
obj: The object to load.
secrets_map: A map of secrets to load. If a secret is not found in
the map, it will be loaded from the environment if `secrets_from_env`
is True.
is True. Defaults to `None`.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
to allow to be deserialized. Defaults to `None`.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.
Defaults to `None`.
ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.
Returns:
Revived LangChain objects.
@@ -265,8 +274,6 @@ def load(
return reviver(loaded_obj)
if isinstance(obj, list):
return [_load(o) for o in obj]
if isinstance(obj, str) and obj in reviver.secrets_map:
return reviver.secrets_map[obj]
return obj
return _load(obj)
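An illustrative call showing the `secrets_map` handling described above (the payload and key are only for demonstration; this object happens to need no secrets):

```python
from langchain_core.load import dumpd, load
from langchain_core.prompts import ChatPromptTemplate

serialized = dumpd(ChatPromptTemplate.from_messages([("human", "{q}")]))
revived = load(
    serialized,
    secrets_map={"OPENAI_API_KEY": "sk-..."},  # used instead of an env lookup
)
```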
View File
@@ -25,16 +25,16 @@ class BaseSerialized(TypedDict):
id: list[str]
"""The unique identifier of the object."""
name: NotRequired[str]
"""The name of the object."""
"""The name of the object. Optional."""
graph: NotRequired[dict[str, Any]]
"""The graph of the object."""
"""The graph of the object. Optional."""
class SerializedConstructor(BaseSerialized):
"""Serialized constructor."""
type: Literal["constructor"]
"""The type of the object. Must be `'constructor'`."""
"""The type of the object. Must be ``'constructor'``."""
kwargs: dict[str, Any]
"""The constructor arguments."""
@@ -43,16 +43,16 @@ class SerializedSecret(BaseSerialized):
"""Serialized secret."""
type: Literal["secret"]
"""The type of the object. Must be `'secret'`."""
"""The type of the object. Must be ``'secret'``."""
class SerializedNotImplemented(BaseSerialized):
"""Serialized not implemented."""
type: Literal["not_implemented"]
"""The type of the object. Must be `'not_implemented'`."""
"""The type of the object. Must be ``'not_implemented'``."""
repr: str | None
"""The representation of the object."""
"""The representation of the object. Optional."""
def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -61,7 +61,7 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
Args:
value: The value.
key: The key.
model: The Pydantic model.
model: The pydantic model.
Returns:
Whether the value is different from the default.
@@ -93,21 +93,18 @@ class Serializable(BaseModel, ABC):
It relies on the following methods and properties:
- `is_lc_serializable`: Is this class serializable?
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.
- `get_lc_namespace`: Get the namespace of the LangChain object.
During deserialization, this namespace is used to identify
the correct class to instantiate.
Please see the `Reviver` class in `langchain_core.load.load` for more details.
During deserialization, an additional mapping is used to handle classes that have
moved or been renamed across package versions.
- `lc_secrets`: A map of constructor argument names to secret ids.
- `lc_attributes`: List of additional attribute names that should be included
as part of the serialized representation.
By design, even if a class inherits from Serializable, it is not serializable by
default. This is to prevent accidental serialization of objects that should not
be serialized.
- ``get_lc_namespace``: Get the namespace of the langchain object.
During deserialization, this namespace is used to identify
the correct class to instantiate.
Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
During deserialization, an additional mapping is used to handle
classes that have moved or been renamed across package versions.
- ``lc_secrets``: A map of constructor argument names to secret ids.
- ``lc_attributes``: List of additional attribute names that should be included
as part of the serialized representation.
"""
# Remove default BaseModel init docstring.
@@ -119,24 +116,24 @@ class Serializable(BaseModel, ABC):
def is_lc_serializable(cls) -> bool:
"""Is this class serializable?
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.
By design, even if a class inherits from Serializable, it is not serializable by
default. This is to prevent accidental serialization of objects that should not
be serialized.
Returns:
Whether the class is serializable. Default is `False`.
Whether the class is serializable. Default is False.
"""
return False
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
"""Get the namespace of the langchain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is `["langchain", "llms", "openai"]`
namespace is ["langchain", "llms", "openai"]
Returns:
The namespace.
The namespace as a list of strings.
"""
return cls.__module__.split(".")
@@ -144,7 +141,8 @@ class Serializable(BaseModel, ABC):
def lc_secrets(self) -> dict[str, str]:
"""A map of constructor argument names to secret ids.
For example, `{"openai_api_key": "OPENAI_API_KEY"}`
For example,
{"openai_api_key": "OPENAI_API_KEY"}
"""
return {}
@@ -153,7 +151,6 @@ class Serializable(BaseModel, ABC):
"""List of attribute names that should be included in the serialized kwargs.
These attributes must be accepted by the constructor.
Default is an empty dictionary.
"""
return {}
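A minimal sketch of a class opting in to serialization under the conventions described here (names are illustrative):

```python
from langchain_core.load.serializable import Serializable


class MyComponent(Serializable):
    endpoint: str
    api_key: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True  # opt in explicitly; subclasses are not serializable by default

    @property
    def lc_secrets(self) -> dict[str, str]:
        return {"api_key": "MY_API_KEY"}  # serialize the key as a secret reference
```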
@@ -197,7 +194,7 @@ class Serializable(BaseModel, ABC):
ValueError: If the class has deprecated attributes.
Returns:
A JSON serializable object or a `SerializedNotImplemented` object.
A json serializable object or a SerializedNotImplemented object.
"""
if not self.is_lc_serializable():
return self.to_json_not_implemented()
@@ -272,7 +269,7 @@ class Serializable(BaseModel, ABC):
"""Serialize a "not implemented" object.
Returns:
`SerializedNotImplemented`.
SerializedNotImplemented.
"""
return to_json_not_implemented(self)
@@ -287,8 +284,8 @@ def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:
Returns:
Whether the field is useful. If the field is required, it is useful.
If the field is not required, it is useful if the value is not `None`.
If the field is not required and the value is `None`, it is useful if the
If the field is not required, it is useful if the value is not None.
If the field is not required and the value is None, it is useful if the
default value is different from the value.
"""
field = type(inst).model_fields.get(key)
@@ -347,10 +344,10 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
"""Serialize a "not implemented" object.
Args:
obj: Object to serialize.
obj: object to serialize.
Returns:
`SerializedNotImplemented`
SerializedNotImplemented
"""
id_: list[str] = []
try:
View File
@@ -10,10 +10,11 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
from pydantic import ConfigDict
from langchain_core._api import deprecated
from langchain_core.load.serializable import Serializable
from langchain_core.runnables import run_in_executor
from pydantic import ConfigDict
@deprecated(
@@ -35,25 +36,28 @@ class BaseMemory(Serializable, ABC):
the latest input.
Example:
```python
class SimpleMemory(BaseMemory):
memories: dict[str, Any] = dict()
.. code-block:: python
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
class SimpleMemory(BaseMemory):
memories: dict[str, Any] = dict()
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def save_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
pass
def load_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, str]:
return self.memories
def save_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
pass
def clear(self) -> None:
pass
def clear(self) -> None:
pass
```
"""
model_config = ConfigDict(
View File
@@ -1,4 +1,19 @@
"""**Messages** are objects used in prompts and chat conversations."""
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
@@ -9,9 +24,6 @@ if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
InputTokenDetails,
OutputTokenDetails,
UsageMetadata,
)
from langchain_core.messages.base import (
BaseMessage,
@@ -90,12 +102,10 @@ __all__ = (
"HumanMessage",
"HumanMessageChunk",
"ImageContentBlock",
"InputTokenDetails",
"InvalidToolCall",
"MessageLikeRepresentation",
"NonStandardAnnotation",
"NonStandardContentBlock",
"OutputTokenDetails",
"PlainTextContentBlock",
"ReasoningContentBlock",
"RemoveMessage",
@@ -109,7 +119,6 @@ __all__ = (
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"UsageMetadata",
"VideoContentBlock",
"_message_from_dict",
"convert_to_messages",
@@ -151,7 +160,6 @@ _dynamic_imports = {
"HumanMessageChunk": "human",
"NonStandardAnnotation": "content",
"NonStandardContentBlock": "content",
"OutputTokenDetails": "ai",
"PlainTextContentBlock": "content",
"ReasoningContentBlock": "content",
"RemoveMessage": "modifier",
@@ -161,14 +169,12 @@ _dynamic_imports = {
"SystemMessage": "system",
"SystemMessageChunk": "system",
"ImageContentBlock": "content",
"InputTokenDetails": "ai",
"InvalidToolCall": "tool",
"TextContentBlock": "content",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"UsageMetadata": "ai",
"VideoContentBlock": "content",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
View File
@@ -40,18 +40,18 @@ class InputTokenDetails(TypedDict, total=False):
Does *not* need to sum to full input token count. Does *not* need to have all keys.
Example:
```python
{
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
}
```
.. code-block:: python
{
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
}
!!! version-added "Added in version 0.3.9"
May also hold extra provider-specific keys.
!!! version-added "Added in `langchain-core` 0.3.9"
"""
audio: int
@@ -76,16 +76,14 @@ class OutputTokenDetails(TypedDict, total=False):
Does *not* need to sum to full output token count. Does *not* need to have all keys.
Example:
```python
{
"audio": 10,
"reasoning": 200,
}
```
.. code-block:: python
May also hold extra provider-specific keys.
{
"audio": 10,
"reasoning": 200,
}
!!! version-added "Added in `langchain-core` 0.3.9"
!!! version-added "Added in version 0.3.9"
"""
@@ -106,30 +104,26 @@ class UsageMetadata(TypedDict):
This is a standard representation of token usage that is consistent across models.
Example:
```python
{
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
}
```
.. code-block:: python
!!! warning "Behavior changed in `langchain-core` 0.3.9"
{
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
}
!!! warning "Behavior changed in 0.3.9"
Added `input_token_details` and `output_token_details`.
!!! note "LangSmith SDK"
The LangSmith SDK also has a `UsageMetadata` class. While the two share fields,
LangSmith's `UsageMetadata` has additional fields to capture cost information
used by the LangSmith platform.
"""
input_tokens: int
@@ -137,7 +131,7 @@ class UsageMetadata(TypedDict):
output_tokens: int
"""Count of output (or completion) tokens. Sum of all output token types."""
total_tokens: int
"""Total token count. Sum of `input_tokens` + `output_tokens`."""
"""Total token count. Sum of input_tokens + output_tokens."""
input_token_details: NotRequired[InputTokenDetails]
"""Breakdown of input token counts.
@@ -147,31 +141,34 @@ class UsageMetadata(TypedDict):
"""Breakdown of output token counts.
Does *not* need to sum to full output token count. Does *not* need to have all keys.
"""
class AIMessage(BaseMessage):
"""Message from an AI.
An `AIMessage` is returned from a chat model as a response to a prompt.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
the raw output as returned by the model together standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
"""
tool_calls: list[ToolCall] = []
"""If present, tool calls associated with the message."""
"""If provided, tool calls associated with the message."""
invalid_tool_calls: list[InvalidToolCall] = []
"""If present, tool calls with parsing errors associated with the message."""
"""If provided, tool calls with parsing errors associated with the message."""
usage_metadata: UsageMetadata | None = None
"""If present, usage metadata for a message, such as token counts.
"""If provided, usage metadata for a message, such as token counts.
This is a standard representation of token usage that is consistent across models.
"""
type: Literal["ai"] = "ai"
"""The type of the message (used for deserialization)."""
"""The type of the message (used for deserialization). Defaults to "ai"."""
@overload
def __init__(
@@ -194,14 +191,14 @@ class AIMessage(BaseMessage):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Initialize an `AIMessage`.
"""Initialize `AIMessage`.
Specify `content` as positional arg or `content_blocks` for typing.
Specify ``content`` as positional arg or ``content_blocks`` for typing.
Args:
content: The content of the message.
content_blocks: Typed standard content.
**kwargs: Additional arguments to pass to the parent class.
kwargs: Additional arguments to pass to the parent class.
"""
if content_blocks is not None:
# If there are tool calls in content_blocks, but not in tool_calls, add them
@@ -220,11 +217,7 @@ class AIMessage(BaseMessage):
@property
def lc_attributes(self) -> dict:
"""Attributes to be serialized.
Includes all attributes, even if they are derived from other initialization
arguments.
"""
"""Attrs to be serialized even if they are derived from other init args."""
return {
"tool_calls": self.tool_calls,
"invalid_tool_calls": self.invalid_tool_calls,
@@ -232,7 +225,7 @@ class AIMessage(BaseMessage):
@property
def content_blocks(self) -> list[types.ContentBlock]:
"""Return standard, typed `ContentBlock` dicts from the message.
"""Return content blocks of the message.
If the message has a known model provider, use the provider-specific translator
first before falling back to best-effort parsing. For details, see the property
@@ -338,10 +331,11 @@ class AIMessage(BaseMessage):
@override
def pretty_repr(self, html: bool = False) -> str:
"""Return a pretty representation of the message for display.
"""Return a pretty representation of the message.
Args:
html: Whether to return an HTML-formatted string.
Defaults to `False`.
Returns:
A pretty representation of the message.
@@ -378,27 +372,31 @@ class AIMessage(BaseMessage):
class AIMessageChunk(AIMessage, BaseMessageChunk):
"""Message chunk from an AI (yielded when streaming)."""
"""Message chunk from an AI."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment]
"""The type of the message (used for deserialization)."""
"""The type of the message (used for deserialization).
Defaults to ``AIMessageChunk``.
"""
tool_call_chunks: list[ToolCallChunk] = []
"""If provided, tool call chunks associated with the message."""
chunk_position: Literal["last"] | None = None
"""Optional span represented by an aggregated `AIMessageChunk`.
"""Optional span represented by an aggregated AIMessageChunk.
If a chunk with `chunk_position="last"` is aggregated into a stream,
`tool_call_chunks` in message content will be parsed into `tool_calls`.
If a chunk with ``chunk_position="last"`` is aggregated into a stream,
``tool_call_chunks`` in message content will be parsed into `tool_calls`.
"""
@property
def lc_attributes(self) -> dict:
"""Attributes to be serialized, even if they are derived from other initialization args.""" # noqa: E501
"""Attrs to be serialized even if they are derived from other init args."""
return {
"tool_calls": self.tool_calls,
"invalid_tool_calls": self.invalid_tool_calls,
@@ -406,7 +404,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
@property
def content_blocks(self) -> list[types.ContentBlock]:
"""Return standard, typed `ContentBlock` dicts from the message."""
"""Return content blocks of the message."""
if self.response_metadata.get("output_version") == "v1":
return cast("list[types.ContentBlock]", self.content)
@@ -547,15 +545,12 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
and call_id in id_to_tc
):
self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
if "extras" in block:
# mypy does not account for instance check for dict above
self.content[idx]["extras"] = block["extras"] # type: ignore[index]
return self
@model_validator(mode="after")
def init_server_tool_calls(self) -> Self:
"""Parse `server_tool_call_chunks`."""
"""Parse server_tool_call_chunks."""
if (
self.chunk_position == "last"
and self.response_metadata.get("output_version") == "v1"
@@ -601,14 +596,14 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
def add_ai_message_chunks(
left: AIMessageChunk, *others: AIMessageChunk
) -> AIMessageChunk:
"""Add multiple `AIMessageChunk`s together.
"""Add multiple ``AIMessageChunk``s together.
Args:
left: The first `AIMessageChunk`.
*others: Other `AIMessageChunk`s to add.
left: The first ``AIMessageChunk``.
*others: Other ``AIMessageChunk``s to add.
Returns:
The resulting `AIMessageChunk`.
The resulting ``AIMessageChunk``.
"""
content = merge_content(left.content, *(o.content for o in others))
@@ -655,13 +650,13 @@ def add_ai_message_chunks(
chunk_id = id_
break
else:
# second pass: prefer lc_run-* IDs over lc_* IDs
# second pass: prefer lc_run-* ids over lc_* ids
for id_ in candidates:
if id_ and id_.startswith(LC_ID_PREFIX):
chunk_id = id_
break
else:
# third pass: take any remaining ID (auto-generated lc_* IDs)
# third pass: take any remaining id (auto-generated lc_* ids)
for id_ in candidates:
if id_:
chunk_id = id_
@@ -686,42 +681,43 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
"""Recursively add two UsageMetadata objects.
Example:
```python
from langchain_core.messages.ai import add_usage
.. code-block:: python
left = UsageMetadata(
input_tokens=5,
output_tokens=0,
total_tokens=5,
input_token_details=InputTokenDetails(cache_read=3),
)
right = UsageMetadata(
input_tokens=0,
output_tokens=10,
total_tokens=10,
output_token_details=OutputTokenDetails(reasoning=4),
)
from langchain_core.messages.ai import add_usage
add_usage(left, right)
```
left = UsageMetadata(
input_tokens=5,
output_tokens=0,
total_tokens=5,
input_token_details=InputTokenDetails(cache_read=3),
)
right = UsageMetadata(
input_tokens=0,
output_tokens=10,
total_tokens=10,
output_token_details=OutputTokenDetails(reasoning=4),
)
add_usage(left, right)
results in
```python
UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=3),
output_token_details=OutputTokenDetails(reasoning=4),
)
```
.. code-block:: python
UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=3),
output_token_details=OutputTokenDetails(reasoning=4),
)
Args:
left: The first `UsageMetadata` object.
right: The second `UsageMetadata` object.
left: The first ``UsageMetadata`` object.
right: The second ``UsageMetadata`` object.
Returns:
The sum of the two `UsageMetadata` objects.
The sum of the two ``UsageMetadata`` objects.
"""
if not (left or right):
@@ -744,47 +740,48 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
def subtract_usage(
left: UsageMetadata | None, right: UsageMetadata | None
) -> UsageMetadata:
"""Recursively subtract two `UsageMetadata` objects.
"""Recursively subtract two ``UsageMetadata`` objects.
Token counts cannot be negative so the actual operation is `max(left - right, 0)`.
Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.
Example:
```python
from langchain_core.messages.ai import subtract_usage
.. code-block:: python
left = UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=4),
)
right = UsageMetadata(
input_tokens=3,
output_tokens=8,
total_tokens=11,
output_token_details=OutputTokenDetails(reasoning=4),
)
from langchain_core.messages.ai import subtract_usage
subtract_usage(left, right)
```
left = UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=4),
)
right = UsageMetadata(
input_tokens=3,
output_tokens=8,
total_tokens=11,
output_token_details=OutputTokenDetails(reasoning=4),
)
subtract_usage(left, right)
results in
```python
UsageMetadata(
input_tokens=2,
output_tokens=2,
total_tokens=4,
input_token_details=InputTokenDetails(cache_read=4),
output_token_details=OutputTokenDetails(reasoning=0),
)
```
.. code-block:: python
UsageMetadata(
input_tokens=2,
output_tokens=2,
total_tokens=4,
input_token_details=InputTokenDetails(cache_read=4),
output_token_details=OutputTokenDetails(reasoning=0),
)
Args:
left: The first `UsageMetadata` object.
right: The second `UsageMetadata` object.
left: The first ``UsageMetadata`` object.
right: The second ``UsageMetadata`` object.
Returns:
The resulting `UsageMetadata` after subtraction.
The resulting ``UsageMetadata`` after subtraction.
"""
if not (left or right):
View File
@@ -48,13 +48,13 @@ class TextAccessor(str):
Exists to maintain backward compatibility while transitioning from method-based to
property-based text access in message objects. In LangChain <v1.0, message text was
accessed via `.text()` method calls. In v1.0 and later, the preferred pattern is property
access via `.text`.
accessed via ``.text()`` method calls. In v1.0 and later, the preferred pattern is property
access via ``.text``.
Rather than breaking existing code immediately, `TextAccessor` allows both
Rather than breaking existing code immediately, ``TextAccessor`` allows both
patterns:
- Modern property access: `message.text` (returns string directly)
- Legacy method access: `message.text()` (callable, emits deprecation warning)
- Modern property access: ``message.text`` (returns string directly)
- Legacy method access: ``message.text()`` (callable, emits deprecation warning)
"""
@@ -67,12 +67,12 @@ class TextAccessor(str):
def __call__(self) -> str:
"""Enable method-style text access for backward compatibility.
This method exists solely to support legacy code that calls `.text()`
as a method. New code should use property access (`.text`) instead.
This method exists solely to support legacy code that calls ``.text()``
as a method. New code should use property access (``.text``) instead.
!!! deprecated
As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
Use `.text` as a property instead. This method will be removed in 2.0.0.
As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated.
Use ``.text`` as a property instead. This method will be removed in 2.0.0.
Returns:
The string content, identical to property access.
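Both patterns in one sketch (the method form emits the deprecation warning noted above):

```python
from langchain_core.messages import AIMessage

msg = AIMessage("Hello")
assert msg.text == "Hello"  # preferred property access
assert msg.text() == "Hello"  # legacy call; emits a deprecation warning
```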
@@ -92,15 +92,11 @@ class TextAccessor(str):
class BaseMessage(Serializable):
"""Base abstract message class.
Messages are the inputs and outputs of a chat model.
Examples include [`HumanMessage`][langchain.messages.HumanMessage],
[`AIMessage`][langchain.messages.AIMessage], and
[`SystemMessage`][langchain.messages.SystemMessage].
Messages are the inputs and outputs of a ``ChatModel``.
"""
content: str | list[str | dict]
"""The contents of the message."""
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
@@ -163,14 +159,14 @@ class BaseMessage(Serializable):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Initialize a `BaseMessage`.
"""Initialize `BaseMessage`.
Specify `content` as positional arg or `content_blocks` for typing.
Specify ``content`` as positional arg or ``content_blocks`` for typing.
Args:
content: The contents of the message.
content: The string contents of the message.
content_blocks: Typed standard content.
**kwargs: Additional arguments to pass to the parent class.
kwargs: Additional arguments to pass to the parent class.
"""
if content_blocks is not None:
super().__init__(content=content_blocks, **kwargs)
@@ -188,10 +184,10 @@ class BaseMessage(Serializable):
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
"""Get the namespace of the langchain object.
Returns:
`["langchain", "schema", "messages"]`
``["langchain", "schema", "messages"]``
"""
return ["langchain", "schema", "messages"]
@@ -199,7 +195,7 @@ class BaseMessage(Serializable):
def content_blocks(self) -> list[types.ContentBlock]:
r"""Load content blocks from the message content.
!!! version-added "Added in `langchain-core` 1.0.0"
!!! version-added "Added in version 1.0.0"
"""
# Needed here to avoid circular import, as these classes import BaseMessages
@@ -263,11 +259,11 @@ class BaseMessage(Serializable):
def text(self) -> TextAccessor:
"""Get the text content of the message as a string.
Can be used as both property (`message.text`) and method (`message.text()`).
Can be used as both property (``message.text``) and method (``message.text()``).
!!! deprecated
As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
Use `.text` as a property instead. This method will be removed in 2.0.0.
As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated.
Use ``.text`` as a property instead. This method will be removed in 2.0.0.
Returns:
The text content of the message.
@@ -311,7 +307,7 @@ class BaseMessage(Serializable):
Args:
html: Whether to format the message as HTML. If `True`, the message will be
formatted with HTML tags.
formatted with HTML tags. Default is False.
Returns:
A pretty representation of the message.
@@ -335,8 +331,8 @@ def merge_content(
"""Merge multiple message contents.
Args:
first_content: The first `content`. Can be a string or a list.
contents: The other `content`s. Can be a string or a list.
first_content: The first ``content``. Can be a string or a list.
contents: The other ``content``s. Can be a string or a list.
Returns:
The merged content.
@@ -392,9 +388,9 @@ class BaseMessageChunk(BaseMessage):
For example,
`AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`
``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")``
will give `AIMessageChunk(content="Hello World")`
will give ``AIMessageChunk(content="Hello World")``
"""
if isinstance(other, BaseMessageChunk):
@@ -444,7 +440,7 @@ def message_to_dict(message: BaseMessage) -> dict:
Returns:
Message as a dict. The dict will have a `type` key with the message type
and a `data` key with the message data as a dict.
and a ``data`` key with the message data as a dict.
"""
return {"type": message.type, "data": message.model_dump()}
@@ -468,7 +464,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
Args:
title: The title.
bold: Whether to bold the title.
bold: Whether to bold the title. Default is False.
Returns:
The title representation.
View File
@@ -1,7 +1,7 @@
"""Derivations of standard content blocks from provider content.
`AIMessage` will first attempt to use a provider-specific translator if
`model_provider` is set in `response_metadata` on the message. Consequently, each
``model_provider`` is set in `response_metadata` on the message. Consequently, each
provider translator must handle all possible content response types from the provider,
including text.
@@ -23,13 +23,13 @@ if TYPE_CHECKING:
PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {}
"""Map model provider names to translator functions.
The dictionary maps provider names (e.g. `'openai'`, `'anthropic'`) to another
The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another
dictionary with two keys:
- `'translate_content'`: Function to translate `AIMessage` content.
- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.
- ``'translate_content'``: Function to translate `AIMessage` content.
- ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content.
When calling `content_blocks` on an `AIMessage` or `AIMessageChunk`, if
`model_provider` is set in `response_metadata`, the corresponding translator
When calling `.content_blocks` on an `AIMessage` or ``AIMessageChunk``, if
``model_provider`` is set in `response_metadata`, the corresponding translator
functions will be used to parse the content into blocks. Otherwise, best-effort parsing
in `BaseMessage` will be used.
"""
@@ -43,9 +43,9 @@ def register_translator(
"""Register content translators for a provider in `PROVIDER_TRANSLATORS`.
Args:
provider: The model provider name (e.g. `'openai'`, `'anthropic'`).
provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``).
translate_content: Function to translate `AIMessage` content.
translate_content_chunk: Function to translate `AIMessageChunk` content.
translate_content_chunk: Function to translate ``AIMessageChunk`` content.
"""
PROVIDER_TRANSLATORS[provider] = {
"translate_content": translate_content,
@@ -62,7 +62,7 @@ def get_translator(
provider: The model provider name.
Returns:
Dictionary with `'translate_content'` and `'translate_content_chunk'`
Dictionary with ``'translate_content'`` and ``'translate_content_chunk'``
functions, or None if no translator is registered for the provider. In such
case, best-effort parsing in `BaseMessage` will be used.
"""
@@ -72,10 +72,10 @@ def get_translator(
def _register_translators() -> None:
"""Register all translators in langchain-core.
A unit test ensures all modules in `block_translators` are represented here.
A unit test ensures all modules in ``block_translators`` are represented here.
For translators implemented outside langchain-core, they can be registered by
calling `register_translator` from within the integration package.
calling ``register_translator`` from within the integration package.
"""
from langchain_core.messages.block_translators.anthropic import ( # noqa: PLC0415
_register_anthropic_translator,
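A hypothetical registration from an integration package, per the contract above (the import path and function bodies are assumptions for illustration):

```python
from langchain_core.messages.block_translators import register_translator


def translate_content(message):
    """Return list[types.ContentBlock] for a full message."""
    ...


def translate_content_chunk(chunk):
    """Return list[types.ContentBlock] for a streamed chunk."""
    ...


register_translator(
    "my_provider",
    translate_content=translate_content,
    translate_content_chunk=translate_content_chunk,
)
```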
View File
@@ -31,12 +31,12 @@ def _convert_to_v1_from_anthropic_input(
) -> list[types.ContentBlock]:
"""Convert Anthropic format blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
field. This function attempts to unpack those blocks and convert any blocks that
might be Anthropic format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
If conversion fails, the block is left as a ``'non_standard'`` block.
Args:
content: List of content blocks to process.
View File
@@ -35,12 +35,12 @@ def _convert_to_v1_from_converse_input(
) -> list[types.ContentBlock]:
"""Convert Bedrock Converse format blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
field. This function attempts to unpack those blocks and convert any blocks that
might be Converse format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
If conversion fails, the block is left as a ``'non_standard'`` block.
Args:
content: List of content blocks to process.
View File
@@ -105,12 +105,12 @@ def _convert_to_v1_from_genai_input(
Called when message isn't an `AIMessage` or `model_provider` isn't set on
`response_metadata`.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
field. This function attempts to unpack those blocks and convert any blocks that
might be GenAI format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
If conversion fails, the block is left as a ``'non_standard'`` block.
Args:
content: List of content blocks to process.
@@ -282,7 +282,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
standard content blocks for returning.
Args:
message: The `AIMessage` or `AIMessageChunk` to convert.
message: The AIMessage or AIMessageChunk to convert.
Returns:
List of standard content blocks derived from the message content.
@@ -368,7 +368,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
else:
# Assume it's raw base64 without data URI
try:
# Validate base64 and decode for MIME type detection
# Validate base64 and decode for mime type detection
decoded_bytes = base64.b64decode(url, validate=True)
image_url_b64_block = {
@@ -379,7 +379,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
try:
import filetype # type: ignore[import-not-found] # noqa: PLC0415
# Guess MIME type based on file bytes
# Guess mime type based on file bytes
mime_type = None
kind = filetype.guess(decoded_bytes)
if kind:
@@ -453,13 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
"status": status, # type: ignore[typeddict-item]
"output": item.get("code_execution_result", ""),
}
server_tool_result_block["extras"] = {"block_type": item_type}
# Preserve original outcome in extras
if outcome is not None:
server_tool_result_block["extras"]["outcome"] = outcome
server_tool_result_block["extras"] = {"outcome": outcome}
converted_blocks.append(server_tool_result_block)
elif item_type == "text":
converted_blocks.append(cast("types.TextContentBlock", item))
else:
# Unknown type, preserve as non-standard
converted_blocks.append({"type": "non_standard", "value": item})
View File
@@ -1,9 +1,37 @@
"""Derivations of standard content blocks from Google (VertexAI) content."""
from langchain_core.messages.block_translators.google_genai import (
translate_content,
translate_content_chunk,
)
import warnings
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
WARNED = False
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message with Google (VertexAI) content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Google "
"VertexAI."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a chunk with Google (VertexAI) content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Google "
"VertexAI."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
def _register_google_vertexai_translator() -> None:
View File
@@ -1,135 +1,39 @@
"""Derivations of standard content blocks from Groq content."""
import json
import re
from typing import Any
import warnings
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs
WARNED = False
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block
for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value # type: ignore[typeddict-item]
return standard_block
def _parse_code_json(s: str) -> dict:
"""Extract Python code from Groq built-in tool content.
Extracts the value of the 'code' field from a string of the form:
{"code": some_arbitrary_text_with_unescaped_quotes}
As Groq may not escape quotes in the executed tools, e.g.:
```
'{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
```
""" # noqa: E501
    m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
    if not m:
        msg = (
            "Could not extract Python code from Groq tool arguments. "
            "Expected a JSON object with a 'code' field."
        )
        raise ValueError(msg)
    return {"code": m.group(1)}
def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
    """Derive standard content blocks from a message with Groq content."""
    global WARNED  # noqa: PLW0603
    if not WARNED:
        warning_message = (
            "Content block standardization is not yet fully supported for Groq."
        )
        warnings.warn(warning_message, stacklevel=2)
        WARNED = True
    raise NotImplementedError
def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
"""Convert groq message content to v1 format."""
content_blocks: list[types.ContentBlock] = []
if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
content_blocks.append(reasoning_block)
if executed_tools := message.additional_kwargs.get("executed_tools"):
for idx, executed_tool in enumerate(executed_tools):
args: dict[str, Any] | None = None
if arguments := executed_tool.get("arguments"):
try:
args = json.loads(arguments)
except json.JSONDecodeError:
if executed_tool.get("type") == "python":
try:
args = _parse_code_json(arguments)
except ValueError:
continue
elif (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
# GPT-OSS
args = {"code": arguments}
else:
continue
if isinstance(args, dict):
name = ""
if executed_tool.get("type") == "search":
name = "web_search"
elif executed_tool.get("type") == "python" or (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
name = "code_interpreter"
server_tool_call: types.ServerToolCall = {
"type": "server_tool_call",
"name": name,
"id": str(idx),
"args": args,
}
content_blocks.append(server_tool_call)
if tool_output := executed_tool.get("output"):
tool_result: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": str(idx),
"output": tool_output,
"status": "success",
}
known_fields = {"type", "arguments", "index", "output"}
_populate_extras(tool_result, executed_tool, known_fields)
content_blocks.append(tool_result)
if isinstance(message.content, str) and message.content:
content_blocks.append({"type": "text", "text": message.content})
    for tool_call in message.tool_calls:
        content_blocks.append(  # noqa: PERF401
            {
                "type": "tool_call",
                "name": tool_call["name"],
                "args": tool_call["args"],
                "id": tool_call.get("id"),
            }
        )
    return content_blocks

def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
    """Derive standard content blocks from a message chunk with Groq content."""
    global WARNED  # noqa: PLW0603
    if not WARNED:
        warning_message = (
            "Content block standardization is not yet fully supported for Groq."
        )
        warnings.warn(warning_message, stacklevel=2)
        WARNED = True
    raise NotImplementedError

def translate_content(message: AIMessage) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message with groq content."""
    return _convert_to_v1_from_groq(message)

def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message chunk with groq content."""
    return _convert_to_v1_from_groq(message)
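
# Editor's end-to-end sketch (message contents invented): a Groq response that
# ran the built-in python tool yields a server_tool_call / server_tool_result
# pair followed by a text block.
example = AIMessage(
    content="The square root of 101 is about 10.05.",
    additional_kwargs={
        "executed_tools": [
            {
                "type": "python",
                "arguments": '{"code": "import math; print(math.sqrt(101))"}',
                "output": "10.04987562112089",
            }
        ]
    },
)
assert [b["type"] for b in translate_content(example)] == [
    "server_tool_call",
    "server_tool_result",
    "text",
]
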
def _register_groq_translator() -> None:
"""Register the groq translator with the central registry.
"""Register the Groq translator with the central registry.
Run automatically when the module is imported.
"""

View File

@@ -10,12 +10,12 @@ def _convert_v0_multimodal_input_to_v1(
) -> list[types.ContentBlock]:
"""Convert v0 multimodal blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
field. This function attempts to unpack those blocks and convert any v0 format
blocks to v1 format.
If conversion fails, the block is left as a `'non_standard'` block.
If conversion fails, the block is left as a ``'non_standard'`` block.
Args:
content: List of content blocks to process.

View File

@@ -18,7 +18,7 @@ if TYPE_CHECKING:
def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
"""Convert `ImageContentBlock` to format expected by OpenAI Chat Completions."""
"""Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions."""
if "url" in block:
return {
"type": "image_url",
@@ -155,12 +155,12 @@ def _convert_to_v1_from_chat_completions_input(
) -> list[types.ContentBlock]:
"""Convert OpenAI Chat Completions format blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
field. This function attempts to unpack those blocks and convert any blocks that
might be OpenAI format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
If conversion fails, the block is left as a ``'non_standard'`` block.
Args:
content: List of content blocks to process.
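
# (Hunk truncated here.) Editor's sketch of the convention this docstring
# describes, with an invented wrapped block: an OpenAI-style input such as
#     {"type": "non_standard", "value": {"type": "image_url", "image_url": {"url": "..."}}}
# is unpacked to a v1 image block when recognized, otherwise left wrapped.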
@@ -263,7 +263,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
"""Convert v0 AIMessage into `output_version="responses/v1"` format."""
"""Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
from langchain_core.messages import AIMessageChunk # noqa: PLC0415
# Only update ChatOpenAI v0.3 AIMessages

View File

@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization)."""
"""The type of the message (used during serialization). Defaults to "chat"."""
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
@@ -29,7 +29,11 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
"""The type of the message (used during serialization)."""
"""The type of the message (used during serialization).
Defaults to ``'ChatMessageChunk'``.
"""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]

View File

@@ -5,7 +5,7 @@
change in future releases.
This module provides standardized data structures for representing inputs to and
outputs from LLMs. The core abstraction is the **Content Block**, a `TypedDict`.
outputs from LLMs. The core abstraction is the **Content Block**, a ``TypedDict``.
**Rationale**
@@ -20,59 +20,59 @@ blocks into the format required by its API.
**Extensibility**
Data **not yet mapped** to a standard block may be represented using the
`NonStandardContentBlock`, which allows for provider-specific data to be included
``NonStandardContentBlock``, which allows for provider-specific data to be included
without losing the benefits of type checking and validation.
Furthermore, provider-specific fields **within** a standard block are fully supported
by default in the `extras` field of each block. This allows for additional metadata
by default in the ``extras`` field of each block. This allows for additional metadata
to be included without breaking the standard structure.
!!! warning
Do not heavily rely on the `extras` field for provider-specific data! This field
Do not heavily rely on the ``extras`` field for provider-specific data! This field
is subject to deprecation in future releases as we move towards PEP 728.
!!! note
Following widespread adoption of [PEP 728](https://peps.python.org/pep-0728/), we
will add `extra_items=Any` as a param to Content Blocks. This will signify to type
Following widespread adoption of `PEP 728 <https://peps.python.org/pep-0728/>`__, we
will add ``extra_items=Any`` as a param to Content Blocks. This will signify to type
checkers that additional provider-specific fields are allowed outside of the
`extras` field, and that will become the new standard approach to adding
``extras`` field, and that will become the new standard approach to adding
provider-specific metadata.
??? note
**Example with PEP 728 provider-specific fields:**
    ```python
    # Content block definition
    # NOTE: `extra_items=Any`
    class TextContentBlock(TypedDict, extra_items=Any):
        type: Literal["text"]
        id: NotRequired[str]
        text: str
        annotations: NotRequired[list[Annotation]]
        index: NotRequired[int]
    ```

    ```python
    from langchain_core.messages.content import TextContentBlock

    # Create a text content block with provider-specific fields
    my_block: TextContentBlock = {
        # Add required fields
        "type": "text",
        "text": "Hello, world!",
        # Additional fields not specified in the TypedDict
        # These are valid with PEP 728 and are typed as Any
        "openai_metadata": {"model": "gpt-4", "temperature": 0.7},
        "anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
        "custom_field": "any value",
    }

    # Mutating an existing block to add provider-specific fields
    openai_data = my_block["openai_metadata"]  # Type: Any
    ```

    PEP 728 is enabled with `# type: ignore[call-arg]` comments to suppress

.. code-block:: python

    # Content block definition
    # NOTE: `extra_items=Any`
    class TextContentBlock(TypedDict, extra_items=Any):
        type: Literal["text"]
        id: NotRequired[str]
        text: str
        annotations: NotRequired[list[Annotation]]
        index: NotRequired[int]

.. code-block:: python

    from langchain_core.messages.content import TextContentBlock

    # Create a text content block with provider-specific fields
    my_block: TextContentBlock = {
        # Add required fields
        "type": "text",
        "text": "Hello, world!",
        # Additional fields not specified in the TypedDict
        # These are valid with PEP 728 and are typed as Any
        "openai_metadata": {"model": "gpt-4", "temperature": 0.7},
        "anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
        "custom_field": "any value",
    }

    # Mutating an existing block to add provider-specific fields
    openai_data = my_block["openai_metadata"]  # Type: Any

PEP 728 is enabled with ``# type: ignore[call-arg]`` comments to suppress
warnings from type checkers that don't yet support it. The functionality works
correctly in Python 3.13+ and will be fully supported as the ecosystem catches
up.
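
For instance, a minimal sketch of how the suppression reads under a type checker
without PEP 728 support (editor's illustration):

```python
from typing import Any, Literal

from typing_extensions import NotRequired, TypedDict

class TextContentBlock(TypedDict, extra_items=Any):  # type: ignore[call-arg]
    type: Literal["text"]
    id: NotRequired[str]
    text: str
```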
@@ -81,51 +81,52 @@ to be included without breaking the standard structure.
The module defines several types of content blocks, including:
- `TextContentBlock`: Standard text output.
- `Citation`: For annotations that link text output to a source document.
- `ToolCall`: For function calling.
- `ReasoningContentBlock`: To capture a model's thought process.
- ``TextContentBlock``: Standard text output.
- ``Citation``: For annotations that link text output to a source document.
- ``ToolCall``: For function calling.
- ``ReasoningContentBlock``: To capture a model's thought process.
- Multimodal data:
- `ImageContentBlock`
- `AudioContentBlock`
- `VideoContentBlock`
- `PlainTextContentBlock` (e.g. .txt or .md files)
- `FileContentBlock` (e.g. PDFs, etc.)
- ``ImageContentBlock``
- ``AudioContentBlock``
- ``VideoContentBlock``
- ``PlainTextContentBlock`` (e.g. .txt or .md files)
- ``FileContentBlock`` (e.g. PDFs, etc.)
**Example Usage**
```python
# Direct construction:
from langchain_core.messages.content import TextContentBlock, ImageContentBlock

multimodal_message = AIMessage(
    content_blocks=[
        TextContentBlock(type="text", text="What is shown in this image?"),
        ImageContentBlock(
            type="image",
            url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
            mime_type="image/png",
        ),
    ]
)

# Using factories:
from langchain_core.messages.content import create_text_block, create_image_block

multimodal_message = AIMessage(
    content=[
        create_text_block("What is shown in this image?"),
        create_image_block(
            url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
            mime_type="image/png",
        ),
    ]
)
```

.. code-block:: python

    # Direct construction:
    from langchain_core.messages.content import TextContentBlock, ImageContentBlock

    multimodal_message = AIMessage(
        content_blocks=[
            TextContentBlock(type="text", text="What is shown in this image?"),
            ImageContentBlock(
                type="image",
                url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
                mime_type="image/png",
            ),
        ]
    )

    # Using factories:
    from langchain_core.messages.content import create_text_block, create_image_block

    multimodal_message = AIMessage(
        content=[
            create_text_block("What is shown in this image?"),
            create_image_block(
                url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
                mime_type="image/png",
            ),
        ]
    )
Factory functions offer benefits such as:
- Automatic ID generation (when not provided)
- No need to manually specify the `type` field
"""
from typing import Any, Literal, get_args, get_type_hints
@@ -139,12 +140,12 @@ class Citation(TypedDict):
"""Annotation for citing data from a document.
!!! note
`start`/`end` indices refer to the **response text**,
``start``/``end`` indices refer to the **response text**,
not the source text. This means that the indices are relative to the model's
response, not the original document (as specified in the `url`).
response, not the original document (as specified in the ``url``).
!!! note "Factory function"
`create_citation` may also be used as a factory to create a `Citation`.
!!! note
``create_citation`` may also be used as a factory to create a ``Citation``.
Benefits include:
* Automatic ID generation (when not provided)
@@ -156,12 +157,10 @@ class Citation(TypedDict):
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Content block identifier.
Either:
"""Content block identifier. Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -175,10 +174,10 @@ class Citation(TypedDict):
"""
start_index: NotRequired[int]
"""Start index of the **response text** (`TextContentBlock.text`)."""
"""Start index of the **response text** (``TextContentBlock.text``)."""
end_index: NotRequired[int]
"""End index of the **response text** (`TextContentBlock.text`)"""
"""End index of the **response text** (``TextContentBlock.text``)"""
cited_text: NotRequired[str]
"""Excerpt of source text being cited."""
@@ -203,9 +202,8 @@ class NonStandardAnnotation(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -214,7 +212,6 @@ class NonStandardAnnotation(TypedDict):
Annotation = Citation | NonStandardAnnotation
"""A union of all defined `Annotation` types."""
class TextContentBlock(TypedDict):
@@ -223,9 +220,9 @@ class TextContentBlock(TypedDict):
This typically represents the main text content of a message, such as the response
from a language model or the text of a user message.
!!! note "Factory function"
`create_text_block` may also be used as a factory to create a
`TextContentBlock`. Benefits include:
!!! note
``create_text_block`` may also be used as a factory to create a
``TextContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -239,9 +236,8 @@ class TextContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -249,7 +245,7 @@ class TextContentBlock(TypedDict):
"""Block text."""
annotations: NotRequired[list[Annotation]]
"""`Citation`s and other annotations."""
"""``Citation``s and other annotations."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
@@ -259,19 +255,20 @@ class TextContentBlock(TypedDict):
class ToolCall(TypedDict):
"""Represents an AI's request to call a tool.
"""Represents a request to call a tool.
Example:
```python
{"name": "foo", "args": {"a": 1}, "id": "123"}
```
.. code-block:: python
{"name": "foo", "args": {"a": 1}, "id": "123"}
This represents a request to call the tool named "foo" with arguments {"a": 1}
and an identifier of "123".
!!! note "Factory function"
`create_tool_call` may also be used as a factory to create a
`ToolCall`. Benefits include:
!!! note
``create_tool_call`` may also be used as a factory to create a
``ToolCall``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -304,22 +301,24 @@ class ToolCall(TypedDict):
class ToolCallChunk(TypedDict):
"""A chunk of a tool call (yielded when streaming).
"""A chunk of a tool call (e.g., as part of a stream).
When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``),
all string attributes are concatenated. Chunks are only merged if their
values of `index` are equal and not `None`.
values of ``index`` are equal and not `None`.
Example:
```python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
```
.. code-block:: python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
"""
# TODO: Consider making fields NotRequired[str] in the future.
@@ -386,10 +385,7 @@ class InvalidToolCall(TypedDict):
class ServerToolCall(TypedDict):
"""Tool call that is executed server-side.
For example: code execution, web search, etc.
"""
"""Tool call that is executed server-side."""
type: Literal["server_tool_call"]
"""Used for discrimination."""
@@ -411,7 +407,7 @@ class ServerToolCall(TypedDict):
class ServerToolCallChunk(TypedDict):
"""A chunk of a server-side tool call (yielded when streaming)."""
"""A chunk of a tool call (as part of a stream)."""
type: Literal["server_tool_call_chunk"]
"""Used for discrimination."""
@@ -460,9 +456,9 @@ class ServerToolResult(TypedDict):
class ReasoningContentBlock(TypedDict):
"""Reasoning output from a LLM.
!!! note "Factory function"
`create_reasoning_block` may also be used as a factory to create a
`ReasoningContentBlock`. Benefits include:
!!! note
``create_reasoning_block`` may also be used as a factory to create a
``ReasoningContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -476,9 +472,8 @@ class ReasoningContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -486,7 +481,7 @@ class ReasoningContentBlock(TypedDict):
"""Reasoning text.
Either the thought summary or the raw reasoning text itself. This is often parsed
from `<think>` tags in the model's response.
from ``<think>`` tags in the model's response.
"""
@@ -503,9 +498,9 @@ class ReasoningContentBlock(TypedDict):
class ImageContentBlock(TypedDict):
"""Image data.
!!! note "Factory function"
`create_image_block` may also be used as a factory to create a
`ImageContentBlock`. Benefits include:
!!! note
``create_image_block`` may also be used as a factory to create a
``ImageContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -519,9 +514,8 @@ class ImageContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -531,7 +525,7 @@ class ImageContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the image. Required for base64.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#image)
`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#image>`__
"""
@@ -551,9 +545,9 @@ class ImageContentBlock(TypedDict):
class VideoContentBlock(TypedDict):
"""Video data.
!!! note "Factory function"
`create_video_block` may also be used as a factory to create a
`VideoContentBlock`. Benefits include:
!!! note
``create_video_block`` may also be used as a factory to create a
``VideoContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -567,9 +561,8 @@ class VideoContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -579,7 +572,7 @@ class VideoContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the video. Required for base64.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#video)
`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#video>`__
"""
@@ -599,9 +592,9 @@ class VideoContentBlock(TypedDict):
class AudioContentBlock(TypedDict):
"""Audio data.
!!! note "Factory function"
`create_audio_block` may also be used as a factory to create an
`AudioContentBlock`. Benefits include:
!!! note
``create_audio_block`` may also be used as a factory to create an
``AudioContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -614,9 +607,8 @@ class AudioContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -626,7 +618,7 @@ class AudioContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the audio. Required for base64.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#audio)
`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#audio>`__
"""
@@ -644,21 +636,21 @@ class AudioContentBlock(TypedDict):
class PlainTextContentBlock(TypedDict):
"""Plaintext data (e.g., from a `.txt` or `.md` document).
"""Plaintext data (e.g., from a document).
!!! note
A `PlainTextContentBlock` existed in `langchain-core<1.0.0`. Although the
A ``PlainTextContentBlock`` existed in ``langchain-core<1.0.0``. Although the
name has carried over, the structure has changed significantly. The only shared
keys between the old and new versions are `type` and `text`, though the
`type` value has changed from `'text'` to `'text-plain'`.
keys between the old and new versions are `type` and ``text``, though the
`type` value has changed from ``'text'`` to ``'text-plain'``.
!!! note
Title and context are optional fields that may be passed to the model. See
Anthropic [example](https://docs.claude.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).
Anthropic `example <https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content>`__.
!!! note "Factory function"
`create_plaintext_block` may also be used as a factory to create a
`PlainTextContentBlock`. Benefits include:
!!! note
``create_plaintext_block`` may also be used as a factory to create a
``PlainTextContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -672,9 +664,8 @@ class PlainTextContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -707,18 +698,18 @@ class PlainTextContentBlock(TypedDict):
class FileContentBlock(TypedDict):
"""File data that doesn't fit into other multimodal block types.
"""File data that doesn't fit into other multimodal blocks.
This block is intended for files that are not images, audio, or plaintext. For
example, it can be used for PDFs, Word documents, etc.
If the file is an image, audio, or plaintext, you should use the corresponding
content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
`PlainTextContentBlock`).
content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``,
``PlainTextContentBlock``).
!!! note "Factory function"
`create_file_block` may also be used as a factory to create a
`FileContentBlock`. Benefits include:
!!! note
``create_file_block`` may also be used as a factory to create a
``FileContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -732,9 +723,8 @@ class FileContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
@@ -744,7 +734,7 @@ class FileContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the file. Required for base64.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml)
`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__
"""
@@ -767,21 +757,21 @@ class FileContentBlock(TypedDict):
class NonStandardContentBlock(TypedDict):
"""Provider-specific content data.
"""Provider-specific data.
This block contains data for which there is not yet a standard type.
The purpose of this block should be to simply hold a provider-specific payload.
If a provider's non-standard output includes reasoning and tool calls, it should be
the adapter's job to parse that payload and emit the corresponding standard
`ReasoningContentBlock` and `ToolCalls`.
``ReasoningContentBlock`` and ``ToolCalls``.
Has no `extras` field, as provider-specific data should be included in the
`value` field.
Has no ``extras`` field, as provider-specific data should be included in the
``value`` field.
!!! note "Factory function"
`create_non_standard_block` may also be used as a factory to create a
`NonStandardContentBlock`. Benefits include:
!!! note
``create_non_standard_block`` may also be used as a factory to create a
``NonStandardContentBlock``. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -795,14 +785,13 @@ class NonStandardContentBlock(TypedDict):
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
"""
value: dict[str, Any]
"""Provider-specific content data."""
"""Provider-specific data."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
@@ -816,7 +805,6 @@ DataContentBlock = (
| PlainTextContentBlock
| FileContentBlock
)
"""A union of all defined multimodal data `ContentBlock` types."""
ToolContentBlock = (
ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
@@ -830,7 +818,6 @@ ContentBlock = (
| DataContentBlock
| ToolContentBlock
)
"""A union of all defined `ContentBlock` types and aliases."""
KNOWN_BLOCK_TYPES = {
@@ -855,7 +842,7 @@ KNOWN_BLOCK_TYPES = {
"non_standard",
# citation and non_standard_annotation intentionally omitted
}
"""These are block types known to `langchain-core>=1.0.0`.
"""These are block types known to ``langchain-core>=1.0.0``.
If a block has a type not in this set, it is considered to be provider-specific.
"""
@@ -867,7 +854,7 @@ def _get_data_content_block_types() -> tuple[str, ...]:
Example: ("image", "video", "audio", "text-plain", "file")
Note that old style multimodal blocks share type literals with new style blocks.
Specifically, "image", "audio", and "file".
Speficially, "image", "audio", and "file".
See the docstring of `_normalize_messages` in `language_models._utils` for details.
"""
@@ -894,7 +881,7 @@ def is_data_content_block(block: dict) -> bool:
block: The content block to check.
Returns:
`True` if the content block is a data content block, `False` otherwise.
True if the content block is a data content block, False otherwise.
"""
if block.get("type") not in _get_data_content_block_types():
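# (Hunk truncated here.) Editor's sketch of the membership gate above, using
# the example tuple from `_get_data_content_block_types` ("image", "video",
# "audio", "text-plain", "file"):
#     is_data_content_block({"type": "reasoning"})  # False, fails this gate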
@@ -906,7 +893,7 @@ def is_data_content_block(block: dict) -> bool:
# 'text' is checked to support v0 PlainTextContentBlock types
# We must guard against new style TextContentBlock which also has 'text' `type`
# by ensuring the presence of `source_type`
# by ensuring the presense of `source_type`
if block["type"] == "text" and "source_type" not in block: # noqa: SIM103 # This is more readable
return False
@@ -936,20 +923,20 @@ def create_text_block(
index: int | str | None = None,
**kwargs: Any,
) -> TextContentBlock:
"""Create a `TextContentBlock`.
"""Create a ``TextContentBlock``.
Args:
text: The text content of the block.
id: Content block identifier. Generated automatically if not provided.
annotations: `Citation`s and other annotations for the text.
annotations: ``Citation``s and other annotations for the text.
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `TextContentBlock`.
A properly formatted ``TextContentBlock``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
block = TextContentBlock(
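# (Hunk truncated here.) Editor's usage sketch for the factory above; the id
# value is illustrative of the `lc_`-prefixed UUID4 the docstring promises:
#     create_text_block("Hello!", index=0)
#     -> {"type": "text", "text": "Hello!", "index": 0, "id": "lc_6a4f..."}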
@@ -979,7 +966,7 @@ def create_image_block(
index: int | str | None = None,
**kwargs: Any,
) -> ImageContentBlock:
"""Create an `ImageContentBlock`.
"""Create an ``ImageContentBlock``.
Args:
url: URL of the image.
@@ -990,15 +977,15 @@ def create_image_block(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `ImageContentBlock`.
A properly formatted ``ImageContentBlock``.
Raises:
ValueError: If no image source is provided or if `base64` is used without
`mime_type`.
ValueError: If no image source is provided or if ``base64`` is used without
``mime_type``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
@@ -1035,7 +1022,7 @@ def create_video_block(
index: int | str | None = None,
**kwargs: Any,
) -> VideoContentBlock:
"""Create a `VideoContentBlock`.
"""Create a ``VideoContentBlock``.
Args:
url: URL of the video.
@@ -1046,15 +1033,15 @@ def create_video_block(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `VideoContentBlock`.
A properly formatted ``VideoContentBlock``.
Raises:
ValueError: If no video source is provided or if `base64` is used without
`mime_type`.
ValueError: If no video source is provided or if ``base64`` is used without
``mime_type``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
@@ -1095,7 +1082,7 @@ def create_audio_block(
index: int | str | None = None,
**kwargs: Any,
) -> AudioContentBlock:
"""Create an `AudioContentBlock`.
"""Create an ``AudioContentBlock``.
Args:
url: URL of the audio.
@@ -1106,15 +1093,15 @@ def create_audio_block(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `AudioContentBlock`.
A properly formatted ``AudioContentBlock``.
Raises:
ValueError: If no audio source is provided or if `base64` is used without
`mime_type`.
ValueError: If no audio source is provided or if ``base64`` is used without
``mime_type``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
@@ -1155,7 +1142,7 @@ def create_file_block(
index: int | str | None = None,
**kwargs: Any,
) -> FileContentBlock:
"""Create a `FileContentBlock`.
"""Create a ``FileContentBlock``.
Args:
url: URL of the file.
@@ -1166,15 +1153,15 @@ def create_file_block(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `FileContentBlock`.
A properly formatted ``FileContentBlock``.
Raises:
ValueError: If no file source is provided or if `base64` is used without
`mime_type`.
ValueError: If no file source is provided or if ``base64`` is used without
``mime_type``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
@@ -1216,7 +1203,7 @@ def create_plaintext_block(
index: int | str | None = None,
**kwargs: Any,
) -> PlainTextContentBlock:
"""Create a `PlainTextContentBlock`.
"""Create a ``PlainTextContentBlock``.
Args:
text: The plaintext content.
@@ -1229,11 +1216,11 @@ def create_plaintext_block(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `PlainTextContentBlock`.
A properly formatted ``PlainTextContentBlock``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
block = PlainTextContentBlock(
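# (Hunk truncated here.) Editor's factory sketch; only `text` is taken from the
# documented args, and the result shape beyond `type`/`text`/`id` is an assumption:
#     create_plaintext_block("LangChain is a framework for building LLM apps.")
#     -> {"type": "text-plain", "text": "LangChain is ...", "id": "lc_..."}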
@@ -1272,7 +1259,7 @@ def create_tool_call(
index: int | str | None = None,
**kwargs: Any,
) -> ToolCall:
"""Create a `ToolCall`.
"""Create a ``ToolCall``.
Args:
name: The name of the tool to be called.
@@ -1281,11 +1268,11 @@ def create_tool_call(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `ToolCall`.
A properly formatted ``ToolCall``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
block = ToolCall(
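# (Hunk truncated here.) Editor's factory sketch; the positional order of
# `name` and `args` is assumed from the documented Args:
#     create_tool_call("search", {"query": "langchain"})
#     -> {"type": "tool_call", "name": "search", "args": {"query": "langchain"}, "id": "lc_..."}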
@@ -1311,7 +1298,7 @@ def create_reasoning_block(
index: int | str | None = None,
**kwargs: Any,
) -> ReasoningContentBlock:
"""Create a `ReasoningContentBlock`.
"""Create a ``ReasoningContentBlock``.
Args:
reasoning: The reasoning text or thought summary.
@@ -1319,11 +1306,11 @@ def create_reasoning_block(
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `ReasoningContentBlock`.
A properly formatted ``ReasoningContentBlock``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
block = ReasoningContentBlock(
@@ -1352,7 +1339,7 @@ def create_citation(
id: str | None = None,
**kwargs: Any,
) -> Citation:
"""Create a `Citation`.
"""Create a ``Citation``.
Args:
url: URL of the document source.
@@ -1363,11 +1350,11 @@ def create_citation(
id: Content block identifier. Generated automatically if not provided.
Returns:
A properly formatted `Citation`.
A properly formatted ``Citation``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
block = Citation(type="citation", id=ensure_id(id))
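# Editor's factory sketch; keyword names follow the Args above, and the indices
# refer to the response text per the `Citation` docstring:
#     create_citation(url="https://example.com/paper", start_index=0, end_index=12)
#     -> {"type": "citation", "id": "lc_...", "url": "https://example.com/paper", ...}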
@@ -1396,19 +1383,19 @@ def create_non_standard_block(
id: str | None = None,
index: int | str | None = None,
) -> NonStandardContentBlock:
"""Create a `NonStandardContentBlock`.
"""Create a ``NonStandardContentBlock``.
Args:
value: Provider-specific content data.
value: Provider-specific data.
id: Content block identifier. Generated automatically if not provided.
index: Index of block in aggregate response. Used during streaming.
Returns:
A properly formatted `NonStandardContentBlock`.
A properly formatted ``NonStandardContentBlock``.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
"""
block = NonStandardContentBlock(

View File

@@ -15,11 +15,11 @@ from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing the result of executing a tool back to a model.
`FunctionMessage` is an older version of the `ToolMessage` schema, and
``FunctionMessage`` is an older version of the `ToolMessage` schema, and
do not contain the `tool_call_id` field.
The `tool_call_id` field is used to associate the tool call request with the
tool call response. Useful in situations where a chat model is able
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
"""The name of the function that was executed."""
type: Literal["function"] = "function"
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization). Defaults to ``'function'``."""
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,7 +38,11 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization).
Defaults to ``'FunctionMessageChunk'``.
"""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]

View File

@@ -7,27 +7,33 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class HumanMessage(BaseMessage):
"""Message from the user.
"""Message from a human.
A `HumanMessage` is a message that is passed in from a user to the model.
`HumanMessage`s are messages that are passed in from a human to the model.
Example:
```python
from langchain_core.messages import HumanMessage, SystemMessage

messages = [
    SystemMessage(content="You are a helpful assistant! Your name is Bob."),
    HumanMessage(content="What is your name?"),
]

# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
```

.. code-block:: python

    from langchain_core.messages import HumanMessage, SystemMessage

    messages = [
        SystemMessage(content="You are a helpful assistant! Your name is Bob."),
        HumanMessage(content="What is your name?"),
    ]

    # Instantiate a chat model and invoke it with the messages
    model = ...
    print(model.invoke(messages))
"""
type: Literal["human"] = "human"
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization).
Defaults to ``'human'``.
"""
@overload
def __init__(
@@ -50,7 +56,7 @@ class HumanMessage(BaseMessage):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Specify `content` as positional arg or `content_blocks` for typing."""
"""Specify ``content`` as positional arg or ``content_blocks`` for typing."""
if content_blocks is not None:
super().__init__(
content=cast("str | list[str | dict]", content_blocks),
@@ -67,4 +73,5 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization).
Defaults to "HumanMessageChunk"."""

View File

@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(
self,
@@ -20,7 +20,7 @@ class RemoveMessage(BaseMessage):
Args:
id: The ID of the message to remove.
**kwargs: Additional fields to pass to the message.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.

View File

@@ -13,21 +13,27 @@ class SystemMessage(BaseMessage):
of input messages.
Example:
```python
from langchain_core.messages import HumanMessage, SystemMessage

messages = [
    SystemMessage(content="You are a helpful assistant! Your name is Bob."),
    HumanMessage(content="What is your name?"),
]

# Define a chat model and invoke it with the messages
print(model.invoke(messages))
```

.. code-block:: python

    from langchain_core.messages import HumanMessage, SystemMessage

    messages = [
        SystemMessage(content="You are a helpful assistant! Your name is Bob."),
        HumanMessage(content="What is your name?"),
    ]

    # Define a chat model and invoke it with the messages
    print(model.invoke(messages))
"""
type: Literal["system"] = "system"
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization).
Defaults to ``'system'``.
"""
@overload
def __init__(
@@ -50,7 +56,7 @@ class SystemMessage(BaseMessage):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Specify `content` as positional arg or `content_blocks` for typing."""
"""Specify ``content`` as positional arg or ``content_blocks`` for typing."""
if content_blocks is not None:
super().__init__(
content=cast("str | list[str | dict]", content_blocks),
@@ -67,4 +73,8 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization).
Defaults to ``'SystemMessageChunk'``.
"""

View File

@@ -16,8 +16,8 @@ from langchain_core.utils._merge import merge_dicts, merge_obj
class ToolOutputMixin:
"""Mixin for objects that tools can return directly.
If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is
not an instance of `ToolOutputMixin`, the output will automatically be coerced to
If a custom BaseTool is invoked with a ``ToolCall`` and the output of custom code is
not an instance of ``ToolOutputMixin``, the output will automatically be coerced to
a string and wrapped in a `ToolMessage`.
"""
@@ -27,38 +27,41 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
"""Message for passing the result of executing a tool back to a model.
`ToolMessage` objects contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
is encoded inside the ``content`` field.
Example: A `ToolMessage` representing a result of `42` from a tool call with id
Example: A `ToolMessage` representing a result of ``42`` from a tool call with id
```python
from langchain_core.messages import ToolMessage

ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
```

.. code-block:: python

    from langchain_core.messages import ToolMessage

    ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
Example: A `ToolMessage` where only part of the tool output is sent to the model
and the full output is passed in to artifact.
and the full output is passed in to artifact.
!!! version-added "Added in version 0.2.17"

```python
from langchain_core.messages import ToolMessage

tool_output = {
    "stdout": "From the graph we can see that the correlation between "
    "x and y is ...",
    "stderr": None,
    "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
}

ToolMessage(
    content=tool_output["stdout"],
    artifact=tool_output,
    tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
)
```

.. code-block:: python

    from langchain_core.messages import ToolMessage

    tool_output = {
        "stdout": "From the graph we can see that the correlation between "
        "x and y is ...",
        "stderr": None,
        "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
    }

    ToolMessage(
        content=tool_output["stdout"],
        artifact=tool_output,
        tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
    )
The `tool_call_id` field is used to associate the tool call request with the
tool call response. Useful in situations where a chat model is able
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
@@ -67,7 +70,11 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
"""Tool call that this message is responding to."""
type: Literal["tool"] = "tool"
"""The type of the message (used for serialization)."""
"""The type of the message (used for serialization).
Defaults to ``'tool'``.
"""
artifact: Any = None
"""Artifact of the Tool execution which is not meant to be sent to the model.
@@ -76,15 +83,21 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
a subset of the full tool output is being passed as message content but the full
output is needed in other parts of the code.
!!! version-added "Added in version 0.2.17"
"""
status: Literal["success", "error"] = "success"
"""Status of the tool invocation."""
"""Status of the tool invocation.
!!! version-added "Added in version 0.2.24"
"""
additional_kwargs: dict = Field(default_factory=dict, repr=False)
"""Currently inherited from `BaseMessage`, but not used."""
"""Currently inherited from BaseMessage, but not used."""
response_metadata: dict = Field(default_factory=dict, repr=False)
"""Currently inherited from `BaseMessage`, but not used."""
"""Currently inherited from BaseMessage, but not used."""
@model_validator(mode="before")
@classmethod
@@ -152,12 +165,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Initialize a `ToolMessage`.
"""Initialize `ToolMessage`.
Specify `content` as positional arg or `content_blocks` for typing.
Specify ``content`` as positional arg or ``content_blocks`` for typing.
Args:
content: The contents of the message.
content: The string contents of the message.
content_blocks: Typed standard content.
**kwargs: Additional fields.
"""
@@ -203,15 +216,16 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
class ToolCall(TypedDict):
"""Represents an AI's request to call a tool.
"""Represents a request to call a tool.
Example:
```python
{"name": "foo", "args": {"a": 1}, "id": "123"}
```
This represents a request to call the tool named `'foo'` with arguments
`{"a": 1}` and an identifier of `'123'`.
.. code-block:: python
{"name": "foo", "args": {"a": 1}, "id": "123"}
This represents a request to call the tool named ``'foo'`` with arguments
``{"a": 1}`` and an identifier of ``'123'``.
"""
@@ -249,22 +263,24 @@ def tool_call(
class ToolCallChunk(TypedDict):
"""A chunk of a tool call (yielded when streaming).
"""A chunk of a tool call (e.g., as part of a stream).
When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``),
all string attributes are concatenated. Chunks are only merged if their
values of `index` are equal and not None.
values of ``index`` are equal and not None.
Example:
```python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
```
.. code-block:: python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
"""
name: str | None

File diff suppressed because it is too large

View File

@@ -1,20 +1,17 @@
"""`OutputParser` classes parse the output of an LLM call into structured data.

!!! tip "Structured output"

    Output parsers emerged as an early solution to the challenge of obtaining structured
    output from LLMs.

    Today, most LLMs support [structured output](https://docs.langchain.com/oss/python/langchain/models#structured-outputs)
    natively. In such cases, using output parsers may be unnecessary, and you should
    leverage the model's built-in capabilities for structured output. Refer to the
    [documentation of your chosen model](https://docs.langchain.com/oss/python/integrations/providers/overview)
    for guidance on how to achieve structured output directly.

    Output parsers remain valuable when working with models that do not support
    structured output natively, or when you require additional processing or validation
    of the model's output beyond its inherent capabilities.
"""  # noqa: E501
"""**OutputParser** classes parse the output of an LLM call.

**Class hierarchy:**

.. code-block::

    BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser  # ListOutputParser, PydanticOutputParser

**Main helpers:**

.. code-block::

    Serializable, Generation, PromptValue
"""
from typing import TYPE_CHECKING

View File

@@ -31,13 +31,13 @@ class BaseLLMOutputParser(ABC, Generic[T]):
@abstractmethod
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model `Generation` objects into a specific format.
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
for parsers that can parse partial results. Default is False.
Returns:
Structured output.
@@ -46,17 +46,17 @@ class BaseLLMOutputParser(ABC, Generic[T]):
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model `Generation` objects into a specific format.
"""Async parse a list of candidate model Generations into a specific format.
Args:
result: A list of `Generation` to be parsed. The Generations are assumed
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
for parsers that can parse partial results. Default is False.
Returns:
Structured output.
""" # noqa: E501
"""
return await run_in_executor(None, self.parse_result, result, partial=partial)
@@ -134,31 +134,29 @@ class BaseOutputParser(
Output parsers help structure language model responses.
Example:
```python
# Implement a simple boolean output parser
class BooleanOutputParser(BaseOutputParser[bool]):
    true_val: str = "YES"
    false_val: str = "NO"

    def parse(self, text: str) -> bool:
        cleaned_text = text.strip().upper()
        if cleaned_text not in (
            self.true_val.upper(),
            self.false_val.upper(),
        ):
            raise OutputParserException(
                f"BooleanOutputParser expected output value to either be "
                f"{self.true_val} or {self.false_val} (case-insensitive). "
                f"Received {cleaned_text}."
            )
        return cleaned_text == self.true_val.upper()

    @property
    def _type(self) -> str:
        return "boolean_output_parser"
```

.. code-block:: python

    class BooleanOutputParser(BaseOutputParser[bool]):
        true_val: str = "YES"
        false_val: str = "NO"

        def parse(self, text: str) -> bool:
            cleaned_text = text.strip().upper()
            if cleaned_text not in (
                self.true_val.upper(),
                self.false_val.upper(),
            ):
                raise OutputParserException(
                    f"BooleanOutputParser expected output value to either be "
                    f"{self.true_val} or {self.false_val} (case-insensitive). "
                    f"Received {cleaned_text}."
                )
            return cleaned_text == self.true_val.upper()

        @property
        def _type(self) -> str:
            return "boolean_output_parser"
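
A quick usage sketch of the parser above (editor's illustration):

```python
BooleanOutputParser().parse("yes")  # True (comparison is case-insensitive)
BooleanOutputParser().parse("maybe")  # raises OutputParserException
```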
"""
@property
@@ -175,7 +173,7 @@ class BaseOutputParser(
This property is inferred from the first type argument of the class.
Raises:
TypeError: If the class doesn't have an inferable `OutputType`.
TypeError: If the class doesn't have an inferable OutputType.
"""
for base in self.__class__.mro():
if hasattr(base, "__pydantic_generic_metadata__"):
@@ -237,16 +235,16 @@ class BaseOutputParser(
@override
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model `Generation` objects into a specific format.
"""Parse a list of candidate model Generations into a specific format.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.
The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
Args:
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
for parsers that can parse partial results. Default is False.
Returns:
Structured output.
@@ -267,20 +265,20 @@ class BaseOutputParser(
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model `Generation` objects into a specific format.
"""Async parse a list of candidate model Generations into a specific format.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.
The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
Args:
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
for parsers that can parse partial results. Default is False.
Returns:
Structured output.
""" # noqa: E501
"""
return await run_in_executor(None, self.parse_result, result, partial=partial)
async def aparse(self, text: str) -> T:
@@ -302,13 +300,13 @@ class BaseOutputParser(
) -> Any:
"""Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the `OutputParser` wants
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input `PromptValue`.
prompt: Input PromptValue.
Returns:
Structured output.

Some files were not shown because too many files have changed in this diff Show More