mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-03 15:55:44 +00:00

Compare commits: 152 commits (langchain- ... langchain-)

85567f1dc3, 6f4978041e, f1fca4f46f, 2b899fe961, 3152d25811, 3b8cb3d4b6, 15047ae28a, 888fa3a2fb,
90346b8a35, 2d5efd7b29, 1d2273597a, 9dd494ddcd, 2fa07b19f6, a022e3c14d, e0e11423d9, 34de8ec1f3,
3d288fd610, 055cccde28, 361514d11d, 90b68059f5, 87ad5276e4, 5489df75d7, c6b3f5b888, 15db024811,
6d73003b17, 13259a109a, aa78be574a, d0dd1b30d1, 0338a15192, e10d99b728, c9018f81ec, 31718492c7,
2209878f48, dd77dbe3ab, eb19e12527, 551e86a517, 8734c05f64, 0c8cbfb7de, 89c3428d85, 707e96c541,
26e0a00c4c, d0f8f00e7e, a39132787c, 296994ebf0, b5b31eec88, 8f6851c349, 0788461abd, 3bfd1f6d8a,
d83c3a12bf, 79200cf3c2, bcb6789888, 89b7933ef1, 4da5a8081f, 53e9f00804, 6e25e185f6, 68ceeb64f6,
edae976b81, 9f4366bc9d, 99e0a60aab, d38729fbac, ff0d21cfd5, 9140a7cb86, 41fe18bc80, 9105573cb3,
fff87e95d1, 9beb29a34c, ca00f5aed9, 637777b8e7, 1cf851e054, 961f965f0c, 760fc3bc12, e3fc7d8aa6,
2b3b209e40, 78903ac285, f361acc11c, ed185c0026, 6dc34beb71, c2205f88e6, abdbe185c5, c1b816cb7e,
0559558715, 75965474fc, 5dc014fdf4, 291a9fcea1, dd994b9d7f, 83901b30e3, bcfa21a6e7, af1da28459,
ed2ee4e8cc, f293c8ffd6, 714c370191, a29d4e9c3a, 74983f8a96, 11c5b86981, 383f4c0ee9, 045e7ad4a1,
0e80291804, c99773b652, 5f9e3e33cd, 6fc21afbc9, 50445d4a27, 11a2efe49b, d8a680ee57, f405a2c57d,
3576e690fa, 057ac361ef, d9675a4a20, c27271f3ae, a3e4f4c2e3, b5030badbe, b6132fc23e, f33b1b3d77,
c382788342, e193a1f273, eb70672f4a, 87df179ca9, 982a950ccf, c2435eeca5, 68c56440cf, 31eeb50ce0,
0039b3b046, ffb1a08871, d13823043d, b665b81a0e, 6b9b177b89, b1acf8d931, 97f731da7e, 1bf29da0d6,
2c3fec014f, 4c38157ee0, b5f8e87e2f, 6a2efd060e, cda336295f, 02f4256cb6, 492ba3d127, cbf8d46d3e,
58598f01b0, 89fe7e1ac1, a24712f7f7, 8446fef00d, 8bcdfbb24e, b8ebc14a23, aa442bc52f, 2e024b7ede,
c8205ff511, ea0a25d7fe, 29b5df3881, 690b620b7f, c55c9785be, 20e04fc3dd, 078137f0ba, d0f5a1cc96
@@ -1,18 +0,0 @@
-{
-  "permissions": {
-    "allow": [
-      "Bash(uv run:*)",
-      "Bash(make:*)",
-      "WebSearch",
-      "WebFetch(domain:ai.pydantic.dev)",
-      "WebFetch(domain:openai.github.io)",
-      "Bash(uv run:*)",
-      "Bash(python3:*)",
-      "WebFetch(domain:github.com)",
-      "Bash(gh pr view:*)",
-      "Bash(gh pr diff:*)"
-    ],
-    "deny": [],
-    "ask": []
-  }
-}
18 .github/scripts/check_diff.py vendored

@@ -132,21 +132,21 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
     if job == "codspeed":
         py_versions = ["3.12"]  # 3.13 is not yet supported
     elif dir_ == "libs/core":
-        py_versions = ["3.10", "3.11", "3.12", "3.13"]
+        py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
     # custom logic for specific directories
 
     elif dir_ == "libs/langchain" and job == "extended-tests":
-        py_versions = ["3.10", "3.13"]
+        py_versions = ["3.10", "3.14"]
     elif dir_ == "libs/langchain_v1":
         py_versions = ["3.10", "3.13"]
-    elif dir_ in {"libs/cli"}:
+    elif dir_ in {"libs/cli", "libs/partners/chroma", "libs/partners/nomic"}:
         py_versions = ["3.10", "3.13"]
 
     elif dir_ == ".":
         # unable to install with 3.13 because tokenizers doesn't support 3.13 yet
         py_versions = ["3.10", "3.12"]
     else:
-        py_versions = ["3.10", "3.13"]
+        py_versions = ["3.10", "3.14"]
 
     return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]

@@ -257,7 +257,15 @@ if __name__ == "__main__":
                 ".github/scripts/check_diff.py",
             )
         ):
-            # add all LANGCHAIN_DIRS for infra changes
+            # Infrastructure changes (workflows, actions, CI scripts) trigger tests on
+            # all core packages as a safety measure. This ensures that changes to CI/CD
+            # infrastructure don't inadvertently break package testing, even if the change
+            # appears unrelated (e.g., documentation build workflows). This is intentionally
+            # conservative to catch unexpected side effects from workflow modifications.
+            #
+            # Example: A PR modifying .github/workflows/api_doc_build.yml will trigger
+            # lint/test jobs for libs/core, libs/text-splitters, libs/langchain, and
+            # libs/langchain_v1, even though the workflow may only affect documentation.
             dirs_to_run["extended-test"].update(LANGCHAIN_DIRS)
 
         if file.startswith("libs/core"):
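For context on what the matrix builder returns, here is a hypothetical call against the `libs/core` branch shown above. The call itself is illustrative; only the signature and return statement come from this hunk:

```python
# After this change, libs/core fans out across five interpreter versions.
configs = _get_configs_for_single_dir(job="lint", dir_="libs/core")
assert configs == [
    {"working-directory": "libs/core", "python-version": py_v}
    for py_v in ["3.10", "3.11", "3.12", "3.13", "3.14"]
]
```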
5 .github/scripts/get_min_versions.py vendored

@@ -2,7 +2,6 @@
 
 import sys
 from collections import defaultdict
-from typing import Optional
 
 if sys.version_info >= (3, 11):
     import tomllib

@@ -54,7 +53,7 @@ def get_pypi_versions(package_name: str) -> List[str]:
     return list(response.json()["releases"].keys())
 
 
-def get_minimum_version(package_name: str, spec_string: str) -> Optional[str]:
+def get_minimum_version(package_name: str, spec_string: str) -> str | None:
     """Find the minimum published version that satisfies the given constraints.
 
     Args:

@@ -114,7 +113,7 @@ def get_min_version_from_toml(
     versions_for: str,
     python_version: str,
     *,
-    include: Optional[list] = None,
+    include: list | None = None,
 ):
     # Parse the TOML file
     with open(toml_path, "rb") as file:
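`get_minimum_version` combines the release list from `get_pypi_versions` with a specifier string such as `>=0.2,<0.4`. A minimal sketch of that resolution step, assuming the `packaging` library; the helper name below is hypothetical, not the script's actual implementation:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

def minimum_satisfying_version(releases: list[str], spec_string: str) -> str | None:
    """Return the lowest published release allowed by the specifier, or None."""
    spec = SpecifierSet(spec_string)
    candidates = sorted(Version(v) for v in releases if v in spec)
    return str(candidates[0]) if candidates else None

print(minimum_satisfying_version(["0.1.0", "0.2.5", "0.3.0"], ">=0.2,<0.4"))  # 0.2.5
```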
2 .github/workflows/_release.yml vendored

@@ -385,7 +385,6 @@ jobs:
   test-prior-published-packages-against-new-core:
     # Installs the new core with old partners: Installs the new unreleased core
     # alongside the previously published partner packages and runs integration tests
-    if: false # temporarily skip
     needs:
       - build
       - release-notes

@@ -476,6 +475,7 @@
       - release-notes
       - test-pypi-publish
       - pre-release-checks
+      - test-prior-published-packages-against-new-core
     runs-on: ubuntu-latest
     permissions:
       # This permission is used for trusted publishing:
2 .github/workflows/check_diffs.yml vendored

@@ -186,7 +186,7 @@ jobs:
 
       # We have to use 3.12 as 3.13 is not yet supported
       - name: "📦 Install UV Package Manager"
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
         with:
           python-version: "3.12"
23 .github/workflows/integration_tests.yml vendored

@@ -23,10 +23,8 @@ permissions:
   contents: read
 
 env:
-  POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
   DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
-  POETRY_LIBS: ("libs/partners/aws")
 
 jobs:
   # Generate dynamic test matrix based on input parameters or defaults

@@ -60,7 +58,6 @@ jobs:
           echo $matrix
           echo "matrix=$matrix" >> $GITHUB_OUTPUT
   # Run integration tests against partner libraries with live API credentials
-  # Tests are run with Poetry or UV depending on the library's setup
   build:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"

@@ -95,17 +92,7 @@
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
           mv langchain-aws/libs/aws langchain/libs/partners/aws
 
-      - name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        uses: "./langchain/.github/actions/poetry_setup"
-        with:
-          python-version: ${{ matrix.python-version }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: langchain/${{ matrix.working-directory }}
-          cache-key: scheduled
-
       - name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
         uses: "./langchain/.github/actions/uv_setup"
         with:
           python-version: ${{ matrix.python-version }}

@@ -123,15 +110,7 @@
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           aws-region: ${{ secrets.AWS_REGION }}
 
-      - name: "📦 Install Dependencies (Poetry)"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        run: |
-          echo "Running scheduled tests, installing dependencies with poetry..."
-          cd langchain/${{ matrix.working-directory }}
-          poetry install --with=test_integration,test
-
-      - name: "📦 Install Dependencies (UV)"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
+      - name: "📦 Install Dependencies"
         run: |
           echo "Running scheduled tests, installing dependencies with uv..."
           cd langchain/${{ matrix.working-directory }}
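`DEFAULT_LIBS` is a JSON array packed into an env var; the matrix job parses it and emits one entry per partner directory. A rough Python sketch of that expansion (the real job builds the matrix in shell and writes it to `$GITHUB_OUTPUT`, so this is illustrative only):

```python
import json

# The DEFAULT_LIBS value from the env block above.
DEFAULT_LIBS = (
    '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", '
    '"libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", '
    '"libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
)

matrix = {"working-directory": json.loads(DEFAULT_LIBS)}
print(len(matrix["working-directory"]))  # 9 partner packages, one job each
```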
31 .github/workflows/pr_labeler_title.yml vendored

@@ -1,6 +1,7 @@
 # Label PRs based on their titles.
 #
-# See `.github/pr-title-labeler.yml` to see rules for each label/title pattern.
+# Uses conventional commit types from PR titles to apply labels.
+# Note: Scope-based labeling (e.g., integration labels) is handled by pr_labeler_file.yml
 
 name: "🏷️ PR Title Labeler"

@@ -8,7 +9,7 @@ on:
   # Safe since we're not checking out or running the PR's code
   # Never check out the PR's head in a pull_request_target job
   pull_request_target:
-    types: [opened, synchronize, reopened, edited]
+    types: [opened, edited]
 
 jobs:
   pr-title-labeler:

@@ -20,14 +21,24 @@
     runs-on: ubuntu-latest
 
     steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+        with:
+          ref: ${{ github.event.pull_request.base.ref }}
+
       - name: Label PR based on title
-        # Archived repo; latest commit (v0.1.0)
-        uses: grafana/pr-labeler-action@f19222d3ef883d2ca5f04420fdfe8148003763f0
+        uses: bcoe/conventional-release-labels@v1
         with:
          token: ${{ secrets.GITHUB_TOKEN }}
-          configuration-path: .github/pr-title-labeler.yml
+          type_labels: >-
+            {
+              "feat": "feature",
+              "fix": "fix",
+              "docs": "documentation",
+              "style": "linting",
+              "refactor": "refactor",
+              "perf": "performance",
+              "test": "tests",
+              "build": "infra",
+              "ci": "infra",
+              "chore": "infra",
+              "revert": "revert",
+              "release": "release",
+              "breaking": "breaking"
+            }
+          ignored_types: '[]'
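`bcoe/conventional-release-labels` keys on the conventional-commit type at the front of the PR title. A rough Python sketch of that lookup (assumed behavior, not the action's actual source; the regex and helper name are illustrative):

```python
import re

# Mirrors the type_labels mapping from the workflow above.
TYPE_LABELS = {
    "feat": "feature", "fix": "fix", "docs": "documentation", "style": "linting",
    "refactor": "refactor", "perf": "performance", "test": "tests",
    "build": "infra", "ci": "infra", "chore": "infra",
    "revert": "revert", "release": "release", "breaking": "breaking",
}

def label_for_title(title: str) -> str | None:
    """Map 'type(scope)!: subject' to its configured label, if any."""
    match = re.match(r"^(\w+)(\([^)]*\))?!?:", title)
    return TYPE_LABELS.get(match.group(1)) if match else None

print(label_for_title("feat(anthropic): add citation support"))  # feature
print(label_for_title("update readme"))                          # None
```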
@@ -78,7 +78,16 @@ jobs:
             exit 1
           fi
           echo "Checking out $repo to $REPO_NAME"
-          git clone --depth 1 https://github.com/$repo.git $REPO_NAME
+
+          # Special handling for langchain-tavily: checkout by commit hash
+          if [[ "$REPO_NAME" == "langchain-tavily" ]]; then
+            git clone https://github.com/$repo.git $REPO_NAME
+            cd $REPO_NAME
+            git checkout f3515654724a9e87bdfe2c2f509d6cdde646e563
+            cd ..
+          else
+            git clone --depth 1 --branch v0.3 https://github.com/$repo.git $REPO_NAME
+          fi
         done
 
       - name: "🐍 Setup Python ${{ env.PYTHON_VERSION }}"

@@ -106,7 +115,10 @@
         working-directory: langchain
         run: |
           # Install all partner packages in editable mode with overrides
-          python -m uv pip install $(ls ./libs/partners | xargs -I {} echo "./libs/partners/{}") --overrides ./docs/vercel_overrides.txt
+          python -m uv pip install $(ls ./libs/partners | grep -v azure-ai | xargs -I {} echo "./libs/partners/{}") --overrides ./docs/vercel_overrides.txt --prerelease=allow
+
+          # Install langchain-azure-ai with tools extra
+          python -m uv pip install "./libs/partners/azure-ai[tools]" --overrides ./docs/vercel_overrides.txt --prerelease=allow
 
           # Install core langchain and other main packages
           python -m uv pip install libs/core libs/langchain libs/text-splitters libs/community libs/experimental libs/standard-tests
10 AGENTS.md

@@ -149,23 +149,25 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
     Args:
         to: The email address of the recipient.
         msg: The message body to send.
-        priority: Email priority level (``'low'``, ``'normal'``, ``'high'``).
+        priority: Email priority level (`'low'`, `'normal'`, `'high'`).
 
     Returns:
-        True if email was sent successfully, False otherwise.
+        `True` if email was sent successfully, `False` otherwise.
 
     Raises:
-        InvalidEmailError: If the email address format is invalid.
-        SMTPConnectionError: If unable to connect to email server.
+        `InvalidEmailError`: If the email address format is invalid.
+        `SMTPConnectionError`: If unable to connect to email server.
     """
 ```
 
 **Documentation Guidelines:**
 
 - Types go in function signatures, NOT in docstrings
+- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
 - Focus on "why" rather than "what" in descriptions
 - Document all parameters, return values, and exceptions
 - Keep descriptions concise but clear
 - Ensure American English spelling (e.g., "behavior", not "behaviour")
+
+📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
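Taken together, the guidelines look like this in practice; a hypothetical function (not from the repo, and with an illustrative exception name) written to the conventions above:

```python
def archive_thread(thread_id: str, *, notify: bool = True) -> bool:
    """Archive a conversation thread so stale threads don't clutter search.

    Args:
        thread_id: Identifier of the thread to archive.
        notify: Whether to notify participants after archiving.

    Returns:
        `True` if the thread was archived, `False` if it was already archived.

    Raises:
        `ThreadNotFoundError`: If no thread exists with `thread_id`.
    """
    ...
```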
10 CLAUDE.md

@@ -149,23 +149,25 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
     Args:
         to: The email address of the recipient.
         msg: The message body to send.
-        priority: Email priority level (``'low'``, ``'normal'``, ``'high'``).
+        priority: Email priority level (`'low'`, `'normal'`, `'high'`).
 
     Returns:
-        True if email was sent successfully, False otherwise.
+        `True` if email was sent successfully, `False` otherwise.
 
     Raises:
-        InvalidEmailError: If the email address format is invalid.
-        SMTPConnectionError: If unable to connect to email server.
+        `InvalidEmailError`: If the email address format is invalid.
+        `SMTPConnectionError`: If unable to connect to email server.
     """
 ```
 
 **Documentation Guidelines:**
 
 - Types go in function signatures, NOT in docstrings
+- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
 - Focus on "why" rather than "what" in descriptions
 - Document all parameters, return values, and exceptions
 - Keep descriptions concise but clear
 - Ensure American English spelling (e.g., "behavior", not "behaviour")
+
+📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
@@ -2,10 +2,8 @@
 Please see the following guides for migrating LangChain code:
 
 * Migrate to [LangChain v1.0](https://docs.langchain.com/oss/python/migrate/langchain-v1)
 * Migrate to [LangChain v0.3](https://python.langchain.com/docs/versions/v0_3/)
 * Migrate to [LangChain v0.2](https://python.langchain.com/docs/versions/v0_2/)
 * Migrating from [LangChain 0.0.x Chains](https://python.langchain.com/docs/versions/migrating_chains/)
 * Upgrade to [LangGraph Memory](https://python.langchain.com/docs/versions/migrating_memory/)
 
 The [LangChain CLI](https://python.langchain.com/docs/versions/v0_3/#migrate-using-langchain-cli) can help you automatically upgrade your code to use non-deprecated imports.
 This will be especially helpful if you're still on either version 0.0.x or 0.1.x of LangChain.
30 README.md

@@ -12,13 +12,16 @@
 <p align="center">
   <a href="https://opensource.org/licenses/MIT" target="_blank">
-    <img src="https://img.shields.io/pypi/l/langchain-core?style=flat-square" alt="PyPI - License">
+    <img src="https://img.shields.io/pypi/l/langchain" alt="PyPI - License">
   </a>
-  <a href="https://pypistats.org/packages/langchain-core" target="_blank">
+  <a href="https://pypistats.org/packages/langchain" target="_blank">
     <img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads">
   </a>
+  <a href="https://pypi.org/project/langchain/#history" target="_blank">
+    <img src="https://img.shields.io/pypi/v/langchain?label=%20" alt="Version">
+  </a>
   <a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank">
-    <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square" alt="Open in Dev Containers">
+    <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode" alt="Open in Dev Containers">
   </a>
   <a href="https://codespaces.new/langchain-ai/langchain" target="_blank">
     <img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20">

@@ -34,14 +37,14 @@
 LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.
 
 ```bash
-pip install -U langchain
+pip install langchain
 ```
 
 ---
 
-**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
+**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/oss/python/langchain/overview).
 
-If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
+If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.
 
 > [!NOTE]
 > Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

@@ -62,16 +65,13 @@ While the LangChain framework can be used standalone, it also integrates seamles
 To improve your LLM application development, pair LangChain with:
 
 - [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
-- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
-- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
+- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
+- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio).
 
 ## Additional resources
 
-- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key
-concepts behind the LangChain framework.
-- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with
-guided examples on getting started with LangChain.
-- [API Reference](https://reference.langchain.com/python/): Detailed reference on
+- [Learn](https://docs.langchain.com/oss/python/learn): Use cases, conceptual overviews, and more.
+- [API Reference](https://reference.langchain.com/python): Detailed reference on
 navigating base packages and integrations for LangChain.
-- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
-- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
+- [LangChain Forum](https://forum.langchain.com): Connect with the community and share all of your technical questions, ideas, and feedback.
+- [Chat LangChain](https://chat.langchain.com): Ask questions & chat with our documentation.
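The README's pitch about chaining interoperable components is easiest to see in code. A minimal sketch, assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set (the model name is illustrative); it uses the same `prompt | model` composition the notebook templates below demonstrate:

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate(
    [
        ("system", "You are a helpful assistant that translates {input_language} to {output_language}."),
        ("human", "{input}"),
    ]
)
model = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # illustrative model name

# Two components composed into one runnable pipeline.
chain = prompt | model
result = chain.invoke(
    {"input_language": "English", "output_language": "German", "input": "I love programming."}
)
print(result.content)
```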
@@ -1,2 +1,32 @@
 # Packages
 
+> [!IMPORTANT]
+> [**View all LangChain integrations packages**](https://docs.langchain.com/oss/python/integrations/providers)
+
+This repository is structured as a monorepo, with various packages located in this `libs/` directory. Packages to note in this directory include:
+
+```txt
+core/            # Core primitives and abstractions for langchain
+langchain/       # langchain-classic
+langchain_v1/    # langchain
+partners/        # Certain third-party provider integrations (see below)
+standard-tests/  # Standardized tests for integrations
+text-splitters/  # Text splitter utilities
+```
+
+(Each package contains its own `README.md` file with specific details about that package.)
+
+## Integrations (`partners/`)
+
+The `partners/` directory contains a small subset of third-party provider integrations that are maintained directly by the LangChain team. These include, but are not limited to:
+
+* [OpenAI](https://pypi.org/project/langchain-openai/)
+* [Anthropic](https://pypi.org/project/langchain-anthropic/)
+* [Ollama](https://pypi.org/project/langchain-ollama/)
+* [DeepSeek](https://pypi.org/project/langchain-deepseek/)
+* [xAI](https://pypi.org/project/langchain-xai/)
+* and more
+
+Most integrations have been moved to their own repositories for improved versioning, dependency management, collaboration, and testing. This includes packages from popular providers such as [Google](https://github.com/langchain-ai/langchain-google) and [AWS](https://github.com/langchain-ai/langchain-aws). Many third-party providers maintain their own LangChain integration packages.
+
+For a full list of all LangChain integrations, please refer to the [LangChain Integrations documentation](https://docs.langchain.com/oss/python/integrations/providers).
@@ -1,6 +1,30 @@
 # langchain-cli
 
-This package implements the official CLI for LangChain. Right now, it is most useful
-for getting started with LangChain Templates!
+[](https://pypi.org/project/langchain-cli/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-cli)
+[](https://twitter.com/langchainai)
+
+## Quick Install
+
+```bash
+pip install langchain-cli
+```
+
+## 🤔 What is this?
+
+This package implements the official CLI for LangChain. Right now, it is most useful for getting started with LangChain Templates!
+
+## 📖 Documentation
+
+[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md)
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
@@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab
 ```python
 from __module_name__ import Chat__ModuleName__
 
-llm = Chat__ModuleName__()
-llm.invoke("Sing a ballad of LangChain.")
+model = Chat__ModuleName__()
+model.invoke("Sing a ballad of LangChain.")
 ```
 
 ## Embeddings

@@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from __module_name__ import __ModuleName__LLM
 
-llm = __ModuleName__LLM()
-llm.invoke("The meaning of life is")
+model = __ModuleName__LLM()
+model.invoke("The meaning of life is")
 ```
@@ -1,262 +1,264 @@
 {
  "cells": [
   {
    "cell_type": "raw",
    "id": "afaf8039",
    "metadata": {},
    "source": [
     "---\n",
     "sidebar_label: __ModuleName__\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e49f1e0d",
    "metadata": {},
    "source": [
     "# Chat__ModuleName__\n",
     "\n",
     "- TODO: Make sure API reference link is correct.\n",
     "\n",
     "This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
     "\n",
     "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
     "\n",
     "## Overview\n",
     "### Integration details\n",
     "\n",
     "- TODO: Fill in table features.\n",
     "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
     "- TODO: Make sure API reference links are correct.\n",
     "\n",
     "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
     "| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
     "\n",
     "### Model features\n",
     "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
     "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
     "| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
     "\n",
     "## Setup\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
     "\n",
     "### Credentials\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
    "metadata": {},
    "outputs": [],
    "source": [
     "import getpass\n",
     "import os\n",
     "\n",
     "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
     "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
     "        \"Enter your __ModuleName__ API key: \"\n",
     "    )"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
    "metadata": {},
    "outputs": [],
    "source": [
     "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
     "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
    "metadata": {},
    "source": [
     "### Installation\n",
     "\n",
     "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
    "metadata": {},
    "outputs": [],
    "source": [
     "%pip install -qU __package_name__"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "a38cde65-254d-4219-a441-068766c0d4b5",
    "metadata": {},
    "source": [
     "## Instantiation\n",
     "\n",
     "Now we can instantiate our model object and generate chat completions:\n",
     "\n",
     "- TODO: Update model instantiation with relevant params."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
    "metadata": {},
    "outputs": [],
    "source": [
     "from __module_name__ import Chat__ModuleName__\n",
     "\n",
-    "llm = Chat__ModuleName__(\n",
+    "model = Chat__ModuleName__(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
     "    timeout=None,\n",
     "    max_retries=2,\n",
     "    # other params...\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "2b4f3e15",
    "metadata": {},
    "source": [
     "## Invocation\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "62e0dbc3",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
     "messages = [\n",
     "    (\n",
     "        \"system\",\n",
     "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
     "    ),\n",
     "    (\"human\", \"I love programming.\"),\n",
     "]\n",
-    "ai_msg = llm.invoke(messages)\n",
+    "ai_msg = model.invoke(messages)\n",
     "ai_msg"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
    "metadata": {},
    "outputs": [],
    "source": [
     "print(ai_msg.content)"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
    "metadata": {},
    "source": [
     "## Chaining\n",
     "\n",
     "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "\n",
     "prompt = ChatPromptTemplate(\n",
     "    [\n",
     "        (\n",
     "            \"system\",\n",
     "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
     "        ),\n",
     "        (\"human\", \"{input}\"),\n",
     "    ]\n",
     ")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"input_language\": \"English\",\n",
     "        \"output_language\": \"German\",\n",
     "        \"input\": \"I love programming.\",\n",
     "    }\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
    "metadata": {},
    "source": [
     "## TODO: Any functionality specific to this model provider\n",
     "\n",
     "E.g. creating/using finetuned models via this provider. Delete if not relevant."
    ]
   },
   {
    "cell_type": "markdown",
    "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
    "metadata": {},
    "source": [
     "## API reference\n",
     "\n",
     "For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
    ]
   }
  ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -1,236 +1,238 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "67db2992",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: __ModuleName__\n",
|
||||
"---"
|
||||
]
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "67db2992",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: __ModuleName__\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9597802c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# __ModuleName__LLM\n",
|
||||
"\n",
|
||||
"- [ ] TODO: Make sure API reference link is correct\n",
|
||||
"\n",
|
||||
"This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"- TODO: Fill in table features.\n",
|
||||
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
|
||||
"- TODO: Make sure API reference links are correct.\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc51e756",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
|
||||
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your __ModuleName__ API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b6e1ca6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "196c2b41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "809c6577",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59c710c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU __package_name__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0a760037",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a0562a13",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from __module_name__ import __ModuleName__LLM\n",
|
||||
"\n",
|
||||
"model = __ModuleName__LLM(\n",
|
||||
" model=\"model-name\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ee90032",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"- [ ] TODO: Run cells so output can be seen."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "035dea0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"input_text = \"__ModuleName__ is an AI company that \"\n",
|
||||
"\n",
|
||||
"completion = model.invoke(input_text)\n",
|
||||
"completion"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "add38532",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
|
||||
"\n",
|
||||
"- TODO: Run cells so output can be seen."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "078e9db2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
|
||||
"\n",
|
||||
"chain = prompt | model\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e99eef30",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## TODO: Any functionality specific to this model provider\n",
|
||||
"\n",
|
||||
"E.g. creating/using finetuned models via this provider. Delete if not relevant"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9bdfcef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.11.1 64-bit",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9597802c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# __ModuleName__LLM\n",
|
||||
"\n",
|
||||
"- [ ] TODO: Make sure API reference link is correct\n",
|
||||
"\n",
|
||||
"This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"- TODO: Fill in table features.\n",
|
||||
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
|
||||
"- TODO: Make sure API reference links are correct.\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc51e756",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
|
||||
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your __ModuleName__ API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b6e1ca6",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "196c2b41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "809c6577",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59c710c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU __package_name__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0a760037",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a0562a13",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from __module_name__ import __ModuleName__LLM\n",
|
||||
"\n",
|
||||
"llm = __ModuleName__LLM(\n",
|
||||
" model=\"model-name\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ee90032",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"- [ ] TODO: Run cells so output can be seen."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "035dea0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"input_text = \"__ModuleName__ is an AI company that \"\n",
|
||||
"\n",
|
||||
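    "# invoke() on an LLM accepts a prompt string and returns the completion as a string\n",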
"completion = llm.invoke(input_text)\n",
|
||||
"completion"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "add38532",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
|
||||
"\n",
|
||||
"- TODO: Run cells so output can be seen."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "078e9db2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
|
||||
"\n",
|
||||
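    "# The | operator composes runnables: the formatted prompt is piped into the LLM\n",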
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e99eef30",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## TODO: Any functionality specific to this model provider\n",
|
||||
"\n",
|
||||
"E.g. creating/using finetuned models via this provider. Delete if not relevant"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9bdfcef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.11.1 64-bit",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -155,7 +155,7 @@
    "\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
-   "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+   "model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
   ]
  },
  {
@@ -185,7 +185,7 @@
    "chain = (\n",
    "    {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
    "    | prompt\n",
-   "    | llm\n",
+   "    | model\n",
    "    | StrOutputParser()\n",
    ")"
   ]
@@ -1,204 +1,204 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {
    "vscode": {
     "languageId": "raw"
    }
   },
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: __ModuleName__ByteStore\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# __ModuleName__ByteStore\n",
|
||||
"\n",
|
||||
"- TODO: Make sure API reference link is correct.\n",
|
||||
"\n",
|
||||
"This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
|
||||
"\n",
|
||||
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"- TODO: (Optional) A short introduction to the underlying technology/API.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"- TODO: Fill in table features.\n",
|
||||
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
|
||||
"- TODO: Make sure API reference links are correct.\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
|
||||
"\n",
|
||||
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
|
||||
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your __ModuleName__ API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU __package_name__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our byte store:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from __module_name__ import __ModuleName__ByteStore\n",
|
||||
"\n",
|
||||
"kv_store = __ModuleName__ByteStore(\n",
|
||||
" # params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"- TODO: Run cells so output can be seen.\n",
|
||||
"\n",
|
||||
"You can set data under keys like this using the `mset` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kv_store.mset(\n",
|
||||
" [\n",
|
||||
" [\"key1\", b\"value1\"],\n",
|
||||
" [\"key2\", b\"value2\"],\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"kv_store.mget(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And you can delete data using the `mdelete` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kv_store.mdelete(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"kv_store.mget(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## TODO: Any functionality specific to this key-value store provider\n",
|
||||
"\n",
|
||||
"E.g. extra initialization. Delete if not relevant."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: __ModuleName__ByteStore\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# __ModuleName__ByteStore\n",
|
||||
"\n",
|
||||
"- TODO: Make sure API reference link is correct.\n",
|
||||
"\n",
|
||||
"This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
|
||||
"\n",
|
||||
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"- TODO: (Optional) A short introduction to the underlying technology/API.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"- TODO: Fill in table features.\n",
|
||||
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
|
||||
"- TODO: Make sure API reference links are correct.\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
|
||||
"\n",
|
||||
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
|
||||
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your __ModuleName__ API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU __package_name__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our byte store:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from __module_name__ import __ModuleName__ByteStore\n",
|
||||
"\n",
|
||||
"kv_store = __ModuleName__ByteStore(\n",
|
||||
" # params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"- TODO: Run cells so output can be seen.\n",
|
||||
"\n",
|
||||
"You can set data under keys like this using the `mset` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kv_store.mset(\n",
|
||||
" [\n",
|
||||
" [\"key1\", b\"value1\"],\n",
|
||||
" [\"key2\", b\"value2\"],\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
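    "# mget returns the stored values in the same order as the requested keys\n",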
"kv_store.mget(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And you can delete data using the `mdelete` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kv_store.mdelete(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
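    "# mget now returns None for the deleted keys\n",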
"kv_store.mget(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## TODO: Any functionality specific to this key-value store provider\n",
|
||||
"\n",
|
||||
"E.g. extra initialization. Delete if not relevant."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -1,271 +1,271 @@
{
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "10238e62-3465-4973-9279-606cbb7ccf16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: __ModuleName__\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a6f91f20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# __ModuleName__\n",
|
||||
"\n",
|
||||
"- TODO: Make sure API reference link is correct.\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
|
||||
"\n",
|
||||
"- TODO: Add any other relevant links, like information about underlying API, etc.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"- TODO: Make sure links and features are correct\n",
|
||||
"\n",
|
||||
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ |  |\n",
|
||||
"\n",
|
||||
"### Tool features\n",
|
||||
"\n",
|
||||
"- TODO: Add feature table if it makes sense\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"- TODO: Add any additional deps\n",
|
||||
"\n",
|
||||
"The integration lives in the `langchain-community` package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f85b4089",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --quiet -U langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b15e9266",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"- TODO: Add any credentials that are needed"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
|
||||
"# os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"- TODO: Fill in instantiation params\n",
|
||||
"\n",
|
||||
"Here we show how to instantiate an instance of the __ModuleName__ tool, with "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools import __ModuleName__\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tool = __ModuleName__(...)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "74147a1a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
|
||||
"\n",
|
||||
"- TODO: Describe what the tool args are, fill them in, run cell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool.invoke({...})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6e73897",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
|
||||
"\n",
|
||||
"We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
|
||||
"\n",
|
||||
"- TODO: Fill in tool args and run cell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f90e33a7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
|
||||
"model_generated_tool_call = {\n",
|
||||
" \"args\": {...}, # TODO: FILL IN\n",
|
||||
" \"id\": \"1\",\n",
|
||||
" \"name\": tool.name,\n",
|
||||
" \"type\": \"tool_call\",\n",
|
||||
"}\n",
|
||||
"tool.invoke(model_generated_tool_call)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"- TODO: Add user question and run cells\n",
|
||||
"\n",
|
||||
"We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"# !pip install -qU langchain langchain-openai\n",
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bea35fa1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"tools = [tool]\n",
|
||||
"agent = create_react_agent(model, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"example_query = \"...\"\n",
|
||||
"\n",
|
||||
"events = agent.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
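    "# stream_mode=\"values\" yields the full message state after each agent step\n",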
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ac8146c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -1,6 +1,6 @@
 """__ModuleName__ chat models."""

-from typing import Any, Dict, Iterator, List, Optional
+from typing import Any, Dict, Iterator, List

 from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
@@ -26,30 +26,30 @@ class Chat__ModuleName__(BaseChatModel):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args — completion params:
        model:
            Name of __ModuleName__ model to use.
        temperature:
            Sampling temperature.
        max_tokens:
            Max number of tokens to generate.

    # TODO: Populate with relevant params.
    Key init args — client params:
        timeout:
            Timeout for requests.
        max_retries:
            Max number of retries.
        api_key:
            __ModuleName__ API key. If not passed in will be read from env var
            __MODULE_NAME___API_KEY.
@@ -57,226 +57,224 @@ class Chat__ModuleName__(BaseChatModel):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __module_name__ import Chat__ModuleName__

        model = Chat__ModuleName__(
            model="...",
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
            # api_key="...",
            # other params...
        )
        ```

    Invoke:
        ```python
        messages = [
            ("system", "You are a helpful translator. Translate the user sentence to French."),
            ("human", "I love programming."),
        ]
        model.invoke(messages)
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if token-level streaming isn't supported.
    Stream:
        ```python
        for chunk in model.stream(messages):
            print(chunk.text, end="")
        ```

        ```python
        # TODO: Example output.
        ```

        ```python
        stream = model.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
        full
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if native async isn't supported.
    Async:
        ```python
        await model.ainvoke(messages)

        # stream:
        # async for chunk in (await model.astream(messages))

        # batch:
        # await model.abatch([messages])
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if .bind_tools() isn't supported.
    Tool calling:
        ```python
        from pydantic import BaseModel, Field

        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

        class GetPopulation(BaseModel):
            '''Get the current population in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

        model_with_tools = model.bind_tools([GetWeather, GetPopulation])
        ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
        ai_msg.tool_calls
        ```

        ```python
        # TODO: Example output.
        ```

        See `Chat__ModuleName__.bind_tools()` method for more.

    # TODO: Delete if .with_structured_output() isn't supported.
    Structured output:
        ```python
        from typing import Optional

        from pydantic import BaseModel, Field

        class Joke(BaseModel):
            '''Joke to tell user.'''

            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")
            rating: int | None = Field(description="How funny the joke is, from 1 to 10")

        structured_model = model.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats")
        ```

        ```python
        # TODO: Example output.
        ```

        See `Chat__ModuleName__.with_structured_output()` for more.

    # TODO: Delete if JSON mode response format isn't supported.
    JSON mode:
        ```python
        # TODO: Replace with appropriate bind arg.
        json_model = model.bind(response_format={"type": "json_object"})
        ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
        ai_msg.content
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if image inputs aren't supported.
    Image input:
        ```python
        import base64
        import httpx
        from langchain_core.messages import HumanMessage

        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        # TODO: Replace with appropriate message content format.
        message = HumanMessage(
            content=[
                {"type": "text", "text": "describe the weather in this image"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                },
            ],
        )
        ai_msg = model.invoke([message])
        ai_msg.content
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if audio inputs aren't supported.
    Audio input:
        ```python
        # TODO: Example input
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Delete if video inputs aren't supported.
    Video input:
        ```python
        # TODO: Example input
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Delete if token usage metadata isn't supported.
    Token usage:
        ```python
        ai_msg = model.invoke(messages)
        ai_msg.usage_metadata
        ```

        ```python
        {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
        ```

    # TODO: Delete if logprobs aren't supported.
    Logprobs:
        ```python
        # TODO: Replace with appropriate bind arg.
        logprobs_model = model.bind(logprobs=True)
        ai_msg = logprobs_model.invoke(messages)
        ai_msg.response_metadata["logprobs"]
        ```

        ```python
        # TODO: Example output.
        ```

    Response metadata
        ```python
        ai_msg = model.invoke(messages)
        ai_msg.response_metadata
        ```

        ```python
        # TODO: Example output.
        ```
    """  # noqa: E501

    model_name: str = Field(alias="model")
    """The name of the model"""
    parrot_buffer_length: int
    """The number of characters from the last message of the prompt to be echoed."""
    temperature: float | None = None
    max_tokens: int | None = None
    timeout: int | None = None
    stop: list[str] | None = None
    max_retries: int = 2

    @property
@@ -302,8 +300,8 @@ class Chat__ModuleName__(BaseChatModel):
     def _generate(
         self,
         messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         """Override the _generate method to implement the chat model logic.
@@ -314,11 +312,11 @@ class Chat__ModuleName__(BaseChatModel):
        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                If generation stops due to a stop token, the stop token itself
                SHOULD BE INCLUDED as part of the output. This is not enforced
                across models right now, but it's a good practice to follow since
                it makes it much easier to parse the output of the model
                downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.
        """
        # Replace this with actual logic to generate a response from a list
@@ -348,8 +346,8 @@ class Chat__ModuleName__(BaseChatModel):
     def _stream(
         self,
         messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         """Stream the output of the model.
@@ -362,11 +360,11 @@ class Chat__ModuleName__(BaseChatModel):
        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                If generation stops due to a stop token, the stop token itself
                SHOULD BE INCLUDED as part of the output. This is not enforced
                across models right now, but it's a good practice to follow since
                it makes it much easier to parse the output of the model
                downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.
        """
        last_message = messages[-1]
@@ -410,8 +408,8 @@ class Chat__ModuleName__(BaseChatModel):
     # async def _astream(
     #     self,
     #     messages: List[BaseMessage],
-    #     stop: Optional[List[str]] = None,
-    #     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+    #     stop: list[str] | None = None,
+    #     run_manager: AsyncCallbackManagerForLLMRun | None = None,
     #     **kwargs: Any,
     # ) -> AsyncIterator[ChatGenerationChunk]:
@@ -419,7 +417,7 @@ class Chat__ModuleName__(BaseChatModel):
     # async def _agenerate(
     #     self,
     #     messages: List[BaseMessage],
-    #     stop: Optional[List[str]] = None,
-    #     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+    #     stop: list[str] | None = None,
+    #     run_manager: AsyncCallbackManagerForLLMRun | None = None,
     #     **kwargs: Any,
     # ) -> ChatResult:
@@ -14,55 +14,55 @@ class __ModuleName__Loader(BaseLoader):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from langchain_community.document_loaders import __ModuleName__Loader

        loader = __ModuleName__Loader(
            # required params = ...
            # other params = ...
        )
        ```

    Lazy load:
        ```python
        docs = []
        docs_lazy = loader.lazy_load()

        # async variant:
        # docs_lazy = await loader.alazy_load()

        for doc in docs_lazy:
            docs.append(doc)
        print(docs[0].page_content[:100])
        print(docs[0].metadata)
        ```

        ```python
        TODO: Example output
        ```

    # TODO: Delete if async load is not implemented
    Async load:
        ```python
        docs = await loader.aload()
        print(docs[0].page_content[:100])
        print(docs[0].metadata)
        ```

        ```python
        TODO: Example output
        ```
    """

    # TODO: This method must be implemented to load documents.
@@ -8,13 +8,13 @@ class __ModuleName__Embeddings(Embeddings):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args — completion params:
@@ -25,50 +25,50 @@ class __ModuleName__Embeddings(Embeddings):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __module_name__ import __ModuleName__Embeddings

        embed = __ModuleName__Embeddings(
            model="...",
            # api_key="...",
            # other params...
        )
        ```

    Embed single text:
        ```python
        input_text = "The meaning of life is 42"
        embed.embed_query(input_text)
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if token-level streaming isn't supported.
    Embed multiple text:
        ```python
        input_texts = ["Document 1...", "Document 2..."]
        embed.embed_documents(input_texts)
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if native async isn't supported.
    Async:
        ```python
        await embed.aembed_query(input_text)

        # multiple:
        # await embed.aembed_documents(input_texts)
        ```

        ```python
        # TODO: Example output.
        ```
    """

    def __init__(self, model: str):
@@ -14,13 +14,13 @@ class __ModuleName__Retriever(BaseRetriever):

    # TODO: Replace with relevant packages, env vars, etc.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args:
@@ -31,58 +31,58 @@ class __ModuleName__Retriever(BaseRetriever):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __package_name__ import __ModuleName__Retriever

        retriever = __ModuleName__Retriever(
            # ...
        )
        ```

    Usage:
        ```python
        query = "..."

        retriever.invoke(query)
        ```

        ```txt
        # TODO: Example output.
        ```

    Use within a chain:
        ```python
        from langchain_core.output_parsers import StrOutputParser
        from langchain_core.prompts import ChatPromptTemplate
        from langchain_core.runnables import RunnablePassthrough
        from langchain_openai import ChatOpenAI

        prompt = ChatPromptTemplate.from_template(
            \"\"\"Answer the question based only on the context provided.

        Context: {context}

        Question: {question}\"\"\"
        )

        model = ChatOpenAI(model="gpt-3.5-turbo-0125")

        def format_docs(docs):
            return "\\n\\n".join(doc.page_content for doc in docs)

        chain = (
            {"context": retriever | format_docs, "question": RunnablePassthrough()}
            | prompt
            | model
            | StrOutputParser()
        )

        chain.invoke("...")
        ```

        ```
        # TODO: Example output.
        ```

    """
@@ -12,13 +12,13 @@ class __ModuleName__Toolkit(BaseToolkit):

     # TODO: Replace with relevant packages, env vars, etc.
     Setup:
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.

-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```

     # TODO: Populate with relevant params.
     Key init args:
@@ -29,42 +29,42 @@ class __ModuleName__Toolkit(BaseToolkit):

     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __package_name__ import __ModuleName__Toolkit
-
-            toolkit = __ModuleName__Toolkit(
-                # ...
-            )
+        ```python
+        from __package_name__ import __ModuleName__Toolkit
+
+        toolkit = __ModuleName__Toolkit(
+            # ...
+        )
+        ```

     Tools:
-        .. code-block:: python
-
-            toolkit.get_tools()
-
-        .. code-block::
-
-            # TODO: Example output.
+        ```python
+        toolkit.get_tools()
+        ```
+
+        ```txt
+        # TODO: Example output.
+        ```

     Use within an agent:
-        .. code-block:: python
-
-            from langgraph.prebuilt import create_react_agent
-
-            agent_executor = create_react_agent(llm, tools)
-
-            example_query = "..."
-
-            events = agent_executor.stream(
-                {"messages": [("user", example_query)]},
-                stream_mode="values",
-            )
-            for event in events:
-                event["messages"][-1].pretty_print()
-
-        .. code-block::
-
-            # TODO: Example output.
+        ```python
+        from langgraph.prebuilt import create_react_agent
+
+        agent_executor = create_react_agent(llm, tools)
+
+        example_query = "..."
+
+        events = agent_executor.stream(
+            {"messages": [("user", example_query)]},
+            stream_mode="values",
+        )
+        for event in events:
+            event["messages"][-1].pretty_print()
+        ```
+
+        ```txt
+        # TODO: Example output.
+        ```
     """
@@ -1,6 +1,6 @@
 """__ModuleName__ tools."""

-from typing import Optional, Type
+from typing import Type

 from langchain_core.callbacks import (
     CallbackManagerForToolRun,
@@ -27,42 +27,42 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]

     Setup:
         # TODO: Replace with relevant packages, env vars.
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.

-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```

     Instantiation:
-        .. code-block:: python
-
-            tool = __ModuleName__Tool(
-                # TODO: init params
-            )
+        ```python
+        tool = __ModuleName__Tool(
+            # TODO: init params
+        )
+        ```

     Invocation with args:
-        .. code-block:: python
-
-            # TODO: invoke args
-            tool.invoke({...})
-
-        .. code-block:: python
-
-            # TODO: output of invocation
+        ```python
+        # TODO: invoke args
+        tool.invoke({...})
+        ```
+
+        ```python
+        # TODO: output of invocation
+        ```

     Invocation with ToolCall:

-        .. code-block:: python
-
-            # TODO: invoke args
-            tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
-
-        .. code-block:: python
-
-            # TODO: output of invocation
+        ```python
+        # TODO: invoke args
+        tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
+        ```
+
+        ```python
+        # TODO: output of invocation
+        ```
     """  # noqa: E501

     # TODO: Set tool name and description
@@ -74,12 +74,12 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]
     """The schema that is passed to the model when performing tool calling."""

     # TODO: Add any other init params for the tool.
-    # param1: Optional[str]
+    # param1: str | None
     # """param1 determines foobar"""

     # TODO: Replaced (a, b) with real tool arguments.
     def _run(
-        self, a: int, b: int, *, run_manager: Optional[CallbackManagerForToolRun] = None
+        self, a: int, b: int, *, run_manager: CallbackManagerForToolRun | None = None
     ) -> str:
         return str(a + b + 80)
@@ -90,6 +90,6 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]
     #     a: int,
    #     b: int,
    #     *,
-    #     run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
+    #     run_manager: AsyncCallbackManagerForToolRun | None = None,
    # ) -> str:
    #     ...
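To make the template's behavior concrete: with `_run` returning `str(a + b + 80)` as above, a tool built from this template behaves roughly as follows (a sketch only; `__ModuleName__Tool` is the template placeholder, and wrapping a `"tool_call"`-typed input into a `ToolMessage` is standard `BaseTool` behavior):

```python
# Sketch: __ModuleName__Tool is the placeholder class from the template above.
tool = __ModuleName__Tool()

# Plain invocation with args: 1 + 2 + 80 == 83, returned as a string.
assert tool.invoke({"a": 1, "b": 2}) == "83"

# Invocation with a ToolCall dict instead returns a ToolMessage whose
# content is that same "83", tagged with the tool call id.
msg = tool.invoke(
    {"args": {"a": 1, "b": 2}, "id": "1", "name": tool.name, "type": "tool_call"}
)
```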
@@ -8,7 +8,6 @@ from typing import (
     Callable,
     Iterator,
     List,
-    Optional,
     Sequence,
     Tuple,
     Type,
@@ -29,133 +28,133 @@ class __ModuleName__VectorStore(VectorStore):

     # TODO: Replace with relevant packages, env vars.
     Setup:
-        Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable `__MODULE_NAME___API_KEY`.

-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```

     # TODO: Populate with relevant params.
     Key init args — indexing params:
-        collection_name: str
+        collection_name:
             Name of the collection.
-        embedding_function: Embeddings
+        embedding_function:
             Embedding function to use.

     # TODO: Populate with relevant params.
     Key init args — client params:
-        client: Optional[Client]
+        client:
             Client to use.
-        connection_args: Optional[dict]
+        connection_args:
             Connection arguments.

     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __module_name__.vectorstores import __ModuleName__VectorStore
-            from langchain_openai import OpenAIEmbeddings
-
-            vector_store = __ModuleName__VectorStore(
-                collection_name="foo",
-                embedding_function=OpenAIEmbeddings(),
-                connection_args={"uri": "./foo.db"},
-                # other params...
-            )
+        ```python
+        from __module_name__.vectorstores import __ModuleName__VectorStore
+        from langchain_openai import OpenAIEmbeddings
+
+        vector_store = __ModuleName__VectorStore(
+            collection_name="foo",
+            embedding_function=OpenAIEmbeddings(),
+            connection_args={"uri": "./foo.db"},
+            # other params...
+        )
+        ```

     # TODO: Populate with relevant variables.
     Add Documents:
-        .. code-block:: python
-
-            from langchain_core.documents import Document
-
-            document_1 = Document(page_content="foo", metadata={"baz": "bar"})
-            document_2 = Document(page_content="thud", metadata={"bar": "baz"})
-            document_3 = Document(page_content="i will be deleted :(")
-
-            documents = [document_1, document_2, document_3]
-            ids = ["1", "2", "3"]
-            vector_store.add_documents(documents=documents, ids=ids)
+        ```python
+        from langchain_core.documents import Document
+
+        document_1 = Document(page_content="foo", metadata={"baz": "bar"})
+        document_2 = Document(page_content="thud", metadata={"bar": "baz"})
+        document_3 = Document(page_content="i will be deleted :(")
+
+        documents = [document_1, document_2, document_3]
+        ids = ["1", "2", "3"]
+        vector_store.add_documents(documents=documents, ids=ids)
+        ```

     # TODO: Populate with relevant variables.
     Delete Documents:
-        .. code-block:: python
-
-            vector_store.delete(ids=["3"])
+        ```python
+        vector_store.delete(ids=["3"])
+        ```

     # TODO: Fill out with relevant variables and example output.
     Search:
-        .. code-block:: python
-
-            results = vector_store.similarity_search(query="thud",k=1)
-            for doc in results:
-                print(f"* {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        results = vector_store.similarity_search(query="thud",k=1)
+        for doc in results:
+            print(f"* {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```

     # TODO: Fill out with relevant variables and example output.
     Search with filter:
-        .. code-block:: python
-
-            results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
-            for doc in results:
-                print(f"* {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
+        for doc in results:
+            print(f"* {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```

     # TODO: Fill out with relevant variables and example output.
     Search with score:
-        .. code-block:: python
-
-            results = vector_store.similarity_search_with_score(query="qux",k=1)
-            for doc, score in results:
-                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        results = vector_store.similarity_search_with_score(query="qux",k=1)
+        for doc, score in results:
+            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```

     # TODO: Fill out with relevant variables and example output.
     Async:
-        .. code-block:: python
-
-            # add documents
-            # await vector_store.aadd_documents(documents=documents, ids=ids)
-
-            # delete documents
-            # await vector_store.adelete(ids=["3"])
-
-            # search
-            # results = vector_store.asimilarity_search(query="thud",k=1)
-
-            # search with score
-            results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
-            for doc,score in results:
-                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        # add documents
+        # await vector_store.aadd_documents(documents=documents, ids=ids)
+
+        # delete documents
+        # await vector_store.adelete(ids=["3"])
+
+        # search
+        # results = vector_store.asimilarity_search(query="thud",k=1)
+
+        # search with score
+        results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
+        for doc,score in results:
+            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```

     # TODO: Fill out with relevant variables and example output.
     Use as Retriever:
-        .. code-block:: python
-
-            retriever = vector_store.as_retriever(
-                search_type="mmr",
-                search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
-            )
-            retriever.invoke("thud")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        retriever = vector_store.as_retriever(
+            search_type="mmr",
+            search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
+        )
+        retriever.invoke("thud")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
     """  # noqa: E501

     def __init__(self, embedding: Embeddings) -> None:
@@ -172,7 +171,7 @@ class __ModuleName__VectorStore(VectorStore):
         cls: Type[__ModuleName__VectorStore],
         texts: List[str],
         embedding: Embeddings,
-        metadatas: Optional[List[dict]] = None,
+        metadatas: list[dict] | None = None,
         **kwargs: Any,
     ) -> __ModuleName__VectorStore:
         store = cls(
@@ -187,7 +186,7 @@ class __ModuleName__VectorStore(VectorStore):
    #     cls: Type[VST],
    #     texts: List[str],
    #     embedding: Embeddings,
-    #     metadatas: Optional[List[dict]] = None,
+    #     metadatas: list[dict] | None = None,
    #     **kwargs: Any,
    # ) -> VST:
    #     return await asyncio.get_running_loop().run_in_executor(
@@ -201,7 +200,7 @@ class __ModuleName__VectorStore(VectorStore):
     def add_documents(
         self,
         documents: List[Document],
-        ids: Optional[List[str]] = None,
+        ids: list[str] | None = None,
         **kwargs: Any,
     ) -> List[str]:
         """Add documents to the store."""
@@ -215,7 +214,7 @@ class __ModuleName__VectorStore(VectorStore):
             )
             raise ValueError(msg)

-        id_iterator: Iterator[Optional[str]] = (
+        id_iterator: Iterator[str | None] = (
             iter(ids) if ids else iter(doc.id for doc in documents)
         )
@@ -238,19 +237,19 @@ class __ModuleName__VectorStore(VectorStore):
    # async def aadd_documents(
    #     self,
    #     documents: List[Document],
-    #     ids: Optional[List[str]] = None,
+    #     ids: list[str] | None = None,
    #     **kwargs: Any,
    # ) -> List[str]:
    #     raise NotImplementedError

-    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
+    def delete(self, ids: list[str] | None = None, **kwargs: Any) -> None:
         if ids:
             for _id in ids:
                 self._database.pop(_id, None)

     # optional: add custom async implementations
     # async def adelete(
-    #     self, ids: Optional[List[str]] = None, **kwargs: Any
+    #     self, ids: list[str] | None = None, **kwargs: Any
     # ) -> None:
     #     raise NotImplementedError
@@ -287,7 +286,7 @@ class __ModuleName__VectorStore(VectorStore):
         self,
         embedding: List[float],
         k: int = 4,
-        filter: Optional[Callable[[Document], bool]] = None,
+        filter: Callable[[Document], bool] | None = None,
         **kwargs: Any,
     ) -> List[tuple[Document, float, List[float]]]:
         # get all docs with fixed order in list
@@ -24,7 +24,7 @@ def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
     This code works

     Args:
-        pkg_name (str): The name of the partner package.
+        pkg_name: The name of the partner package.

     Returns:
         List of 2-tuples containing old and new import paths.
@@ -65,7 +65,7 @@ def is_subclass(class_obj: type, classes_: list[type]) -> bool:
         classes_: A list of classes to check against.

     Returns:
-        True if `class_obj` is a subclass of any class in `classes_`, False otherwise.
+        True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
     """
     return any(
         issubclass(class_obj, kls)
@@ -13,7 +13,7 @@ def get_package_root(cwd: Path | None = None) -> Path:

     Args:
         cwd: The current working directory to start the search from.
-            If None, uses the current working directory of the process.
+            If `None`, uses the current working directory of the process.

     Returns:
         The path to the package root directory.
@@ -20,18 +20,30 @@ description = "CLI for interacting with LangChain"
 readme = "README.md"

 [project.urls]
-"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
-"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-cli%3D%3D0%22&expanded=true"
-repository = "https://github.com/langchain-ai/langchain"
+homepage = "https://docs.langchain.com/"
+repository = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
+changelog = "https://github.com/langchain-ai/langchain/releases?q=%22langchain-cli%3D%3D1%22"
+twitter = "https://x.com/LangChainAI"
+slack = "https://www.langchain.com/join-community"
+reddit = "https://www.reddit.com/r/LangChain/"

 [project.scripts]
-langchain = "langchain_cli.cli:app"
+langchain-cli = "langchain_cli.cli:app"

 [dependency-groups]
-dev = ["pytest>=7.4.2,<9.0.0", "pytest-watcher>=0.3.4,<1.0.0"]
-lint = ["ruff>=0.13.1,<0.14", "mypy>=1.18.1,<1.19"]
-test = ["langchain-core", "langchain"]
+dev = [
+    "pytest>=7.4.2,<9.0.0",
+    "pytest-watcher>=0.3.4,<1.0.0"
+]
+lint = [
+    "ruff>=0.13.1,<0.14",
+    "mypy>=1.18.1,<1.19"
+]
+test = [
+    "langchain-core",
+    "langchain"
+]
 typing = ["langchain"]
 test_integration = []
@@ -1,7 +1,14 @@
 # 🦜🍎️ LangChain Core

-[License: MIT](https://opensource.org/licenses/MIT)
+[PyPI Version](https://pypi.org/project/langchain-core/#history)
+[License: MIT](https://opensource.org/licenses/MIT)
+[Downloads](https://pypistats.org/packages/langchain-core)
+[Twitter](https://twitter.com/langchainai)
+
+Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

 To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
 [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.

 ## Quick Install
@@ -9,16 +16,14 @@
 pip install langchain-core
 ```

-## What is it?
+## 🤔 What is this?

-LangChain Core contains the base abstractions that power the the LangChain ecosystem.
+LangChain Core contains the base abstractions that power the LangChain ecosystem.

 These abstractions are designed to be as modular and simple as possible.

 The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.

-For full documentation see the [API reference](https://reference.langchain.com/python/).
-
 ## ⛰️ Why build on top of LangChain Core?

 The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
@@ -27,12 +32,16 @@ The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
 - **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
 - **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.

+## 📖 Documentation
+
+For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).
+
 ## 📕 Releases & Versioning

-See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning Policy](https://docs.langchain.com/oss/python/versioning).
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.

 ## 💁 Contributing

 As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.

-For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing).
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
@@ -1,4 +1,4 @@
-"""``langchain-core`` defines the base abstractions for the LangChain ecosystem.
+"""`langchain-core` defines the base abstractions for the LangChain ecosystem.

 The interfaces for core components like chat models, LLMs, vector stores, retrievers,
 and more are defined here. The universal invocation protocol (Runnables) along with
@@ -6,7 +6,6 @@ This module is only relevant for LangChain developers, not for users.

 This module and its submodules are for internal use only. Do not use them in your
 own code. We may change the API at any time with no warning.
-
 """

 from typing import TYPE_CHECKING
@@ -40,40 +40,37 @@ def beta(
     """Decorator to mark a function, a class, or a property as beta.

     When marking a classmethod, a staticmethod, or a property, the
-    ``@beta`` decorator should go *under* ``@classmethod`` and
-    ``@staticmethod`` (i.e., `beta` should directly decorate the
-    underlying callable), but *over* ``@property``.
+    `@beta` decorator should go *under* `@classmethod` and
+    `@staticmethod` (i.e., `beta` should directly decorate the
+    underlying callable), but *over* `@property`.

-    When marking a class ``C`` intended to be used as a base class in a
-    multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method
-    (if ``C`` instead inherited its ``__init__`` from its own base class, then
-    ``@beta`` would mess up ``__init__`` inheritance when installing its
-    own (annotation-emitting) ``C.__init__``).
+    When marking a class `C` intended to be used as a base class in a
+    multiple inheritance hierarchy, `C` *must* define an `__init__` method
+    (if `C` instead inherited its `__init__` from its own base class, then
+    `@beta` would mess up `__init__` inheritance when installing its
+    own (annotation-emitting) `C.__init__`).

     Args:
-        message : str, optional
+        message:
             Override the default beta message. The %(since)s,
             %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
             and %(removal)s format specifiers will be replaced by the
             values of the respective arguments passed to this function.
-        name : str, optional
+        name:
             The name of the beta object.
-        obj_type : str, optional
+        obj_type:
             The object type being beta.
-        addendum : str, optional
+        addendum:
             Additional text appended directly to the final message.

     Returns:
         A decorator which can be used to mark functions or classes as beta.

     Examples:
-
-        .. code-block:: python
-
-            @beta
-            def the_function_to_annotate():
-                pass
+        ```python
+        @beta
+        def the_function_to_annotate():
+            pass
+        ```
     """

     def beta(
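A sketch of the placement rules spelled out above (hypothetical `Widget` class; note that `beta` is a decorator factory taking keyword-only options, so it is applied with parentheses here):

```python
from langchain_core._api import beta


class Widget:
    # Under @classmethod: @beta decorates the underlying callable,
    # and @classmethod wraps the already-annotated function.
    @classmethod
    @beta()
    def from_config(cls, config: dict) -> "Widget":
        return cls()

    # Over @property: @beta decorates the property object itself.
    @beta()
    @property
    def size(self) -> int:
        return 0
```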
@@ -82,62 +82,59 @@ def deprecated(
     """Decorator to mark a function, a class, or a property as deprecated.

     When deprecating a classmethod, a staticmethod, or a property, the
-    ``@deprecated`` decorator should go *under* ``@classmethod`` and
-    ``@staticmethod`` (i.e., `deprecated` should directly decorate the
-    underlying callable), but *over* ``@property``.
+    `@deprecated` decorator should go *under* `@classmethod` and
+    `@staticmethod` (i.e., `deprecated` should directly decorate the
+    underlying callable), but *over* `@property`.

-    When deprecating a class ``C`` intended to be used as a base class in a
-    multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method
-    (if ``C`` instead inherited its ``__init__`` from its own base class, then
-    ``@deprecated`` would mess up ``__init__`` inheritance when installing its
-    own (deprecation-emitting) ``C.__init__``).
+    When deprecating a class `C` intended to be used as a base class in a
+    multiple inheritance hierarchy, `C` *must* define an `__init__` method
+    (if `C` instead inherited its `__init__` from its own base class, then
+    `@deprecated` would mess up `__init__` inheritance when installing its
+    own (deprecation-emitting) `C.__init__`).

     Parameters are the same as for `warn_deprecated`, except that *obj_type*
     defaults to 'class' if decorating a class, 'attribute' if decorating a
     property, and 'function' otherwise.

     Args:
-        since : str
+        since:
             The release at which this API became deprecated.
-        message : str, optional
+        message:
             Override the default deprecation message. The %(since)s,
             %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
             and %(removal)s format specifiers will be replaced by the
             values of the respective arguments passed to this function.
-        name : str, optional
+        name:
             The name of the deprecated object.
-        alternative : str, optional
+        alternative:
             An alternative API that the user may use in place of the
             deprecated API. The deprecation warning will tell the user
             about this alternative if provided.
-        alternative_import: str, optional
+        alternative_import:
             An alternative import that the user may use instead.
-        pending : bool, optional
-            If True, uses a PendingDeprecationWarning instead of a
+        pending:
+            If `True`, uses a `PendingDeprecationWarning` instead of a
             DeprecationWarning. Cannot be used together with removal.
-        obj_type : str, optional
+        obj_type:
             The object type being deprecated.
-        addendum : str, optional
+        addendum:
             Additional text appended directly to the final message.
-        removal : str, optional
+        removal:
             The expected removal version. With the default (an empty
             string), a removal version is automatically computed from
             since. Set to other Falsy values to not schedule a removal
             date. Cannot be used together with pending.
-        package: str, optional
+        package:
             The package of the deprecated object.

     Returns:
         A decorator to mark a function or class as deprecated.

     Examples:
-
-        .. code-block:: python
-
-            @deprecated("1.4.0")
-            def the_function_to_deprecate():
-                pass
+        ```python
+        @deprecated("1.4.0")
+        def the_function_to_deprecate():
+            pass
+        ```
     """
     _validate_deprecation_params(
         removal, alternative, alternative_import, pending=pending
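For orientation, a sketch combining the parameters documented above (the version numbers and names are illustrative only):

```python
from langchain_core._api import deprecated


@deprecated("1.4.0", alternative="new_search", removal="2.0.0")
def old_search(query: str) -> list:
    """Deprecated: calling this emits a deprecation warning that
    mentions `new_search` and the planned 2.0.0 removal."""
    return []


old_search("hello")  # emits the warning, then runs normally
```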
@@ -372,7 +369,7 @@ def deprecated(
     components = [
         _message,
         f"Use {_alternative} instead." if _alternative else "",
-        f"Use ``{_alternative_import}`` instead." if _alternative_import else "",
+        f"Use `{_alternative_import}` instead." if _alternative_import else "",
         _addendum,
     ]
     details = " ".join([component.strip() for component in components if component])
@@ -440,7 +437,7 @@ def warn_deprecated(
         alternative_import:
             An alternative import that the user may use instead.
         pending:
-            If True, uses a PendingDeprecationWarning instead of a
+            If `True`, uses a `PendingDeprecationWarning` instead of a
             DeprecationWarning. Cannot be used together with removal.
         obj_type:
             The object type being deprecated.
@@ -550,12 +547,10 @@ def rename_parameter(
         A decorator indicating that a parameter was renamed.

     Example:
-
-        .. code-block:: python
-
-            @_api.rename_parameter("3.1", "bad_name", "good_name")
-            def func(good_name): ...
+        ```python
+        @_api.rename_parameter("3.1", "bad_name", "good_name")
+        def func(good_name): ...
+        ```
     """

     def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:
@@ -13,7 +13,7 @@ def import_attr(

     Args:
         attr_name: The name of the attribute to import.
-        module_name: The name of the module to import from. If None, the attribute
+        module_name: The name of the module to import from. If `None`, the attribute
             is imported from the package itself.
         package: The name of the package where the module is located.
@@ -1,8 +1,9 @@
 """Schema definitions for representing agent actions, observations, and return values.

-**ATTENTION** The schema definitions are provided for backwards compatibility.
+!!! warning
+    The schema definitions are provided for backwards compatibility.

-!!! important
+!!! warning
     New agents should be built using the
     [langgraph library](https://github.com/langchain-ai/langgraph), which provides a
     simpler and more flexible way to define agents.
@@ -16,10 +17,10 @@ Agents use language models to choose a sequence of actions to take.
 A basic agent works in the following manner:

 1. Given a prompt an agent uses an LLM to request an action to take
-(e.g., a tool to run).
+   (e.g., a tool to run).
 2. The agent executes the action (e.g., runs the tool), and receives an observation.
 3. The agent returns the observation to the LLM, which can then be used to generate
-the next action.
+   the next action.
 4. When the agent reaches a stopping condition, it returns a final return value.

 The schemas for the agents themselves are defined in langchain.agents.agent.
@@ -83,10 +84,10 @@ class AgentAction(Serializable):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
-            ``["langchain", "schema", "agent"]``
+            `["langchain", "schema", "agent"]`
         """
         return ["langchain", "schema", "agent"]
@@ -111,7 +112,7 @@ class AgentActionMessageLog(AgentAction):
     if (tool, tool_input) cannot be used to fully recreate the LLM
     prediction, and you need that LLM prediction (for future agent iteration).
     Compared to `log`, this is useful when the underlying LLM is a
-    ChatModel (and therefore returns messages rather than a string)."""
+    chat model (and therefore returns messages rather than a string)."""
     # Ignoring type because we're overriding the type from AgentAction.
     # And this is the correct thing to do in this case.
     # The type literal is used for serialization purposes.
@@ -160,10 +161,10 @@ class AgentFinish(Serializable):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
-            ``["langchain", "schema", "agent"]``
+            `["langchain", "schema", "agent"]`
         """
         return ["langchain", "schema", "agent"]
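The four-step loop described in this module docstring can be sketched with the `AgentAction`/`AgentFinish` schemas defined here (the `plan` and `run_tool` callables are hypothetical stand-ins for the LLM and the tool executor):

```python
from langchain_core.agents import AgentAction, AgentFinish


def run_agent(plan, run_tool, prompt: str, max_steps: int = 10):
    """Drive the basic loop: plan -> act -> observe -> repeat."""
    observations: list[tuple[AgentAction, str]] = []
    for _ in range(max_steps):
        # 1. The LLM (via `plan`) requests the next action, or finishes.
        step = plan(prompt, observations)
        if isinstance(step, AgentFinish):
            # 4. Stopping condition reached: return the final return values.
            return step.return_values
        # 2. Execute the action (e.g., run the tool) and get an observation.
        observation = run_tool(step.tool, step.tool_input)
        # 3. Feed the observation back for the next planning call.
        observations.append((step, observation))
    raise RuntimeError("agent did not finish within max_steps")
```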
@@ -1,24 +1,15 @@
-"""Cache classes.
+"""`caches` provides an optional caching layer for language models.

 !!! warning
-    Beta Feature!
+    This is a beta feature! Please be wary of deploying experimental code to production
+    unless you've taken appropriate precautions.

-**Cache** provides an optional caching layer for LLMs.
-
-Cache is useful for two reasons:
-
-- It can save you money by reducing the number of API calls you make to the LLM
-  provider if you're often requesting the same completion multiple times.
-- It can speed up your application by reducing the number of API calls you make
-  to the LLM provider.
-
-Cache directly competes with Memory. See documentation for Pros and Cons.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
+A cache is useful for two reasons:
+
+1. It can save you money by reducing the number of API calls you make to the LLM
+   provider if you're often requesting the same completion multiple times.
+2. It can speed up your application by reducing the number of API calls you make to the
+   LLM provider.
 """

 from __future__ import annotations
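As a concrete illustration of this caching layer (a minimal sketch using the standard `langchain_core` entry points):

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Install a process-wide in-memory cache: a second identical call to a
# model (same prompt, same configuration) is answered from memory
# instead of hitting the provider's API again.
set_llm_cache(InMemoryCache())
```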
@@ -40,8 +31,8 @@ class BaseCache(ABC):

     The cache interface consists of the following methods:

-    - lookup: Look up a value based on a prompt and llm_string.
-    - update: Update the cache based on a prompt and llm_string.
+    - lookup: Look up a value based on a prompt and `llm_string`.
+    - update: Update the cache based on a prompt and `llm_string`.
     - clear: Clear the cache.

     In addition, the cache interface provides an async version of each method.
@@ -53,14 +44,14 @@ class BaseCache(ABC):

     @abstractmethod
     def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-        """Look up based on prompt and llm_string.
+        """Look up based on `prompt` and `llm_string`.

         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
@@ -69,27 +60,27 @@ class BaseCache(ABC):
                 representation.

         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """

     @abstractmethod
     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.

         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the lookup method.

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
                 (e.g., model name, temperature, stop tokens, max tokens, etc.).
                 These invocation parameters are serialized into a string
                 representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
@@ -98,14 +89,14 @@ class BaseCache(ABC):
         """Clear cache that can take additional keyword arguments."""

     async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-        """Async look up based on prompt and llm_string.
+        """Async look up based on `prompt` and `llm_string`.

         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
@@ -114,29 +105,29 @@ class BaseCache(ABC):
                 representation.

         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
         return await run_in_executor(None, self.lookup, prompt, llm_string)

     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.

         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the look up method.

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
                 (e.g., model name, temperature, stop tokens, max tokens, etc.).
                 These invocation parameters are serialized into a string
                 representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         return await run_in_executor(None, self.update, prompt, llm_string, return_val)
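A minimal sketch of the contract described above: keys come from the `(prompt, llm_string)` 2-tuple, `lookup` returns `None` on a miss, and the inherited async methods delegate to these sync implementations:

```python
from typing import Any

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache


class DictCache(BaseCache):
    """Toy cache keyed on the (prompt, llm_string) 2-tuple."""

    def __init__(self) -> None:
        self._store: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        # Cache miss -> None; cache hit -> the cached list of Generations.
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        # Must derive the key exactly as lookup() does.
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store.clear()
```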
@@ -154,12 +145,11 @@ class InMemoryCache(BaseCache):

         Args:
             maxsize: The maximum number of items to store in the cache.
-                If None, the cache has no maximum size.
+                If `None`, the cache has no maximum size.
                 If the cache exceeds the maximum size, the oldest items are removed.
-                Default is None.

         Raises:
-            ValueError: If maxsize is less than or equal to 0.
+            ValueError: If `maxsize` is less than or equal to `0`.
         """
         self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
         if maxsize is not None and maxsize <= 0:
@@ -168,28 +158,28 @@ class InMemoryCache(BaseCache):
         self._maxsize = maxsize

     def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-        """Look up based on prompt and llm_string.
+        """Look up based on `prompt` and `llm_string`.

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.

         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
         """
         return self._cache.get((prompt, llm_string), None)

     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         if self._maxsize is not None and len(self._cache) == self._maxsize:
@@ -202,30 +192,30 @@ class InMemoryCache(BaseCache):
         self._cache = {}

     async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-        """Async look up based on prompt and llm_string.
+        """Async look up based on `prompt` and `llm_string`.

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.

         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
         """
         return self.lookup(prompt, llm_string)

     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.

         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         self.update(prompt, llm_string, return_val)
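A quick illustration of the `maxsize` semantics documented above (the values are arbitrary; the point is the oldest-entry eviction):

```python
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache(maxsize=2)
cache.update("p1", "llm-config", [Generation(text="a")])
cache.update("p2", "llm-config", [Generation(text="b")])
cache.update("p3", "llm-config", [Generation(text="c")])  # evicts the oldest ("p1")

assert cache.lookup("p1", "llm-config") is None      # miss: evicted
assert cache.lookup("p3", "llm-config") is not None  # hit
```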
@@ -1,11 +1,4 @@
-"""**Callback handlers** allow listening to events in LangChain.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
-"""
+"""**Callback handlers** allow listening to events in LangChain."""

 from typing import TYPE_CHECKING
@@ -35,10 +35,10 @@ class RetrieverManagerMixin:
     """Run when Retriever errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_retriever_end(
@@ -52,10 +52,10 @@ class RetrieverManagerMixin:
     """Run when Retriever ends running.

     Args:
-        documents (Sequence[Document]): The documents retrieved.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        documents: The documents retrieved.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

@@ -76,12 +76,11 @@ class LLMManagerMixin:
     For both chat models and non-chat models (legacy LLMs).

     Args:
-        token (str): The new token.
-        chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
-            containing content and other information.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        token: The new token.
+        chunk: The new generated chunk, containing content and other information.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_llm_end(
@@ -95,10 +94,10 @@ class LLMManagerMixin:
     """Run when LLM ends running.

     Args:
-        response (LLMResult): The response which was generated.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        response: The response which was generated.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_llm_error(
@@ -112,10 +111,10 @@ class LLMManagerMixin:
     """Run when LLM errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

@@ -133,10 +132,10 @@ class ChainManagerMixin:
     """Run when chain ends running.

     Args:
-        outputs (dict[str, Any]): The outputs of the chain.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        outputs: The outputs of the chain.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_chain_error(
@@ -150,10 +149,10 @@ class ChainManagerMixin:
     """Run when chain errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_agent_action(
@@ -167,10 +166,10 @@ class ChainManagerMixin:
     """Run on agent action.

     Args:
-        action (AgentAction): The agent action.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        action: The agent action.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_agent_finish(
@@ -184,10 +183,10 @@ class ChainManagerMixin:
     """Run on the agent end.

     Args:
-        finish (AgentFinish): The agent finish.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        finish: The agent finish.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
    """

@@ -205,10 +204,10 @@ class ToolManagerMixin:
     """Run when the tool ends running.

     Args:
-        output (Any): The output of the tool.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        output: The output of the tool.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_tool_error(
@@ -222,10 +221,10 @@ class ToolManagerMixin:
     """Run when tool errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """
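To ground these hook signatures, a minimal synchronous handler (a sketch; a handler only overrides the hooks it cares about, the rest default to no-ops):

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class TokenPrinter(BaseCallbackHandler):
    """Print streamed tokens and report LLM errors."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Called once per streamed token; run_id etc. arrive via kwargs.
        print(token, end="", flush=True)

    def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
        print(f"\nLLM failed: {error!r}")
```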
@@ -248,16 +247,16 @@ class CallbackManagerMixin:
     !!! warning
         This method is called for non-chat models (regular LLMs). If you're
         implementing a handler for a chat model, you should use
-        ``on_chat_model_start`` instead.
+        `on_chat_model_start` instead.

     Args:
-        serialized (dict[str, Any]): The serialized LLM.
-        prompts (list[str]): The prompts.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (Optional[list[str]]): The tags.
-        metadata (Optional[dict[str, Any]]): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized LLM.
+        prompts: The prompts.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

     def on_chat_model_start(
@@ -275,16 +274,16 @@ class CallbackManagerMixin:

     !!! warning
         This method is called for chat models. If you're implementing a handler for
-        a non-chat model, you should use ``on_llm_start`` instead.
+        a non-chat model, you should use `on_llm_start` instead.

     Args:
-        serialized (dict[str, Any]): The serialized chat model.
-        messages (list[list[BaseMessage]]): The messages.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (Optional[list[str]]): The tags.
-        metadata (Optional[dict[str, Any]]): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chat model.
+        messages: The messages.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """
     # NotImplementedError is thrown intentionally
     # Callback handler will fall back to on_llm_start if this is exception is thrown
@@ -305,13 +304,13 @@ class CallbackManagerMixin:
     """Run when the Retriever starts running.

     Args:
-        serialized (dict[str, Any]): The serialized Retriever.
-        query (str): The query.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (Optional[list[str]]): The tags.
-        metadata (Optional[dict[str, Any]]): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized Retriever.
+        query: The query.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

     def on_chain_start(
@@ -328,13 +327,13 @@ class CallbackManagerMixin:
     """Run when a chain starts running.

     Args:
-        serialized (dict[str, Any]): The serialized chain.
-        inputs (dict[str, Any]): The inputs.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (Optional[list[str]]): The tags.
-        metadata (Optional[dict[str, Any]]): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        inputs: The inputs.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

     def on_tool_start(
@@ -352,14 +351,14 @@ class CallbackManagerMixin:
     """Run when the tool starts running.

     Args:
-        serialized (dict[str, Any]): The serialized tool.
-        input_str (str): The input string.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (Optional[list[str]]): The tags.
-        metadata (Optional[dict[str, Any]]): The metadata.
-        inputs (Optional[dict[str, Any]]): The inputs.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        input_str: The input string.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        inputs: The inputs.
+        **kwargs: Additional keyword arguments.
     """

@@ -377,10 +376,10 @@ class RunManagerMixin:
     """Run on an arbitrary text.

     Args:
-        text (str): The text.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        text: The text.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_retry(
@@ -394,10 +393,10 @@ class RunManagerMixin:
     """Run on a retry event.

     Args:
-        retry_state (RetryCallState): The retry state.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        retry_state: The retry state.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

     def on_custom_event(
@@ -415,7 +414,7 @@ class RunManagerMixin:
     Args:
         name: The name of the custom event.
         data: The data for the custom event. Format will match
-              the format specified by the user.
+            the format specified by the user.
         run_id: The ID of the run.
         tags: The tags associated with the custom event
             (includes inherited tags).
@@ -497,16 +496,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):
!!! warning
This method is called for non-chat models (regular LLMs). If you're
implementing a handler for a chat model, you should use
``on_chat_model_start`` instead.
`on_chat_model_start` instead.

Args:
serialized (dict[str, Any]): The serialized LLM.
prompts (list[str]): The prompts.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
metadata (Optional[dict[str, Any]]): The metadata.
kwargs (Any): Additional keyword arguments.
serialized: The serialized LLM.
prompts: The prompts.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
"""

async def on_chat_model_start(
@@ -524,16 +523,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):

!!! warning
This method is called for chat models. If you're implementing a handler for
a non-chat model, you should use ``on_llm_start`` instead.
a non-chat model, you should use `on_llm_start` instead.

Args:
serialized (dict[str, Any]): The serialized chat model.
messages (list[list[BaseMessage]]): The messages.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
metadata (Optional[dict[str, Any]]): The metadata.
kwargs (Any): Additional keyword arguments.
serialized: The serialized chat model.
messages: The messages.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
"""
# NotImplementedError is thrown intentionally
# Callback handler will fall back to on_llm_start if this exception is thrown
@@ -555,13 +554,12 @@ class AsyncCallbackHandler(BaseCallbackHandler):
For both chat models and non-chat models (legacy LLMs).

Args:
token (str): The new token.
chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
containing content and other information.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
token: The new token.
chunk: The new generated chunk, containing content and other information.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_llm_end(
@@ -576,11 +574,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when the model ends running.

Args:
response (LLMResult): The response which was generated.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
response: The response which was generated.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_llm_error(
@@ -599,7 +597,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
kwargs (Any): Additional keyword arguments.
**kwargs: Additional keyword arguments.
- response (LLMResult): The response which was generated before
the error occurred.
"""
@@ -618,13 +616,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when a chain starts running.

Args:
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
metadata (Optional[dict[str, Any]]): The metadata.
kwargs (Any): Additional keyword arguments.
serialized: The serialized chain.
inputs: The inputs.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
"""

async def on_chain_end(
@@ -639,11 +637,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when a chain ends running.

Args:
outputs (dict[str, Any]): The outputs of the chain.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
outputs: The outputs of the chain.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_chain_error(
@@ -658,11 +656,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when chain errors.

Args:
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_tool_start(
@@ -680,14 +678,14 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when the tool starts running.

Args:
serialized (dict[str, Any]): The serialized tool.
input_str (str): The input string.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
metadata (Optional[dict[str, Any]]): The metadata.
inputs (Optional[dict[str, Any]]): The inputs.
kwargs (Any): Additional keyword arguments.
serialized: The serialized tool.
input_str: The input string.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
inputs: The inputs.
**kwargs: Additional keyword arguments.
"""

async def on_tool_end(
@@ -702,11 +700,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when the tool ends running.

Args:
output (Any): The output of the tool.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
output: The output of the tool.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_tool_error(
@@ -721,11 +719,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run when tool errors.

Args:
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_text(
@@ -740,11 +738,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on an arbitrary text.

Args:
text (str): The text.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
text: The text.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_retry(
@@ -758,10 +756,10 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on a retry event.

Args:
retry_state (RetryCallState): The retry state.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
kwargs (Any): Additional keyword arguments.
retry_state: The retry state.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
"""

async def on_agent_action(
@@ -776,11 +774,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on agent action.

Args:
action (AgentAction): The agent action.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
action: The agent action.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_agent_finish(
@@ -795,11 +793,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on the agent end.

Args:
finish (AgentFinish): The agent finish.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
finish: The agent finish.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_retriever_start(
@@ -816,13 +814,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on the retriever start.

Args:
serialized (dict[str, Any]): The serialized retriever.
query (str): The query.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
metadata (Optional[dict[str, Any]]): The metadata.
kwargs (Any): Additional keyword arguments.
serialized: The serialized retriever.
query: The query.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
metadata: The metadata.
**kwargs: Additional keyword arguments.
"""

async def on_retriever_end(
@@ -837,11 +835,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on the retriever end.

Args:
documents (Sequence[Document]): The documents retrieved.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
documents: The documents retrieved.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_retriever_error(
@@ -856,11 +854,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""Run on retriever error.

Args:
error (BaseException): The error that occurred.
run_id (UUID): The run ID. This is the ID of the current run.
parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
tags (Optional[list[str]]): The tags.
kwargs (Any): Additional keyword arguments.
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
tags: The tags.
**kwargs: Additional keyword arguments.
"""

async def on_custom_event(
@@ -878,7 +876,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
Args:
name: The name of the custom event.
data: The data for the custom event. Format will match
the format specified by the user.
run_id: The ID of the run.
tags: The tags associated with the custom event
(includes inherited tags).
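`on_custom_event` pairs with `adispatch_custom_event`, which must be called from inside a running chain so the event can be routed to the handlers on that run's config. A hedged sketch (the event name and payload are made up for illustration):

```python
from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.callbacks.manager import adispatch_custom_event
from langchain_core.runnables import RunnableLambda


class CustomEventHandler(AsyncCallbackHandler):
    async def on_custom_event(
        self,
        name: str,
        data: Any,
        *,
        run_id: UUID,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        print(f"custom event {name!r}: {data}")


async def step(x: int) -> int:
    # Dispatched to the handlers attached to the enclosing run.
    await adispatch_custom_event("progress", {"done": x})
    return x + 1


# await RunnableLambda(step).ainvoke(1, config={"callbacks": [CustomEventHandler()]})
```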
@@ -906,16 +904,13 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Initialize callback manager.

Args:
handlers (list[BaseCallbackHandler]): The handlers.
inheritable_handlers (Optional[list[BaseCallbackHandler]]):
The inheritable handlers. Default is None.
parent_run_id (Optional[UUID]): The parent run ID. Default is None.
tags (Optional[list[str]]): The tags. Default is None.
inheritable_tags (Optional[list[str]]): The inheritable tags.
Default is None.
metadata (Optional[dict[str, Any]]): The metadata. Default is None.
inheritable_metadata (Optional[dict[str, Any]]): The inheritable metadata.
Default is None.
handlers: The handlers.
inheritable_handlers: The inheritable handlers.
parent_run_id: The parent run ID.
tags: The tags.
inheritable_tags: The inheritable tags.
metadata: The metadata.
inheritable_metadata: The inheritable metadata.
"""
self.handlers: list[BaseCallbackHandler] = handlers
self.inheritable_handlers: list[BaseCallbackHandler] = (
@@ -946,35 +941,29 @@ class BaseCallbackManager(CallbackManagerMixin):
within merge_configs.

Returns:
BaseCallbackManager: The merged callback manager of the same type
as the current object.
The merged callback manager of the same type as the current object.

Example: Merging two callback managers.

.. code-block:: python
```python
from langchain_core.callbacks.manager import (
CallbackManager,
trace_as_chain_group,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler

from langchain_core.callbacks.manager import (
CallbackManager,
trace_as_chain_group,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler

manager = CallbackManager(
handlers=[StdOutCallbackHandler()], tags=["tag2"]
)
with trace_as_chain_group(
"My Group Name", tags=["tag1"]
) as group_manager:
merged_manager = group_manager.merge(manager)
print(merged_manager.handlers)
# [
# <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
# <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
# ]

print(merged_manager.tags)
# ['tag2', 'tag1']
manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
merged_manager = group_manager.merge(manager)
print(merged_manager.handlers)
# [
# <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
# <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
# ]

print(merged_manager.tags)
# ['tag2', 'tag1']
```
""" # noqa: E501
manager = self.__class__(
parent_run_id=self.parent_run_id or other.parent_run_id,
@@ -1011,8 +1000,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Add a handler to the callback manager.

Args:
handler (BaseCallbackHandler): The handler to add.
inherit (bool): Whether to inherit the handler. Default is True.
handler: The handler to add.
inherit: Whether to inherit the handler.
"""
if handler not in self.handlers:
self.handlers.append(handler)
@@ -1023,7 +1012,7 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Remove a handler from the callback manager.

Args:
handler (BaseCallbackHandler): The handler to remove.
handler: The handler to remove.
"""
if handler in self.handlers:
self.handlers.remove(handler)
@@ -1038,8 +1027,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Set handlers as the only handlers on the callback manager.

Args:
handlers (list[BaseCallbackHandler]): The handlers to set.
inherit (bool): Whether to inherit the handlers. Default is True.
handlers: The handlers to set.
inherit: Whether to inherit the handlers.
"""
self.handlers = []
self.inheritable_handlers = []
@@ -1054,8 +1043,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Set handler as the only handler on the callback manager.

Args:
handler (BaseCallbackHandler): The handler to set.
inherit (bool): Whether to inherit the handler. Default is True.
handler: The handler to set.
inherit: Whether to inherit the handler.
"""
self.set_handlers([handler], inherit=inherit)

@@ -1067,8 +1056,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Add tags to the callback manager.

Args:
tags (list[str]): The tags to add.
inherit (bool): Whether to inherit the tags. Default is True.
tags: The tags to add.
inherit: Whether to inherit the tags.
"""
for tag in tags:
if tag in self.tags:
@@ -1081,7 +1070,7 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Remove tags from the callback manager.

Args:
tags (list[str]): The tags to remove.
tags: The tags to remove.
"""
for tag in tags:
if tag in self.tags:
@@ -1097,8 +1086,8 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Add metadata to the callback manager.

Args:
metadata (dict[str, Any]): The metadata to add.
inherit (bool): Whether to inherit the metadata. Default is True.
metadata: The metadata to add.
inherit: Whether to inherit the metadata.
"""
self.metadata.update(metadata)
if inherit:
@@ -1108,7 +1097,7 @@ class BaseCallbackManager(CallbackManagerMixin):
"""Remove metadata from the callback manager.

Args:
keys (list[str]): The keys to remove.
keys: The keys to remove.
"""
for key in keys:
self.metadata.pop(key, None)
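The mutator methods above compose straightforwardly; a minimal sketch (the tag, metadata, and variable names are made up for illustration):

```python
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_core.callbacks.stdout import StdOutCallbackHandler

manager = BaseCallbackManager(handlers=[])
manager.add_handler(StdOutCallbackHandler(), inherit=True)  # appended to handlers
manager.add_tags(["experiment-1"])                          # appended to tags
manager.add_metadata({"run": "demo"})                       # merged into metadata
manager.remove_tags(["experiment-1"])                       # removed again
```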
@@ -27,32 +27,32 @@ class FileCallbackHandler(BaseCallbackHandler):
Examples:
Using as a context manager (recommended):

.. code-block:: python

with FileCallbackHandler("output.txt") as handler:
# Use handler with your chain/agent
chain.invoke(inputs, config={"callbacks": [handler]})
```python
with FileCallbackHandler("output.txt") as handler:
# Use handler with your chain/agent
chain.invoke(inputs, config={"callbacks": [handler]})
```

Direct instantiation (deprecated):

.. code-block:: python

handler = FileCallbackHandler("output.txt")
# File remains open until handler is garbage collected
try:
chain.invoke(inputs, config={"callbacks": [handler]})
finally:
handler.close()  # Explicit cleanup recommended
```python
handler = FileCallbackHandler("output.txt")
# File remains open until handler is garbage collected
try:
chain.invoke(inputs, config={"callbacks": [handler]})
finally:
handler.close()  # Explicit cleanup recommended
```

Args:
filename: The file path to write to.
mode: The file open mode. Defaults to ``'a'`` (append).
color: Default color for text output. Defaults to ``None``.
mode: The file open mode. Defaults to `'a'` (append).
color: Default color for text output.

!!! note
When not used as a context manager, a deprecation warning will be issued
on first use. The file will be opened immediately in ``__init__`` and closed
in ``__del__`` or when ``close()`` is called explicitly.
on first use. The file will be opened immediately in `__init__` and closed
in `__del__` or when `close()` is called explicitly.

"""

@@ -63,8 +63,8 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
filename: Path to the output file.
mode: File open mode (e.g., ``'w'``, ``'a'``, ``'x'``). Defaults to ``'a'``.
color: Default text color for output. Defaults to ``None``.
mode: File open mode (e.g., `'w'`, `'a'`, `'x'`). Defaults to `'a'`.
color: Default text color for output.

"""
self.filename = filename
@@ -84,7 +84,7 @@ class FileCallbackHandler(BaseCallbackHandler):
The FileCallbackHandler instance.

!!! note
The file is already opened in ``__init__``, so this just marks that
The file is already opened in `__init__`, so this just marks that
the handler is being used as a context manager.

"""
@@ -131,9 +131,9 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
text: The text to write to the file.
color: Optional color for the text. Defaults to ``self.color``.
end: String appended after the text. Defaults to ``""``.
file: Optional file to write to. Defaults to ``self.file``.
color: Optional color for the text. Defaults to `self.color`.
end: String appended after the text.
file: Optional file to write to. Defaults to `self.file`.

Raises:
RuntimeError: If the file is closed or not available.
@@ -167,7 +167,7 @@ class FileCallbackHandler(BaseCallbackHandler):
Args:
serialized: The serialized chain information.
inputs: The inputs to the chain.
**kwargs: Additional keyword arguments that may contain ``'name'``.
**kwargs: Additional keyword arguments that may contain `'name'`.

"""
name = (
@@ -196,8 +196,8 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
action: The agent action containing the log to write.
color: Color override for this specific output. If ``None``, uses
``self.color``.
color: Color override for this specific output. If `None`, uses
`self.color`.
**kwargs: Additional keyword arguments.

"""
@@ -216,8 +216,8 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
output: The tool output to write.
color: Color override for this specific output. If ``None``, uses
``self.color``.
color: Color override for this specific output. If `None`, uses
`self.color`.
observation_prefix: Optional prefix to write before the output.
llm_prefix: Optional prefix to write after the output.
**kwargs: Additional keyword arguments.
@@ -237,9 +237,9 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
text: The text to write.
color: Color override for this specific output. If ``None``, uses
``self.color``.
end: String appended after the text. Defaults to ``""``.
color: Color override for this specific output. If `None`, uses
`self.color`.
end: String appended after the text.
**kwargs: Additional keyword arguments.

"""
@@ -253,8 +253,8 @@ class FileCallbackHandler(BaseCallbackHandler):

Args:
finish: The agent finish object containing the log to write.
color: Color override for this specific output. If ``None``, uses
``self.color``.
color: Color override for this specific output. If `None`, uses
`self.color`.
**kwargs: Additional keyword arguments.

"""

File diff suppressed because it is too large
@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Initialize callback handler.

Args:
color: The color to use for the text. Defaults to None.
color: The color to use for the text.
"""
self.color = color

@@ -31,9 +31,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Print out that we are entering a chain.

Args:
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
serialized: The serialized chain.
inputs: The inputs to the chain.
**kwargs: Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
@@ -48,8 +48,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Print out that we finished a chain.

Args:
outputs (dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
outputs: The outputs of the chain.
**kwargs: Additional keyword arguments.
"""
print("\n\033[1m> Finished chain.\033[0m")  # noqa: T201

@@ -60,9 +60,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run on agent action.

Args:
action (AgentAction): The agent action.
color (Optional[str]): The color to use for the text. Defaults to None.
**kwargs (Any): Additional keyword arguments.
action: The agent action.
color: The color to use for the text.
**kwargs: Additional keyword arguments.
"""
print_text(action.log, color=color or self.color)

@@ -78,12 +78,11 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""If not the final action, print out observation.

Args:
output (Any): The output to print.
color (Optional[str]): The color to use for the text. Defaults to None.
observation_prefix (Optional[str]): The observation prefix.
Defaults to None.
llm_prefix (Optional[str]): The LLM prefix. Defaults to None.
**kwargs (Any): Additional keyword arguments.
output: The output to print.
color: The color to use for the text.
observation_prefix: The observation prefix.
llm_prefix: The LLM prefix.
**kwargs: Additional keyword arguments.
"""
output = str(output)
if observation_prefix is not None:
@@ -103,10 +102,10 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run when the agent ends.

Args:
text (str): The text to print.
color (Optional[str]): The color to use for the text. Defaults to None.
end (str): The end character to use. Defaults to "".
**kwargs (Any): Additional keyword arguments.
text: The text to print.
color: The color to use for the text.
end: The end character to use.
**kwargs: Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end)

@@ -117,8 +116,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run on the agent end.

Args:
finish (AgentFinish): The agent finish.
color (Optional[str]): The color to use for the text. Defaults to None.
**kwargs (Any): Additional keyword arguments.
finish: The agent finish.
color: The color to use for the text.
**kwargs: Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n")
@@ -24,9 +24,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM starts running.

Args:
serialized (dict[str, Any]): The serialized LLM.
prompts (list[str]): The prompts to run.
**kwargs (Any): Additional keyword arguments.
serialized: The serialized LLM.
prompts: The prompts to run.
**kwargs: Additional keyword arguments.
"""

def on_chat_model_start(
@@ -38,9 +38,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM starts running.

Args:
serialized (dict[str, Any]): The serialized LLM.
messages (list[list[BaseMessage]]): The messages to run.
**kwargs (Any): Additional keyword arguments.
serialized: The serialized LLM.
messages: The messages to run.
**kwargs: Additional keyword arguments.
"""

@override
@@ -48,8 +48,8 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run on new LLM token. Only available when streaming is enabled.

Args:
token (str): The new token.
**kwargs (Any): Additional keyword arguments.
token: The new token.
**kwargs: Additional keyword arguments.
"""
sys.stdout.write(token)
sys.stdout.flush()
@@ -58,16 +58,16 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM ends running.

Args:
response (LLMResult): The response from the LLM.
**kwargs (Any): Additional keyword arguments.
response: The response from the LLM.
**kwargs: Additional keyword arguments.
"""

def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors.

Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
error: The error that occurred.
**kwargs: Additional keyword arguments.
"""

def on_chain_start(
@@ -76,25 +76,25 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when a chain starts running.

Args:
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
serialized: The serialized chain.
inputs: The inputs to the chain.
**kwargs: Additional keyword arguments.
"""

def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Run when a chain ends running.

Args:
outputs (dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
outputs: The outputs of the chain.
**kwargs: Additional keyword arguments.
"""

def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors.

Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
error: The error that occurred.
**kwargs: Additional keyword arguments.
"""

def on_tool_start(
@@ -103,47 +103,47 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when the tool starts running.

Args:
serialized (dict[str, Any]): The serialized tool.
input_str (str): The input string.
**kwargs (Any): Additional keyword arguments.
serialized: The serialized tool.
input_str: The input string.
**kwargs: Additional keyword arguments.
"""

def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action.

Args:
action (AgentAction): The agent action.
**kwargs (Any): Additional keyword arguments.
action: The agent action.
**kwargs: Additional keyword arguments.
"""

def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running.

Args:
output (Any): The output of the tool.
**kwargs (Any): Additional keyword arguments.
output: The output of the tool.
**kwargs: Additional keyword arguments.
"""

def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors.

Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
error: The error that occurred.
**kwargs: Additional keyword arguments.
"""

def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on an arbitrary text.

Args:
text (str): The text to print.
**kwargs (Any): Additional keyword arguments.
text: The text to print.
**kwargs: Additional keyword arguments.
"""

def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on the agent end.

Args:
finish (AgentFinish): The agent finish.
**kwargs (Any): Additional keyword arguments.
finish: The agent finish.
**kwargs: Additional keyword arguments.
"""
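A typical use of `StreamingStdOutCallbackHandler` is printing tokens as a chat model streams them; `on_llm_new_token` fires per chunk only while streaming. A hedged sketch (the model identifier is an assumption, not part of this diff):

```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

llm = init_chat_model(model="openai:gpt-4o-mini")
# Streaming fires on_llm_new_token for each chunk, which the handler
# writes straight to stdout.
for _ in llm.stream(
    "Write a haiku about callbacks.",
    config={"callbacks": [StreamingStdOutCallbackHandler()]},
):
    pass  # output is produced by the handler itself
```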
@@ -19,30 +19,29 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.

Example:
.. code-block:: python
```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler

from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata

.. code-block::

{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata
```
```txt
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
```

!!! version-added "Added in version 0.3.49"
@@ -96,40 +95,44 @@ def get_usage_metadata_callback(
"""Get usage metadata callback.

Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
`AIMessage.usage_metadata`.

Args:
name (str): The name of the context variable. Defaults to
``'usage_metadata_callback'``.
name: The name of the context variable.

Yields:
The usage metadata callback.

Example:
.. code-block:: python
```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback

from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)

.. code-block::

{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)
```
```txt
{
"gpt-4o-mini-2024-07-18": {
"input_tokens": 8,
"output_tokens": 10,
"total_tokens": 18,
"input_token_details": {"audio": 0, "cache_read": 0},
"output_token_details": {"audio": 0, "reasoning": 0},
},
"claude-3-5-haiku-20241022": {
"input_tokens": 8,
"output_tokens": 21,
"total_tokens": 29,
"input_token_details": {"cache_read": 0, "cache_creation": 0},
},
}
```

!!! version-added "Added in version 0.3.49"
@@ -1,18 +1,4 @@
"""**Chat message history** stores a history of the message interactions in a chat.

**Class hierarchy:**

.. code-block::

BaseChatMessageHistory --> <name>ChatMessageHistory  # Examples: FileChatMessageHistory, PostgresChatMessageHistory

**Main helpers:**

.. code-block::

AIMessage, HumanMessage, BaseMessage

""" # noqa: E501
"""**Chat message history** stores a history of the message interactions in a chat."""

from __future__ import annotations

@@ -22,7 +8,9 @@ from typing import TYPE_CHECKING
from pydantic import BaseModel, Field

from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
from langchain_core.runnables.config import run_in_executor
@@ -61,46 +49,45 @@ class BaseChatMessageHistory(ABC):

Example: Shows a default implementation.

.. code-block:: python

import json
import os
from langchain_core.messages import messages_from_dict, message_to_dict
```python
import json
import os
from langchain_core.messages import messages_from_dict, message_to_dict


class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str

@property
def messages(self) -> list[BaseMessage]:
try:
with open(
os.path.join(self.storage_path, self.session_id),
"r",
encoding="utf-8",
) as f:
messages_data = json.load(f)
return messages_from_dict(messages_data)
except FileNotFoundError:
return []
@property
def messages(self) -> list[BaseMessage]:
try:
with open(
os.path.join(self.storage_path, self.session_id),
"r",
encoding="utf-8",
) as f:
messages_data = json.load(f)
return messages_from_dict(messages_data)
except FileNotFoundError:
return []

def add_messages(self, messages: Sequence[BaseMessage]) -> None:
all_messages = list(self.messages)  # Existing messages
all_messages.extend(messages)  # Add new messages
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
all_messages = list(self.messages)  # Existing messages
all_messages.extend(messages)  # Add new messages

serialized = [message_to_dict(message) for message in all_messages]
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump(serialized, f)

def clear(self) -> None:
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump([], f)
serialized = [message_to_dict(message) for message in all_messages]
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump(serialized, f)

def clear(self) -> None:
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump([], f)
```
"""

messages: list[BaseMessage]
@@ -124,6 +111,40 @@ class BaseChatMessageHistory(ABC):
"""
return await run_in_executor(None, lambda: self.messages)

def add_user_message(self, message: HumanMessage | str) -> None:
"""Convenience method for adding a human message string to the store.

!!! note
This is a convenience method. Code should favor the bulk `add_messages`
interface instead to save on round-trips to the persistence layer.

This method may be deprecated in a future release.

Args:
message: The human message to add to the store.
"""
if isinstance(message, HumanMessage):
self.add_message(message)
else:
self.add_message(HumanMessage(content=message))

def add_ai_message(self, message: AIMessage | str) -> None:
"""Convenience method for adding an AI message string to the store.

!!! note
This is a convenience method. Code should favor the bulk `add_messages`
interface instead to save on round-trips to the persistence layer.

This method may be deprecated in a future release.

Args:
message: The AI message to add.
"""
if isinstance(message, AIMessage):
self.add_message(message)
else:
self.add_message(AIMessage(content=message))

def add_message(self, message: BaseMessage) -> None:
"""Add a Message object to the store.

@@ -132,7 +153,7 @@ class BaseChatMessageHistory(ABC):

Raises:
NotImplementedError: If the sub-class has not implemented an efficient
add_messages method.
`add_messages` method.
"""
if type(self).add_messages != BaseChatMessageHistory.add_messages:
# This means that the sub-class has implemented an efficient add_messages
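Per the notes above, the bulk `add_messages` interface is preferred over the per-message conveniences. A short sketch using the in-memory implementation shipped with `langchain_core`:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()
# One bulk call instead of add_user_message/add_ai_message pairs saves
# round-trips to the persistence layer.
history.add_messages(
    [
        HumanMessage(content="Hi!"),
        AIMessage(content="Hello! How can I help?"),
    ]
)
print(history.messages)
```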
@@ -35,38 +35,38 @@ class BaseLoader(ABC):  # noqa: B024
# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
"""Load data into Document objects.
"""Load data into `Document` objects.

Returns:
the documents.
The documents.
"""
return list(self.lazy_load())

async def aload(self) -> list[Document]:
"""Load data into Document objects.
"""Load data into `Document` objects.

Returns:
the documents.
The documents.
"""
return [document async for document in self.alazy_load()]

def load_and_split(
self, text_splitter: TextSplitter | None = None
) -> list[Document]:
"""Load Documents and split into chunks. Chunks are returned as Documents.
"""Load Documents and split into chunks. Chunks are returned as `Document`.

Do not override this method. It should be considered to be deprecated!

Args:
text_splitter: TextSplitter instance to use for splitting documents.
Defaults to RecursiveCharacterTextSplitter.
text_splitter: `TextSplitter` instance to use for splitting documents.
Defaults to `RecursiveCharacterTextSplitter`.

Raises:
ImportError: If langchain-text-splitters is not installed
and no text_splitter is provided.
ImportError: If `langchain-text-splitters` is not installed
and no `text_splitter` is provided.

Returns:
List of Documents.
List of `Document`.
"""
if text_splitter is None:
if not _HAS_TEXT_SPLITTERS:
@@ -86,10 +86,10 @@ class BaseLoader(ABC):  # noqa: B024
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for Documents.
"""A lazy loader for `Document`.

Yields:
the documents.
The `Document` objects.
"""
if type(self).load != BaseLoader.load:
return iter(self.load())
@@ -97,10 +97,10 @@ class BaseLoader(ABC):  # noqa: B024
raise NotImplementedError(msg)

async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for Documents.
"""A lazy loader for `Document`.

Yields:
the documents.
The `Document` objects.
"""
iterator = await run_in_executor(None, self.lazy_load)
done = object()
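Since `load` and `aload` are both defined in terms of `lazy_load`, a subclass only needs to supply the one generator. A hedged sketch of a custom loader (the class name and file format are made up for illustration):

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineLoader(BaseLoader):
    """Yield one Document per line of a UTF-8 text file."""

    def __init__(self, path: str) -> None:
        self.path = path

    def lazy_load(self) -> Iterator[Document]:
        with open(self.path, encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield Document(
                    page_content=line.rstrip("\n"),
                    metadata={"source": self.path, "line": i},
                )
```

With only `lazy_load` implemented, `load()` collects the iterator into a list and `alazy_load()` falls back to running it in an executor.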
@@ -115,7 +115,7 @@ class BaseBlobParser(ABC):
"""Abstract interface for blob parsers.

A blob parser provides a way to parse raw data stored in a blob into one
or more documents.
or more `Document` objects.

The parser can be composed with blob loaders, making it easy to reuse
a parser independent of how the blob was originally loaded.
@@ -128,25 +128,25 @@ class BaseBlobParser(ABC):
Subclasses are required to implement this method.

Args:
blob: Blob instance
blob: `Blob` instance

Returns:
Generator of documents
Generator of `Document` objects
"""

def parse(self, blob: Blob) -> list[Document]:
"""Eagerly parse the blob into a document or documents.
"""Eagerly parse the blob into a `Document` or `Document` objects.

This is a convenience method for interactive development environment.

Production applications should favor the lazy_parse method instead.
Production applications should favor the `lazy_parse` method instead.

Subclasses should generally not over-ride this parse method.

Args:
blob: Blob instance
blob: `Blob` instance

Returns:
List of documents
List of `Document` objects
"""
return list(self.lazy_parse(blob))
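Likewise, `parse` is derived from `lazy_parse`, so a parser only implements the generator. A minimal sketch (the parser name is illustrative):

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseBlobParser, Blob
from langchain_core.documents import Document


class TextBlobParser(BaseBlobParser):
    """Decode a blob as text and emit a single Document."""

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        yield Document(
            page_content=blob.as_string(),
            metadata={"source": blob.source},
        )
```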
@@ -22,22 +22,22 @@ class LangSmithLoader(BaseLoader):

??? note "Lazy load"

.. code-block:: python
```python
from langchain_core.document_loaders import LangSmithLoader

from langchain_core.document_loaders import LangSmithLoader
loader = LangSmithLoader(dataset_id="...", limit=100)
docs = []
for doc in loader.lazy_load():
docs.append(doc)
```

loader = LangSmithLoader(dataset_id="...", limit=100)
docs = []
for doc in loader.lazy_load():
docs.append(doc)

.. code-block:: python

# -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
```python
# -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
```

!!! version-added "Added in version 0.2.34"

""" # noqa: E501
"""

def __init__(
self,
@@ -60,15 +60,15 @@ class LangSmithLoader(BaseLoader):
"""Create a LangSmith loader.

Args:
dataset_id: The ID of the dataset to filter by. Defaults to None.
dataset_name: The name of the dataset to filter by. Defaults to None.
content_key: The inputs key to set as Document page content. ``'.'`` characters
are interpreted as nested keys. E.g. ``content_key="first.second"`` will
dataset_id: The ID of the dataset to filter by.
dataset_name: The name of the dataset to filter by.
content_key: The inputs key to set as Document page content. `'.'` characters
are interpreted as nested keys. E.g. `content_key="first.second"` will
result in
``Document(page_content=format_content(example.inputs["first"]["second"]))``
`Document(page_content=format_content(example.inputs["first"]["second"]))`
format_content: Function for converting the content extracted from the example
inputs into a string. Defaults to JSON-encoding the contents.
example_ids: The IDs of the examples to filter by. Defaults to None.
example_ids: The IDs of the examples to filter by.
as_of: The dataset version tag OR
timestamp to retrieve the examples as of.
Response examples will only be those that were present at the time
@@ -76,17 +76,17 @@ class LangSmithLoader(BaseLoader):
splits: A list of dataset splits, which are
divisions of your dataset such as 'train', 'test', or 'validation'.
Returns examples only from the specified splits.
inline_s3_urls: Whether to inline S3 URLs. Defaults to True.
offset: The offset to start from. Defaults to 0.
inline_s3_urls: Whether to inline S3 URLs.
offset: The offset to start from.
limit: The maximum number of examples to return.
metadata: Metadata to filter by. Defaults to None.
metadata: Metadata to filter by.
filter: A structured filter string to apply to the examples.
client: LangSmith Client. If not provided will be initialized from below args.
client_kwargs: Keyword args to pass to LangSmith client init. Should only be
specified if ``client`` isn't.
specified if `client` isn't.

Raises:
ValueError: If both ``client`` and ``client_kwargs`` are provided.
ValueError: If both `client` and `client_kwargs` are provided.
""" # noqa: E501
if client and client_kwargs:
raise ValueError
@@ -2,7 +2,6 @@

**Document** module is a collection of classes that handle documents
and their transformations.

"""

from typing import TYPE_CHECKING
@@ -57,52 +57,51 @@ class Blob(BaseMedia):

Example: Initialize a blob from in-memory data

.. code-block:: python
```python
from langchain_core.documents import Blob

from langchain_core.documents import Blob
blob = Blob.from_data("Hello, world!")

blob = Blob.from_data("Hello, world!")
# Read the blob as a string
print(blob.as_string())

# Read the blob as a string
print(blob.as_string())
# Read the blob as bytes
print(blob.as_bytes())

# Read the blob as bytes
print(blob.as_bytes())

# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```

Example: Load from memory and specify mime-type and metadata

.. code-block:: python
```python
from langchain_core.documents import Blob

from langchain_core.documents import Blob

blob = Blob.from_data(
data="Hello, world!",
mime_type="text/plain",
metadata={"source": "https://example.com"},
)
blob = Blob.from_data(
data="Hello, world!",
mime_type="text/plain",
metadata={"source": "https://example.com"},
)
```

Example: Load the blob from a file

.. code-block:: python
```python
from langchain_core.documents import Blob

from langchain_core.documents import Blob
blob = Blob.from_path("path/to/file.txt")

blob = Blob.from_path("path/to/file.txt")
# Read the blob as a string
print(blob.as_string())

# Read the blob as a string
print(blob.as_string())

# Read the blob as bytes
print(blob.as_bytes())

# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
# Read the blob as bytes
print(blob.as_bytes())

# Read the blob as a byte stream
with blob.as_bytes_io() as f:
print(f.read())
```
"""

data: bytes | str | None = None
@@ -112,7 +111,7 @@ class Blob(BaseMedia):
encoding: str = "utf-8"
"""Encoding to use if decoding the bytes into a string.

Use utf-8 as default encoding, if decoding to string.
Use `utf-8` as default encoding, if decoding to string.
"""
path: PathLike | None = None
"""Location where the original content was found."""
@@ -128,7 +127,7 @@ class Blob(BaseMedia):

If a path is associated with the blob, it will default to the path location.

Unless explicitly set via a metadata field called "source", in which
Unless explicitly set via a metadata field called `"source"`, in which
case that value will be used instead.
"""
if self.metadata and "source" in self.metadata:
@@ -212,11 +211,11 @@ class Blob(BaseMedia):
"""Load the blob from a path like object.

Args:
path: path like object to file to be read
path: Path-like object to file to be read
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
guess_type: If True, the mimetype will be guessed from the file extension,
if a mime-type was not provided
mime_type: If provided, will be set as the mime-type of the data
guess_type: If `True`, the mimetype will be guessed from the file extension,
if a mime-type was not provided
metadata: Metadata to associate with the blob

Returns:
@@ -249,10 +248,10 @@ class Blob(BaseMedia):
"""Initialize the blob from in-memory data.

Args:
data: the in-memory data associated with the blob
data: The in-memory data associated with the blob
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
path: if provided, will be set as the source from which the data came
mime_type: If provided, will be set as the mime-type of the data
path: If provided, will be set as the source from which the data came
metadata: Metadata to associate with the blob

Returns:
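A quick runnable sketch of the `source` resolution described in this hunk; an explicit `"source"` metadata field wins over the path:

```python
from langchain_core.documents import Blob

blob = Blob.from_data(
    data="Hello, world!",
    path="path/to/file.txt",
    metadata={"source": "https://example.com"},
)
# The explicit "source" metadata field takes precedence over the path.
print(blob.source)  # -> https://example.com
```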
@@ -278,15 +277,13 @@ class Document(BaseMedia):
"""Class for storing a piece of text and associated metadata.

Example:
```python
from langchain_core.documents import Document

.. code-block:: python

from langchain_core.documents import Document

document = Document(
page_content="Hello, world!", metadata={"source": "https://example.com"}
)

document = Document(
page_content="Hello, world!", metadata={"source": "https://example.com"}
)
```
"""

page_content: str
@@ -306,7 +303,7 @@ class Document(BaseMedia):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
["langchain", "schema", "document"]

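Putting this hunk together, a minimal runnable example of the constructor and the serialization namespace it documents:

```python
from langchain_core.documents import Document

document = Document(
    page_content="Hello, world!", metadata={"source": "https://example.com"}
)
print(document.page_content)
# Namespace used for serialization, as documented above:
print(Document.get_lc_namespace())  # -> ['langchain', 'schema', 'document']
```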
@@ -20,35 +20,34 @@ class BaseDocumentTransformer(ABC):
sequence of transformed Documents.

Example:
.. code-block:: python
```python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95

class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True

class Config:
arbitrary_types_allowed = True

def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents,
self.similarity_fn,
self.similarity_threshold,
)
return [stateful_documents[i] for i in sorted(included_idxs)]

async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents,
self.similarity_fn,
self.similarity_threshold,
)
return [stateful_documents[i] for i in sorted(included_idxs)]

async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
```
"""

@abstractmethod

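The `EmbeddingsRedundantFilter` example above leans on private helpers, so here is a self-contained toy transformer with the same interface (the class name and behavior are my own, not part of the library):

```python
from collections.abc import Sequence
from typing import Any

from langchain_core.documents import BaseDocumentTransformer, Document


class UppercaseTransformer(BaseDocumentTransformer):
    """Toy transformer that upper-cases page content."""

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        return [
            Document(page_content=doc.page_content.upper(), metadata=doc.metadata)
            for doc in documents
        ]

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        # Defer to the sync implementation for this sketch.
        return self.transform_documents(documents, **kwargs)


docs = UppercaseTransformer().transform_documents([Document(page_content="hi")])
print(docs[0].page_content)  # -> HI
```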
@@ -18,40 +18,38 @@ class FakeEmbeddings(Embeddings, BaseModel):

This embedding model creates embeddings by sampling from a normal distribution.

Do not use this outside of testing, as it is not a real embedding model.
!!! warning
Do not use this outside of testing, as it is not a real embedding model.

Instantiate:
.. code-block:: python
```python
from langchain_core.embeddings import FakeEmbeddings

from langchain_core.embeddings import FakeEmbeddings

embed = FakeEmbeddings(size=100)
embed = FakeEmbeddings(size=100)
```

Embed single text:
.. code-block:: python

input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])

.. code-block:: python

[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
```
```python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```

Embed multiple texts:
.. code-block:: python

input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])

.. code-block:: python

2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]

```python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
```
```python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
```
"""

size: int
@@ -75,40 +73,38 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.

Do not use this outside of testing, as it is not a real embedding model.
!!! warning
Do not use this outside of testing, as it is not a real embedding model.

Instantiate:
.. code-block:: python
```python
from langchain_core.embeddings import DeterministicFakeEmbedding

from langchain_core.embeddings import DeterministicFakeEmbedding

embed = DeterministicFakeEmbedding(size=100)
embed = DeterministicFakeEmbedding(size=100)
```

Embed single text:
.. code-block:: python

input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])

.. code-block:: python

[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
```
```python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```

Embed multiple texts:
.. code-block:: python

input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])

.. code-block:: python

2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]

```python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
```
```python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
```
"""

size: int

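The deterministic variant is useful in tests precisely because of the hash-based seeding described above; a small sketch:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding

embed = DeterministicFakeEmbedding(size=8)
# Same text -> same seed -> same vector; different text -> different vector.
assert embed.embed_query("hello") == embed.embed_query("hello")
assert embed.embed_query("hello") != embed.embed_query("world")
```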
@@ -154,7 +154,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
@@ -198,7 +198,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
@@ -285,9 +285,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Default is 20.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
@@ -333,9 +332,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Default is 20.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.

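A minimal sketch of the selector arguments documented above, using the in-memory fakes from `langchain_core` as stand-ins for a real embedding model and vector store:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.vectorstores import InMemoryVectorStore

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    DeterministicFakeEmbedding(size=16),  # stand-in for a real embedding model
    InMemoryVectorStore,
    k=1,
)
print(selector.select_examples({"input": "joyful"}))
```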
@@ -16,7 +16,7 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
"""Exception that output parsers should raise to signify a parsing error.

This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser. OutputParserExceptions will be
that also may arise inside the output parser. `OutputParserException` will be
available to catch and handle in ways to fix the parsing error, while other
errors will be raised.
"""
@@ -28,24 +28,23 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
llm_output: str | None = None,
send_to_llm: bool = False, # noqa: FBT001,FBT002
):
"""Create an OutputParserException.
"""Create an `OutputParserException`.

Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue. Defaults to None.
model to try and remediate the issue.
llm_output: String model output which is error-ing.
Defaults to None.

send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised.
after an `OutputParserException` has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
Defaults to False.

Raises:
ValueError: If ``send_to_llm`` is True but either observation or
``llm_output`` are not provided.
ValueError: If `send_to_llm` is True but either observation or
`llm_output` are not provided.
"""
if isinstance(error, str):
error = create_message(

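A sketch of raising the exception with the remediation fields described above (the integer parser itself is hypothetical):

```python
from langchain_core.exceptions import OutputParserException


def parse_int(llm_output: str) -> int:
    try:
        return int(llm_output.strip())
    except ValueError as exc:
        # send_to_llm=True requires both observation and llm_output.
        raise OutputParserException(
            f"Could not parse an integer: {exc}",
            observation="Reply with a bare integer such as 42.",
            llm_output=llm_output,
            send_to_llm=True,
        ) from exc
```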
@@ -299,9 +299,9 @@ def index(
are not able to specify the uid of the document.

!!! warning "Behavior changed in 0.3.25"
Added ``scoped_full`` cleanup mode.
Added `scoped_full` cleanup mode.

!!! important
!!! warning

* In full mode, the loader should be returning
the entire dataset, and not just a subset of the dataset.
@@ -315,7 +315,7 @@ def index(
chunks, and we index them using a batch size of 5, we'll have 3 batches
all with the same source id. In general, to avoid doing too much
redundant work select as big a batch size as possible.
* The ``scoped_full`` mode is suitable if determining an appropriate batch size
* The `scoped_full` mode is suitable if determining an appropriate batch size
is challenging or if your data loader cannot return the entire dataset at
once. This mode keeps track of source IDs in memory, which should be fine
for most use cases. If your dataset is large (10M+ docs), you will likely
@@ -326,8 +326,8 @@ def index(
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.

- incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen during indexing.
@@ -342,15 +342,12 @@ def index(
source ids that were seen during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document. Default is None.
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
Default is 1_000.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Default is "sha1".
Other options include "blake2b", "sha256", and "sha512".
metadata. Options include "blake2b", "sha256", and "sha512".

!!! version-added "Added in version 0.3.66"

@@ -381,8 +378,8 @@ def index(
ValueError: If vectorstore does not have
"delete" and "add_documents" required methods.
ValueError: If source_id_key is not None, but is not a string or callable.
TypeError: If ``vectorstore`` is not a VectorStore or a DocumentIndex.
AssertionError: If ``source_id`` is None when cleanup mode is incremental.
TypeError: If `vectorstore` is not a VectorStore or a DocumentIndex.
AssertionError: If `source_id` is None when cleanup mode is incremental.
(should be unreachable code).
"""
# Behavior is deprecated, but we keep it for backwards compatibility.
@@ -640,9 +637,9 @@ async def aindex(
are not able to specify the uid of the document.

!!! warning "Behavior changed in 0.3.25"
Added ``scoped_full`` cleanup mode.
Added `scoped_full` cleanup mode.

!!! important
!!! warning

* In full mode, the loader should be returning
the entire dataset, and not just a subset of the dataset.
@@ -656,7 +653,7 @@ async def aindex(
chunks, and we index them using a batch size of 5, we'll have 3 batches
all with the same source id. In general, to avoid doing too much
redundant work select as big a batch size as possible.
* The ``scoped_full`` mode is suitable if determining an appropriate batch size
* The `scoped_full` mode is suitable if determining an appropriate batch size
is challenging or if your data loader cannot return the entire dataset at
once. This mode keeps track of source IDs in memory, which should be fine
for most use cases. If your dataset is large (10M+ docs), you will likely
@@ -667,8 +664,8 @@ async def aindex(
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.

- incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen during indexing.
@@ -683,15 +680,12 @@ async def aindex(
source ids that were seen during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document. Default is None.
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
Default is 1_000.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Default is "sha1".
Other options include "blake2b", "sha256", and "sha512".
metadata. Options include "blake2b", "sha256", and "sha512".

!!! version-added "Added in version 0.3.66"

@@ -722,9 +716,9 @@ async def aindex(
ValueError: If vectorstore does not have
"adelete" and "aadd_documents" required methods.
ValueError: If source_id_key is not None, but is not a string or callable.
TypeError: If ``vector_store`` is not a VectorStore or DocumentIndex.
AssertionError: If ``source_id_key`` is None when cleanup mode is
incremental or ``scoped_full`` (should be unreachable).
TypeError: If `vector_store` is not a VectorStore or DocumentIndex.
AssertionError: If `source_id_key` is None when cleanup mode is
incremental or `scoped_full` (should be unreachable).
"""
# Behavior is deprecated, but we keep it for backwards compatibility.
# # Warn only once per process.

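A runnable sketch of the `index` API documented in the two hunks above, using the in-memory components from `langchain_core` (document contents and source names are made up):

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.indexing import InMemoryRecordManager, index
from langchain_core.vectorstores import InMemoryVectorStore

record_manager = InMemoryRecordManager(namespace="demo")
vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=16))

docs = [
    Document(page_content="doc 1", metadata={"source": "a.txt"}),
    Document(page_content="doc 2", metadata={"source": "b.txt"}),
]
result = index(
    docs,
    record_manager,
    vector_store,
    cleanup="incremental",  # clean up stale docs sharing a seen source id
    source_id_key="source",
)
print(result)  # counts of added / updated / skipped / deleted documents
```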
@@ -61,7 +61,7 @@ class RecordManager(ABC):
"""Initialize the record manager.

Args:
namespace (str): The namespace for the record manager.
namespace: The namespace for the record manager.
"""
self.namespace = namespace

@@ -244,7 +244,7 @@ class InMemoryRecordManager(RecordManager):
"""Initialize the in-memory record manager.

Args:
namespace (str): The namespace for the record manager.
namespace: The namespace for the record manager.
"""
super().__init__(namespace)
# Each key points to a dictionary
@@ -278,10 +278,10 @@ class InMemoryRecordManager(RecordManager):
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
Defaults to None.

time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores. Defaults to None.
in the system that stores.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.
@@ -315,10 +315,10 @@ class InMemoryRecordManager(RecordManager):
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
Defaults to None.

time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores. Defaults to None.
in the system that stores.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.
@@ -361,13 +361,13 @@ class InMemoryRecordManager(RecordManager):

Args:
before: Filter to list records updated before this time.
Defaults to None.

after: Filter to list records updated after this time.
Defaults to None.

group_ids: Filter to list records with specific group IDs.
Defaults to None.

limit: optional limit on the number of records to return.
Defaults to None.


Returns:
A list of keys for the matching records.
@@ -397,13 +397,13 @@ class InMemoryRecordManager(RecordManager):

Args:
before: Filter to list records updated before this time.
Defaults to None.

after: Filter to list records updated after this time.
Defaults to None.

group_ids: Filter to list records with specific group IDs.
Defaults to None.

limit: optional limit on the number of records to return.
Defaults to None.


Returns:
A list of keys for the matching records.
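A small sketch of the record manager operations documented above:

```python
from langchain_core.indexing import InMemoryRecordManager

manager = InMemoryRecordManager(namespace="demo")
manager.update(["key-1", "key-2"], group_ids=["group-a", "group-a"])

# list_keys supports before/after timestamp filters, group_ids, and limit.
print(manager.list_keys(group_ids=["group-a"]))  # -> ['key-1', 'key-2']
print(manager.exists(["key-1", "missing"]))      # -> [True, False]
```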
@@ -522,14 +522,14 @@ class DocumentIndex(BaseRetriever):

When an ID is specified and the content already exists in the vectorstore,
the upsert method should update the content with the new data. If the content
does not exist, the upsert method should add the item to the vectorstore.
does not exist, the upsert method should add the item to the `VectorStore`.

Args:
items: Sequence of documents to add to the vectorstore.
items: Sequence of documents to add to the `VectorStore`.
**kwargs: Additional keyword arguments.

Returns:
UpsertResponse: A response object that contains the list of IDs that were
A response object that contains the list of IDs that were
successfully added or updated in the vectorstore and the list of IDs that
failed to be added or updated.
"""
@@ -545,14 +545,14 @@ class DocumentIndex(BaseRetriever):

When an ID is specified and the item already exists in the vectorstore,
the upsert method should update the item with the new data. If the item
does not exist, the upsert method should add the item to the vectorstore.
does not exist, the upsert method should add the item to the `VectorStore`.

Args:
items: Sequence of documents to add to the vectorstore.
items: Sequence of documents to add to the `VectorStore`.
**kwargs: Additional keyword arguments.

Returns:
UpsertResponse: A response object that contains the list of IDs that were
A response object that contains the list of IDs that were
successfully added or updated in the vectorstore and the list of IDs that
failed to be added or updated.
"""
@@ -571,12 +571,12 @@ class DocumentIndex(BaseRetriever):

Args:
ids: List of ids to delete.
kwargs: Additional keyword arguments. This is up to the implementation.
**kwargs: Additional keyword arguments. This is up to the implementation.
For example, can include an option to delete the entire index,
or else issue a non-blocking delete etc.

Returns:
DeleteResponse: A response object that contains the list of IDs that were
A response object that contains the list of IDs that were
successfully deleted and the list of IDs that failed to be deleted.
"""

@@ -589,11 +589,11 @@ class DocumentIndex(BaseRetriever):

Args:
ids: List of ids to delete.
kwargs: Additional keyword arguments. This is up to the implementation.
**kwargs: Additional keyword arguments. This is up to the implementation.
For example, can include an option to delete the entire index.

Returns:
DeleteResponse: A response object that contains the list of IDs that were
A response object that contains the list of IDs that were
successfully deleted and the list of IDs that failed to be deleted.
"""
return await run_in_executor(
@@ -624,10 +624,10 @@ class DocumentIndex(BaseRetriever):

Args:
ids: List of IDs to get.
kwargs: Additional keyword arguments. These are up to the implementation.
**kwargs: Additional keyword arguments. These are up to the implementation.

Returns:
list[Document]: List of documents that were found.
List of documents that were found.
"""

async def aget(
@@ -650,10 +650,10 @@ class DocumentIndex(BaseRetriever):

Args:
ids: List of IDs to get.
kwargs: Additional keyword arguments. These are up to the implementation.
**kwargs: Additional keyword arguments. These are up to the implementation.

Returns:
list[Document]: List of documents that were found.
List of documents that were found.
"""
return await run_in_executor(
None,

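`DocumentIndex` itself is abstract; as a sketch of the upsert/get/delete semantics above, assuming the in-memory reference implementation at `langchain_core.indexing.in_memory.InMemoryDocumentIndex` is available:

```python
from langchain_core.documents import Document
from langchain_core.indexing.in_memory import InMemoryDocumentIndex

doc_index = InMemoryDocumentIndex()
up = doc_index.upsert([Document(id="1", page_content="hello")])
print(up["succeeded"])  # IDs stored or updated, e.g. ['1']

print(doc_index.get(["1"])[0].page_content)  # -> hello
print(doc_index.delete(["1"])["succeeded"])  # -> ['1']
```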
@@ -1,45 +1,29 @@
"""Language models.

**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: chat models and
"old-fashioned" LLMs.

LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.

**Chat Models**
**Chat models**

Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
as outputs (as opposed to using plain text). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.

The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides with more
information on how to implement a custom chat model.
should inherit from this class.

To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:

https://python.langchain.com/docs/how_to/custom_chat_model/
See existing [chat model integrations](https://docs.langchain.com/oss/python/integrations/chat).

**LLMs**

Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are Chat Models,
see below).

Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.

To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:

https://python.langchain.com/docs/how_to/custom_llm/

These are traditionally older models (newer models generally are chat models).

Although the underlying models are string in, string out, the LangChain wrappers also
allow these models to take messages as input. This gives them the same interface as
chat models. When messages are passed in as input, they will be formatted into a string
under the hood before being passed to the underlying model.
"""

from typing import TYPE_CHECKING

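A sketch of the shared interface described above, using the fake models bundled with `langchain_core`:

```python
from langchain_core.language_models import FakeListChatModel, FakeListLLM

chat_model = FakeListChatModel(responses=["Hi there!"])
llm = FakeListLLM(responses=["Hi there!"])

# Both accept message-style input; the LLM wrapper formats it to a string
# under the hood, giving the two classes the same interface.
print(chat_model.invoke([("human", "Hello!")]).content)
print(llm.invoke([("human", "Hello!")]))
```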
@@ -19,7 +19,7 @@ def is_openai_data_block(
) -> bool:
"""Check whether a block contains multimodal data in OpenAI Chat Completions format.

Supports both data and ID-style blocks (e.g. ``'file_data'`` and ``'file_id'``)
Supports both data and ID-style blocks (e.g. `'file_data'` and `'file_id'`)

If additional keys are present, they are ignored / will not affect outcome as long
as the required keys are present and valid.
@@ -30,12 +30,12 @@ def is_openai_data_block(
- "image": Only match image_url blocks
- "audio": Only match input_audio blocks
- "file": Only match file blocks
If None, match any valid OpenAI data block type. Note that this means that
If `None`, match any valid OpenAI data block type. Note that this means that
if the block has a valid OpenAI data type but the filter_ is set to a
different type, this function will return False.

Returns:
True if the block is a valid OpenAI data block and matches the filter_
`True` if the block is a valid OpenAI data block and matches the filter_
(if provided).

"""
@@ -89,21 +89,20 @@ class ParsedDataUri(TypedDict):
def _parse_data_uri(uri: str) -> ParsedDataUri | None:
"""Parse a data URI into its components.

If parsing fails, return None. If either MIME type or data is missing, return None.
If parsing fails, return `None`. If either MIME type or data is missing, return
`None`.

Example:
```python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)

.. code-block:: python

data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)

assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}

assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
```
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
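The regex shown above can be exercised on its own; a short sketch with a truncated, made-up payload:

```python
import re

# The pattern used by the parser shown above.
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, "data:image/jpeg;base64,/9j/4AAQSkZJRg...")
assert match is not None
print(match.group("mime_type"))   # -> image/jpeg
print(match.group("data")[:8])    # -> /9j/4AAQ
```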
@@ -133,8 +132,8 @@ def _normalize_messages(
- LangChain v1 standard content blocks

This function extends support to:
- `Audio <https://platform.openai.com/docs/api-reference/chat/create>`__ and
`file <https://platform.openai.com/docs/api-reference/files>`__ data in OpenAI
- [Audio](https://platform.openai.com/docs/api-reference/chat/create) and
[file](https://platform.openai.com/docs/api-reference/files) data in OpenAI
Chat Completions format
- Images are technically supported but we expect chat models to handle them
directly; this may change in the future
@@ -148,50 +147,50 @@ def _normalize_messages(

??? note "v0 Content Block Schemas"

``URLContentBlock``:
`URLContentBlock`:

.. codeblock::
```python
{
mime_type: NotRequired[str]
type: Literal['image', 'audio', 'file'],
source_type: Literal['url'],
url: str,
}
```

{
mime_type: NotRequired[str]
type: Literal['image', 'audio', 'file'],
source_type: Literal['url'],
url: str,
}
`Base64ContentBlock`:

``Base64ContentBlock``:
```python
{
mime_type: NotRequired[str]
type: Literal['image', 'audio', 'file'],
source_type: Literal['base64'],
data: str,
}
```

.. codeblock::

{
mime_type: NotRequired[str]
type: Literal['image', 'audio', 'file'],
source_type: Literal['base64'],
data: str,
}

``IDContentBlock``:
`IDContentBlock`:

(In practice, this was never used)

.. codeblock::
```python
{
type: Literal["image", "audio", "file"],
source_type: Literal["id"],
id: str,
}
```

{
type: Literal['image', 'audio', 'file'],
source_type: Literal['id'],
id: str,
}
`PlainTextContentBlock`:

``PlainTextContentBlock``:

.. codeblock::

{
mime_type: NotRequired[str]
type: Literal['file'],
source_type: Literal['text'],
url: str,
}
```python
{
mime_type: NotRequired[str]
type: Literal['file'],
source_type: Literal['text'],
url: str,
}
```

If a v1 message is passed in, it will be returned as-is, meaning it is safe to
always pass in v1 messages to this function for assurance.
@@ -222,7 +221,7 @@ def _normalize_messages(
"type": Literal['file'],
"file": Union[
{
"filename": Optional[str] = "$FILENAME",
"filename": str | None = "$FILENAME",
"file_data": str = "$BASE64_ENCODED_FILE",
},
{

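For orientation, a sketch of a v0-style block and the OpenAI Chat Completions shape such data is normalized toward (the target dict is an illustration of the format, not the function's exact output):

```python
# A v0-style base64 image block (schema above)...
v0_block = {
    "type": "image",
    "source_type": "base64",
    "mime_type": "image/png",
    "data": "iVBORw0KGgo...",
}
# ...and the OpenAI Chat Completions shape it corresponds to.
openai_block = {
    "type": "image_url",
    "image_url": {"url": f"data:{v0_block['mime_type']};base64,{v0_block['data']}"},
}
```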
@@ -96,9 +96,16 @@ def _get_token_ids_default_method(text: str) -> list[int]:


LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
"""Input to a language model."""

LanguageModelOutput = BaseMessage | str
"""Output from a language model."""

LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
"""Input/output interface for a language model."""

LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
"""Type variable for the output of a language model."""


def _get_verbosity() -> bool:
@@ -110,20 +117,19 @@ class BaseLanguageModel(
):
"""Abstract base class for interfacing with language models.

All language model wrappers inherited from ``BaseLanguageModel``.
All language model wrappers inherited from `BaseLanguageModel`.

"""

cache: BaseCache | bool | None = Field(default=None, exclude=True)
"""Whether to cache the response.

* If true, will use the global cache.
* If false, will not use a cache
* If None, will use the global cache if it's set, otherwise no cache.
* If instance of ``BaseCache``, will use the provided cache.
* If `True`, will use the global cache.
* If `False`, will not use a cache
* If `None`, will use the global cache if it's set, otherwise no cache.
* If instance of `BaseCache`, will use the provided cache.

Caching is not currently supported for streaming methods of models.

"""
verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
"""Whether to print out response text."""
@@ -144,9 +150,9 @@ class BaseLanguageModel(

@field_validator("verbose", mode="before")
def set_verbose(cls, verbose: bool | None) -> bool: # noqa: FBT001
"""If verbose is None, set it.
"""If verbose is `None`, set it.

This allows users to pass in None as verbose to access the global setting.
This allows users to pass in `None` as verbose to access the global setting.

Args:
verbose: The verbosity setting to use.
@@ -162,7 +168,7 @@ class BaseLanguageModel(
@property
@override
def InputType(self) -> TypeAlias:
"""Get the input type for this runnable."""
"""Get the input type for this `Runnable`."""
# This is a version of LanguageModelInput which replaces the abstract
# base class BaseMessage with a union of its subclasses, which makes
# for a much better schema.
@@ -186,22 +192,22 @@ class BaseLanguageModel(
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
prompts: List of `PromptValue` objects. A `PromptValue` is an object that
can be converted to match the format of any language model (string for
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.

"""

@@ -223,22 +229,22 @@ class BaseLanguageModel(
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
prompts: List of `PromptValue` objects. A `PromptValue` is an object that
can be converted to match the format of any language model (string for
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.

Returns:
An ``LLMResult``, which contains a list of candidate Generations for each
input prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.

"""

@@ -263,8 +269,7 @@ class BaseLanguageModel(

Returns:
A list of ids corresponding to the tokens in the text, in order they occur
in the text.

in the text.
"""
if self.custom_get_token_ids is not None:
return self.custom_get_token_ids(text)
@@ -294,13 +299,13 @@ class BaseLanguageModel(
Useful for checking if an input fits in a model's context window.

!!! note
The base implementation of ``get_num_tokens_from_messages`` ignores tool
The base implementation of `get_num_tokens_from_messages` ignores tool
schemas.

Args:
messages: The message inputs to tokenize.
tools: If provided, sequence of dict, ``BaseModel``, function, or
``BaseTools`` to be converted to tool schemas.
tools: If provided, sequence of dict, `BaseModel`, function, or
`BaseTool` objects to be converted to tool schemas.

Returns:
The sum of the number of tokens across the messages.

@@ -108,11 +108,11 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:


def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
"""Format messages for tracing in ``on_chat_model_start``.
"""Format messages for tracing in `on_chat_model_start`.

- Update image content blocks to OpenAI Chat Completions format (backward
compatibility).
- Add ``type`` key to content blocks that have a single key.
- Add `type` key to content blocks that have a single key.

Args:
messages: List of messages to format.
@@ -179,13 +179,13 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
"""Generate from a stream.

Args:
stream: Iterator of ``ChatGenerationChunk``.
stream: Iterator of `ChatGenerationChunk`.

Raises:
ValueError: If no generations are found in the stream.

Returns:
ChatResult: Chat result.
Chat result.

"""
generation = next(stream, None)
@@ -210,10 +210,10 @@ async def agenerate_from_stream(
"""Async generate from a stream.

Args:
stream: Iterator of ``ChatGenerationChunk``.
stream: Iterator of `ChatGenerationChunk`.

Returns:
ChatResult: Chat result.
Chat result.

"""
chunks = [chunk async for chunk in stream]
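A runnable sketch of `generate_from_stream` with two hand-built chunks:

```python
from langchain_core.language_models.chat_models import generate_from_stream
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

chunks = iter(
    [
        ChatGenerationChunk(message=AIMessageChunk(content="Hel")),
        ChatGenerationChunk(message=AIMessageChunk(content="lo")),
    ]
)
result = generate_from_stream(chunks)
print(result.generations[0].message.content)  # -> Hello
```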
@@ -240,79 +240,52 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di


class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
"""Base class for chat models.
r"""Base class for chat models.

Key imperative methods:
Methods that actually call the underlying model.

+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| Method | Input | Output | Description |
+===========================+================================================================+=====================================================================+==================================================================================================+
| `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
+---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.

This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.
| Method | Input | Output | Description |
| ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
| `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
| `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
| `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
| `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
| `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
| `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
| `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
| `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
| `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |

Key declarative methods:
Methods for creating another Runnable using the ChatModel.

+----------------------------------+-----------------------------------------------------------------------------------------------------------+
| Method | Description |
+==================================+===========================================================================================================+
| `bind_tools` | Create ChatModel that can call tools. |
+----------------------------------+-----------------------------------------------------------------------------------------------------------+
| `with_structured_output` | Create wrapper that structures model output using schema. |
+----------------------------------+-----------------------------------------------------------------------------------------------------------+
| `with_retry` | Create wrapper that retries model calls on failure. |
+----------------------------------+-----------------------------------------------------------------------------------------------------------+
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
+----------------------------------+-----------------------------------------------------------------------------------------------------------+
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
+----------------------------------+-----------------------------------------------------------------------------------------------------------+
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
+----------------------------------+-----------------------------------------------------------------------------------------------------------+
Methods for creating another `Runnable` using the chat model.

This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.

| Method | Description |
| ---------------------------- | ------------------------------------------------------------------------------------------ |
| `bind_tools` | Create chat model that can call tools. |
| `with_structured_output` | Create wrapper that structures model output using schema. |
| `with_retry` | Create wrapper that retries model calls on failure. |
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |

Creating custom chat model:
Custom chat model implementations should inherit from this class.
Please reference the table below for information about which
methods and properties are required or optional for implementations.

+----------------------------------+--------------------------------------------------------------------+-------------------+
| Method/Property | Description | Required/Optional |
+==================================+====================================================================+===================+
| Method/Property | Description | Required |
| -------------------------------- | ------------------------------------------------------------------ | ----------------- |
| `_generate` | Use to generate a chat result from a prompt | Required |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_stream` | Use to implement streaming | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_agenerate` | Use to implement a native async method | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_astream` | Use to implement async version of `_stream` | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+

Follow the guide for more information on how to implement a custom Chat Model:
[Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

""" # noqa: E501

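A minimal custom chat model implementing the two required members from the table above, `_generate` and `_llm_type` (the echo behavior is a toy, not a library class):

```python
from typing import Any

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class ParrotChatModel(BaseChatModel):
    """Toy model that echoes the last message back."""

    @property
    def _llm_type(self) -> str:
        return "parrot-chat-model"

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        reply = AIMessage(content=str(messages[-1].content))
        return ChatResult(generations=[ChatGeneration(message=reply)])


print(ParrotChatModel().invoke("Polly wants a cracker").content)
```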
@@ -322,39 +295,39 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

 disable_streaming: bool | Literal["tool_calling"] = False
 """Whether to disable streaming for this model.

-If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()`` will
-defer to ``invoke()``/``ainvoke()``.
+If streaming is bypassed, then `stream`/`astream`/`astream_events` will
+defer to `invoke`/`ainvoke`.

-- If True, will always bypass streaming case.
-- If ``'tool_calling'``, will bypass streaming case only when the model is called
-  with a ``tools`` keyword argument. In other words, LangChain will automatically
-  switch to non-streaming behavior (``invoke()``) only when the tools argument is
-  provided. This offers the best of both worlds.
-- If False (default), will always use streaming case if available.
+- If `True`, will always bypass streaming case.
+- If `'tool_calling'`, will bypass streaming case only when the model is called
+  with a `tools` keyword argument. In other words, LangChain will automatically
+  switch to non-streaming behavior (`invoke`) only when the tools argument is
+  provided. This offers the best of both worlds.
+- If `False` (default), will always use streaming case if available.

-The main reason for this flag is that code might be written using ``stream()`` and
+The main reason for this flag is that code might be written using `stream` and
 a user may want to swap out a given model for another model whose implementation
 does not properly support streaming.

 """
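As a usage sketch (`ChatOpenAI` is just one example of a model that exposes this inherited field):

```python
from langchain_openai import ChatOpenAI

# Streams normally, but silently falls back to invoke() whenever the model
# is called with bound tools, per the "tool_calling" setting described above.
model = ChatOpenAI(model="gpt-4o-mini", disable_streaming="tool_calling")
```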
 output_version: str | None = Field(
     default_factory=from_env("LC_OUTPUT_VERSION", default=None)
 )
-"""Version of ``AIMessage`` output format to store in message content.
+"""Version of `AIMessage` output format to store in message content.

-``AIMessage.content_blocks`` will lazily parse the contents of ``content`` into a
+`AIMessage.content_blocks` will lazily parse the contents of `content` into a
 standard format. This flag can be used to additionally store the standard format
 in message content, e.g., for serialization purposes.

 Supported values:

-- ``"v0"``: provider-specific format in content (can lazily-parse with
-  ``.content_blocks``)
-- ``"v1"``: standardized format in content (consistent with ``.content_blocks``)
+- `'v0'`: provider-specific format in content (can lazily-parse with
+  `content_blocks`)
+- `'v1'`: standardized format in content (consistent with `content_blocks`)

-Partner packages (e.g., ``langchain-openai``) can also use this field to roll out
-new content formats in a backward-compatible way.
+Partner packages (e.g.,
+[`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
+field to roll out new content formats in a backward-compatible way.

 !!! version-added "Added in version 1.0"
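Because the default comes from `from_env`, the format can be selected either process-wide or per instance; a minimal sketch (the commented model class is illustrative):

```python
import os

# Process-wide: picked up by from_env("LC_OUTPUT_VERSION") at field init time.
os.environ["LC_OUTPUT_VERSION"] = "v1"

# Per instance: any BaseChatModel subclass accepts the field directly, e.g.
# model = ChatOpenAI(model="gpt-4o-mini", output_version="v1")
```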
@@ -373,7 +346,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

 @property
 @override
 def OutputType(self) -> Any:
-    """Get the output type for this runnable."""
+    """Get the output type for this `Runnable`."""
     return AnyMessage

 def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:

@@ -471,8 +444,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

 if "stream" in kwargs:
     return kwargs["stream"]

-if getattr(self, "streaming", False):
-    return True
+if "streaming" in self.model_fields_set:
+    streaming_value = getattr(self, "streaming", None)
+    if isinstance(streaming_value, bool):
+        return streaming_value

 # Check if any streaming callback handlers have been passed in.
 handlers = run_manager.handlers if run_manager else []
@@ -863,13 +838,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

 1. Take advantage of batched calls,
 2. Need more output from the model than just the top generated value,
 3. Are building chains that are agnostic to the underlying language model
-   type (e.g., pure text completion models vs chat models).
+    type (e.g., pure text completion models vs chat models).

 Args:
     messages: List of list of messages.
     stop: Stop words to use when generating. Model output is cut off at the
         first occurrence of any of these substrings.
-    callbacks: Callbacks to pass through. Used for executing additional
+    callbacks: `Callbacks` to pass through. Used for executing additional
         functionality, such as logging or streaming, throughout generation.
     tags: The tags to apply.
     metadata: The metadata to apply.

@@ -879,8 +854,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

         to the model provider API call.

 Returns:
-    An LLMResult, which contains a list of candidate Generations for each input
-    prompt and additional model provider-specific output.
+    An `LLMResult`, which contains a list of candidate `Generations` for each
+    input prompt and additional model provider-specific output.

 """
 ls_structured_output_format = kwargs.pop(
@@ -981,13 +956,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

 1. Take advantage of batched calls,
 2. Need more output from the model than just the top generated value,
 3. Are building chains that are agnostic to the underlying language model
-   type (e.g., pure text completion models vs chat models).
+    type (e.g., pure text completion models vs chat models).

 Args:
     messages: List of list of messages.
     stop: Stop words to use when generating. Model output is cut off at the
         first occurrence of any of these substrings.
-    callbacks: Callbacks to pass through. Used for executing additional
+    callbacks: `Callbacks` to pass through. Used for executing additional
        functionality, such as logging or streaming, throughout generation.
     tags: The tags to apply.
     metadata: The metadata to apply.

@@ -997,8 +972,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

         to the model provider API call.

 Returns:
-    An LLMResult, which contains a list of candidate Generations for each input
-    prompt and additional model provider-specific output.
+    An `LLMResult`, which contains a list of candidate `Generations` for each
+    input prompt and additional model provider-specific output.

 """
 ls_structured_output_format = kwargs.pop(
@@ -1529,123 +1504,130 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

         - an OpenAI function/tool schema,
         - a JSON Schema,
-        - a TypedDict class,
+        - a `TypedDict` class,
         - or a Pydantic class.

-        If ``schema`` is a Pydantic class then the model output will be a
+        If `schema` is a Pydantic class then the model output will be a
         Pydantic instance of that class, and the model-generated fields will be
         validated by the Pydantic class. Otherwise the model output will be a
-        dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
-        for more on how to properly specify types and descriptions of
-        schema fields when specifying a Pydantic or TypedDict class.
+        dict and will not be validated.
+
+        See `langchain_core.utils.function_calling.convert_to_openai_tool` for
+        more on how to properly specify types and descriptions of schema fields
+        when specifying a Pydantic or `TypedDict` class.

     include_raw:
-        If False then only the parsed structured output is returned. If
-        an error occurs during model output parsing it will be raised. If True
-        then both the raw model response (a BaseMessage) and the parsed model
+        If `False` then only the parsed structured output is returned. If
+        an error occurs during model output parsing it will be raised. If `True`
+        then both the raw model response (a `BaseMessage`) and the parsed model
         response will be returned. If an error occurs during output parsing it
-        will be caught and returned as well. The final output is always a dict
-        with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
+        will be caught and returned as well.
+
+        The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
+        `'parsing_error'`.

 Raises:
-    ValueError: If there are any unsupported ``kwargs``.
+    ValueError: If there are any unsupported `kwargs`.
     NotImplementedError: If the model does not implement
-        ``with_structured_output()``.
+        `with_structured_output()`.

 Returns:
-    A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.
-
-    If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
-    an instance of ``schema`` (i.e., a Pydantic object).
-
-    Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
-
-    If ``include_raw`` is True, then Runnable outputs a dict with keys:
-
-    - ``'raw'``: BaseMessage
-    - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
-    - ``'parsing_error'``: Optional[BaseException]
+    A `Runnable` that takes same inputs as a
+    `langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
+    `False` and `schema` is a Pydantic class, `Runnable` outputs an instance
+    of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
+    `False` then `Runnable` outputs a `dict`.
+
+    If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
+
+    - `'raw'`: `BaseMessage`
+    - `'parsed'`: `None` if there was a parsing error, otherwise the type
+      depends on the `schema` as described above.
+    - `'parsing_error'`: `BaseException | None`

-Example: Pydantic schema (include_raw=False):
-    .. code-block:: python
-
-        from pydantic import BaseModel
-
-
-        class AnswerWithJustification(BaseModel):
-            '''An answer to the user question along with justification for the answer.'''
-
-            answer: str
-            justification: str
-
-
-        llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(AnswerWithJustification)
-
-        structured_llm.invoke(
-            "What weighs more a pound of bricks or a pound of feathers"
-        )
-
-        # -> AnswerWithJustification(
-        #     answer='They weigh the same',
-        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
-        # )
+Example: Pydantic schema (`include_raw=False`):
+
+    ```python
+    from pydantic import BaseModel
+
+
+    class AnswerWithJustification(BaseModel):
+        '''An answer to the user question along with justification for the answer.'''
+
+        answer: str
+        justification: str
+
+
+    model = ChatModel(model="model-name", temperature=0)
+    structured_model = model.with_structured_output(AnswerWithJustification)
+
+    structured_model.invoke(
+        "What weighs more a pound of bricks or a pound of feathers"
+    )
+
+    # -> AnswerWithJustification(
+    #     answer='They weigh the same',
+    #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
+    # )
+    ```

-Example: Pydantic schema (include_raw=True):
-    .. code-block:: python
-
-        from pydantic import BaseModel
-
-
-        class AnswerWithJustification(BaseModel):
-            '''An answer to the user question along with justification for the answer.'''
-
-            answer: str
-            justification: str
-
-
-        llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(
-            AnswerWithJustification, include_raw=True
-        )
-
-        structured_llm.invoke(
-            "What weighs more a pound of bricks or a pound of feathers"
-        )
-        # -> {
-        #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
-        #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
-        #     'parsing_error': None
-        # }
+Example: Pydantic schema (`include_raw=True`):
+
+    ```python
+    from pydantic import BaseModel
+
+
+    class AnswerWithJustification(BaseModel):
+        '''An answer to the user question along with justification for the answer.'''
+
+        answer: str
+        justification: str
+
+
+    model = ChatModel(model="model-name", temperature=0)
+    structured_model = model.with_structured_output(
+        AnswerWithJustification, include_raw=True
+    )
+
+    structured_model.invoke(
+        "What weighs more a pound of bricks or a pound of feathers"
+    )
+    # -> {
+    #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
+    #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
+    #     'parsing_error': None
+    # }
+    ```

-Example: Dict schema (include_raw=False):
-    .. code-block:: python
-
-        from pydantic import BaseModel
-        from langchain_core.utils.function_calling import convert_to_openai_tool
-
-
-        class AnswerWithJustification(BaseModel):
-            '''An answer to the user question along with justification for the answer.'''
-
-            answer: str
-            justification: str
-
-
-        dict_schema = convert_to_openai_tool(AnswerWithJustification)
-        llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(dict_schema)
-
-        structured_llm.invoke(
-            "What weighs more a pound of bricks or a pound of feathers"
-        )
-        # -> {
-        #     'answer': 'They weigh the same',
-        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
-        # }
+Example: `dict` schema (`include_raw=False`):
+
+    ```python
+    from pydantic import BaseModel
+    from langchain_core.utils.function_calling import convert_to_openai_tool
+
+
+    class AnswerWithJustification(BaseModel):
+        '''An answer to the user question along with justification for the answer.'''
+
+        answer: str
+        justification: str
+
+
+    dict_schema = convert_to_openai_tool(AnswerWithJustification)
+    model = ChatModel(model="model-name", temperature=0)
+    structured_model = model.with_structured_output(dict_schema)
+
+    structured_model.invoke(
+        "What weighs more a pound of bricks or a pound of feathers"
+    )
+    # -> {
+    #     'answer': 'They weigh the same',
+    #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
+    # }
+    ```

 !!! warning "Behavior changed in 0.2.26"
-   Added support for TypedDict class.
+    Added support for TypedDict class.

 """  # noqa: E501
 _ = kwargs.pop("method", None)
@@ -1692,7 +1674,7 @@ class SimpleChatModel(BaseChatModel):

 !!! note
     This implementation is primarily here for backwards compatibility. For new
-    implementations, please use ``BaseChatModel`` directly.
+    implementations, please use `BaseChatModel` directly.

 """

@@ -1,4 +1,4 @@
-"""Fake ChatModel for testing purposes."""
+"""Fake chat model for testing purposes."""

 import asyncio
 import re

@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig

 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ``ChatModel`` for testing purposes."""
+    """Fake chat model for testing purposes."""

     responses: list[BaseMessage]
     """List of responses to **cycle** through in order."""

@@ -57,7 +57,7 @@ class FakeListChatModelError(Exception):

 class FakeListChatModel(SimpleChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake chat model for testing purposes."""

     responses: list[str]
     """List of responses to **cycle** through in order."""

@@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel):

 """Generic fake chat model that can be used to test the chat model interface.

 * Chat model should be usable in both sync and async tests
-* Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
-  tokens.
+* Invokes `on_llm_new_token` to allow for testing of callback related code for new
+  tokens.
 * Includes logic to break messages into message chunk to facilitate testing of
-  streaming.
+    streaming.

 """

@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):

 to make the interface more generic if needed.

 !!! note
-    if you want to pass a list, you can use ``iter`` to convert it to an iterator.
+    if you want to pass a list, you can use `iter` to convert it to an iterator.

 !!! warning
     Streaming is not implemented yet. We should try to implement it in the future by
@@ -74,8 +74,8 @@ def create_base_retry_decorator(

 Args:
     error_types: List of error types to retry on.
-    max_retries: Number of retries. Default is 1.
-    run_manager: Callback manager for the run. Default is None.
+    max_retries: Number of retries.
+    run_manager: Callback manager for the run.

 Returns:
     A retry decorator.
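A minimal usage sketch of this decorator factory (the flaky function and error type are illustrative):

```python
from langchain_core.language_models.llms import create_base_retry_decorator

# Retry up to 3 times on TimeoutError, using the library's default backoff.
retry_decorator = create_base_retry_decorator(
    error_types=[TimeoutError], max_retries=3
)


@retry_decorator
def flaky_provider_call() -> str:
    # Stand-in for a provider API call that may raise TimeoutError.
    return "ok"
```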
@@ -91,13 +91,17 @@ def create_base_retry_decorator(

 if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
     coro = run_manager.on_retry(retry_state)
     try:
-        loop = asyncio.get_event_loop()
-        if loop.is_running():
-            # TODO: Fix RUF006 - this task should have a reference
-            # and be awaited somewhere
-            loop.create_task(coro)  # noqa: RUF006
-        else:
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            asyncio.run(coro)
+        else:
+            if loop.is_running():
+                # TODO: Fix RUF006 - this task should have a reference
+                # and be awaited somewhere
+                loop.create_task(coro)  # noqa: RUF006
+            else:
             asyncio.run(coro)
     except Exception as e:
         _log_error_once(f"Error in on_retry: {e}")
 else:
@@ -153,7 +157,7 @@ def get_prompts(

 Args:
     params: Dictionary of parameters.
     prompts: List of prompts.
-    cache: Cache object. Default is None.
+    cache: Cache object.

 Returns:
     A tuple of existing prompts, llm_string, missing prompt indexes,

@@ -189,7 +193,7 @@ async def aget_prompts(

 Args:
     params: Dictionary of parameters.
     prompts: List of prompts.
-    cache: Cache object. Default is None.
+    cache: Cache object.

 Returns:
     A tuple of existing prompts, llm_string, missing prompt indexes,

@@ -299,7 +303,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):

 @property
 @override
 def OutputType(self) -> type[str]:
-    """Get the input type for this runnable."""
+    """Get the input type for this `Runnable`."""
     return str

 def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
@@ -835,13 +839,13 @@ class BaseLLM(BaseLanguageModel[str], ABC):

 1. Take advantage of batched calls,
 2. Need more output from the model than just the top generated value,
 3. Are building chains that are agnostic to the underlying language model
-   type (e.g., pure text completion models vs chat models).
+    type (e.g., pure text completion models vs chat models).

 Args:
     prompts: List of string prompts.
     stop: Stop words to use when generating. Model output is cut off at the
         first occurrence of any of these substrings.
-    callbacks: Callbacks to pass through. Used for executing additional
+    callbacks: `Callbacks` to pass through. Used for executing additional
         functionality, such as logging or streaming, throughout generation.
     tags: List of tags to associate with each prompt. If provided, the length
         of the list must match the length of the prompts list.

@@ -857,12 +861,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):

 Raises:
     ValueError: If prompts is not a list.
-    ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
-        ``run_name`` (if provided) does not match the length of prompts.
+    ValueError: If the length of `callbacks`, `tags`, `metadata`, or
+        `run_name` (if provided) does not match the length of prompts.

 Returns:
-    An LLMResult, which contains a list of candidate Generations for each input
-    prompt and additional model provider-specific output.
+    An `LLMResult`, which contains a list of candidate `Generations` for each
+    input prompt and additional model provider-specific output.
 """
 if not isinstance(prompts, list):
     msg = (

@@ -1105,13 +1109,13 @@ class BaseLLM(BaseLanguageModel[str], ABC):

 1. Take advantage of batched calls,
 2. Need more output from the model than just the top generated value,
 3. Are building chains that are agnostic to the underlying language model
-   type (e.g., pure text completion models vs chat models).
+    type (e.g., pure text completion models vs chat models).

 Args:
     prompts: List of string prompts.
     stop: Stop words to use when generating. Model output is cut off at the
         first occurrence of any of these substrings.
-    callbacks: Callbacks to pass through. Used for executing additional
+    callbacks: `Callbacks` to pass through. Used for executing additional
         functionality, such as logging or streaming, throughout generation.
     tags: List of tags to associate with each prompt. If provided, the length
         of the list must match the length of the prompts list.

@@ -1126,12 +1130,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         to the model provider API call.

 Raises:
-    ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
-        ``run_name`` (if provided) does not match the length of prompts.
+    ValueError: If the length of `callbacks`, `tags`, `metadata`, or
+        `run_name` (if provided) does not match the length of prompts.

 Returns:
-    An LLMResult, which contains a list of candidate Generations for each input
-    prompt and additional model provider-specific output.
+    An `LLMResult`, which contains a list of candidate `Generations` for each
+    input prompt and additional model provider-specific output.
 """
 if isinstance(metadata, list):
     metadata = [
@@ -1340,11 +1344,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):

 ValueError: If the file path is not a string or Path object.

 Example:
-
-    .. code-block:: python
-
-        llm.save(file_path="path/llm.yaml")
-
+    ```python
+    llm.save(file_path="path/llm.yaml")
+    ```
 """
 # Convert file to Path object.
 save_path = Path(file_path)
@@ -42,10 +42,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:

 Args:
     obj: The object to dump.
-    pretty: Whether to pretty print the json. If true, the json will be
-        indented with 2 spaces (if no indent is provided as part of kwargs).
-        Default is False.
-    kwargs: Additional arguments to pass to json.dumps
+    pretty: Whether to pretty print the json. If `True`, the json will be
+        indented with 2 spaces (if no indent is provided as part of `kwargs`).
+    **kwargs: Additional arguments to pass to `json.dumps`

 Returns:
     A json string representation of the object.
@@ -63,16 +63,13 @@ class Reviver:

 Args:
     secrets_map: A map of secrets to load. If a secret is not found in
         the map, it will be loaded from the environment if `secrets_from_env`
-        is True. Defaults to None.
+        is True.
     valid_namespaces: A list of additional namespaces (modules)
-        to allow to be deserialized. Defaults to None.
+        to allow to be deserialized.
     secrets_from_env: Whether to load secrets from the environment.
-        Defaults to True.
     additional_import_mappings: A dictionary of additional namespace mappings
         You can use this to override default mappings or add new mappings.
-        Defaults to None.
     ignore_unserializable_fields: Whether to ignore unserializable fields.
-        Defaults to False.
 """
 self.secrets_from_env = secrets_from_env
 self.secrets_map = secrets_map or {}

@@ -107,7 +104,7 @@ class Reviver:

 ValueError: If trying to deserialize something that cannot
     be deserialized in the current version of langchain-core.
 NotImplementedError: If the object is not implemented and
-    ``ignore_unserializable_fields`` is False.
+    `ignore_unserializable_fields` is False.
 """
 if (
     value.get("lc") == 1

@@ -200,16 +197,13 @@ def loads(

 text: The string to load.
 secrets_map: A map of secrets to load. If a secret is not found in
     the map, it will be loaded from the environment if `secrets_from_env`
-    is True. Defaults to None.
+    is True.
 valid_namespaces: A list of additional namespaces (modules)
-    to allow to be deserialized. Defaults to None.
+    to allow to be deserialized.
 secrets_from_env: Whether to load secrets from the environment.
-    Defaults to True.
 additional_import_mappings: A dictionary of additional namespace mappings
     You can use this to override default mappings or add new mappings.
-    Defaults to None.
 ignore_unserializable_fields: Whether to ignore unserializable fields.
-    Defaults to False.

 Returns:
     Revived LangChain objects.

@@ -245,16 +239,13 @@ def load(

 obj: The object to load.
 secrets_map: A map of secrets to load. If a secret is not found in
     the map, it will be loaded from the environment if `secrets_from_env`
-    is True. Defaults to None.
+    is True.
 valid_namespaces: A list of additional namespaces (modules)
-    to allow to be deserialized. Defaults to None.
+    to allow to be deserialized.
 secrets_from_env: Whether to load secrets from the environment.
-    Defaults to True.
 additional_import_mappings: A dictionary of additional namespace mappings
     You can use this to override default mappings or add new mappings.
-    Defaults to None.
 ignore_unserializable_fields: Whether to ignore unserializable fields.
-    Defaults to False.

 Returns:
     Revived LangChain objects.
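A round-trip sketch using the `dumps`/`loads` pair documented above (the prompt object is just one example of a serializable LangChain object):

```python
from langchain_core.load import dumps, loads
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([("human", "Tell me about {topic}")])

text = dumps(prompt, pretty=True)  # JSON string representation
restored = loads(text)             # revived LangChain object
assert restored == prompt
```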
@@ -25,16 +25,16 @@ class BaseSerialized(TypedDict):

 id: list[str]
 """The unique identifier of the object."""
 name: NotRequired[str]
-"""The name of the object. Optional."""
+"""The name of the object."""
 graph: NotRequired[dict[str, Any]]
-"""The graph of the object. Optional."""
+"""The graph of the object."""


 class SerializedConstructor(BaseSerialized):
     """Serialized constructor."""

     type: Literal["constructor"]
-    """The type of the object. Must be ``'constructor'``."""
+    """The type of the object. Must be `'constructor'`."""
     kwargs: dict[str, Any]
     """The constructor arguments."""

@@ -43,16 +43,16 @@ class SerializedSecret(BaseSerialized):

     """Serialized secret."""

     type: Literal["secret"]
-    """The type of the object. Must be ``'secret'``."""
+    """The type of the object. Must be `'secret'`."""


 class SerializedNotImplemented(BaseSerialized):
     """Serialized not implemented."""

     type: Literal["not_implemented"]
-    """The type of the object. Must be ``'not_implemented'``."""
+    """The type of the object. Must be `'not_implemented'`."""
     repr: str | None
-    """The representation of the object. Optional."""
+    """The representation of the object."""


 def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -61,7 +61,7 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:

 Args:
     value: The value.
     key: The key.
-    model: The pydantic model.
+    model: The Pydantic model.

 Returns:
     Whether the value is different from the default.

@@ -92,19 +92,19 @@ class Serializable(BaseModel, ABC):

 It relies on the following methods and properties:

-- ``is_lc_serializable``: Is this class serializable?
-  By design, even if a class inherits from Serializable, it is not serializable by
-  default. This is to prevent accidental serialization of objects that should not
-  be serialized.
-- ``get_lc_namespace``: Get the namespace of the langchain object.
-  During deserialization, this namespace is used to identify
-  the correct class to instantiate.
-  Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
-  During deserialization an additional mapping is used to handle
-  classes that have moved or been renamed across package versions.
-- ``lc_secrets``: A map of constructor argument names to secret ids.
-- ``lc_attributes``: List of additional attribute names that should be included
-  as part of the serialized representation.
+- `is_lc_serializable`: Is this class serializable?
+  By design, even if a class inherits from `Serializable`, it is not serializable
+  by default. This is to prevent accidental serialization of objects that should
+  not be serialized.
+- `get_lc_namespace`: Get the namespace of the LangChain object.
+  During deserialization, this namespace is used to identify
+  the correct class to instantiate.
+  Please see the `Reviver` class in `langchain_core.load.load` for more details.
+  During deserialization an additional mapping is used to handle classes that
+  have moved or been renamed across package versions.
+- `lc_secrets`: A map of constructor argument names to secret ids.
+- `lc_attributes`: List of additional attribute names that should be included
+  as part of the serialized representation.
 """

 # Remove default BaseModel init docstring.

@@ -116,24 +116,24 @@ class Serializable(BaseModel, ABC):

 def is_lc_serializable(cls) -> bool:
     """Is this class serializable?

-    By design, even if a class inherits from Serializable, it is not serializable by
-    default. This is to prevent accidental serialization of objects that should not
-    be serialized.
+    By design, even if a class inherits from `Serializable`, it is not serializable
+    by default. This is to prevent accidental serialization of objects that should
+    not be serialized.

     Returns:
-        Whether the class is serializable. Default is False.
+        Whether the class is serializable. Default is `False`.
     """
     return False

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     For example, if the class is `langchain.llms.openai.OpenAI`, then the
-    namespace is ["langchain", "llms", "openai"]
+    namespace is `["langchain", "llms", "openai"]`

     Returns:
-        The namespace as a list of strings.
+        The namespace.
     """
     return cls.__module__.split(".")
@@ -141,8 +141,7 @@ class Serializable(BaseModel, ABC):

 def lc_secrets(self) -> dict[str, str]:
     """A map of constructor argument names to secret ids.

-    For example,
-        {"openai_api_key": "OPENAI_API_KEY"}
+    For example, `{"openai_api_key": "OPENAI_API_KEY"}`
     """
     return {}

@@ -151,6 +150,7 @@ class Serializable(BaseModel, ABC):

 """List of attribute names that should be included in the serialized kwargs.

 These attributes must be accepted by the constructor.
+
 Default is an empty dictionary.
 """
 return {}
@@ -161,8 +161,9 @@ class Serializable(BaseModel, ABC):

 The unique identifier is a list of strings that describes the path
 to the object.

 For example, for the class `langchain.llms.openai.OpenAI`, the id is
-["langchain", "llms", "openai", "OpenAI"].
+`["langchain", "llms", "openai", "OpenAI"]`.
 """
 # Pydantic generics change the class name. So we need to do the following
 if (

@@ -193,7 +194,7 @@ class Serializable(BaseModel, ABC):

 ValueError: If the class has deprecated attributes.

 Returns:
-    A json serializable object or a SerializedNotImplemented object.
+    A json serializable object or a `SerializedNotImplemented` object.
 """
 if not self.is_lc_serializable():
     return self.to_json_not_implemented()

@@ -268,7 +269,7 @@ class Serializable(BaseModel, ABC):

 """Serialize a "not implemented" object.

 Returns:
-    SerializedNotImplemented.
+    `SerializedNotImplemented`.
 """
 return to_json_not_implemented(self)

@@ -283,8 +284,8 @@ def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:

 Returns:
     Whether the field is useful. If the field is required, it is useful.
-    If the field is not required, it is useful if the value is not None.
-    If the field is not required and the value is None, it is useful if the
+    If the field is not required, it is useful if the value is not `None`.
+    If the field is not required and the value is `None`, it is useful if the
     default value is different from the value.
 """
 field = type(inst).model_fields.get(key)

@@ -343,10 +344,10 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:

 """Serialize a "not implemented" object.

 Args:
-    obj: object to serialize.
+    obj: Object to serialize.

 Returns:
-    SerializedNotImplemented
+    `SerializedNotImplemented`
 """
 id_: list[str] = []
 try:
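Putting these hooks together, a minimal sketch of a serializable class (the class, fields, and secret id are illustrative):

```python
from langchain_core.load.serializable import Serializable


class MyComponent(Serializable):
    endpoint: str
    api_key: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Opt in explicitly; inheriting from Serializable alone is not enough.
        return True

    @property
    def lc_secrets(self) -> dict[str, str]:
        # Serialize the key as a secret id rather than its literal value.
        return {"api_key": "MY_API_KEY"}


serialized = MyComponent(endpoint="https://example.com", api_key="secret").to_json()
```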
@@ -1,19 +1,4 @@
-"""**Messages** are objects used in prompts and chat conversations.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
-                --> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
-
-**Main helpers:**
-
-.. code-block::
-
-    ChatPromptTemplate
-
-"""  # noqa: E501
+"""**Messages** are objects used in prompts and chat conversations."""

 from typing import TYPE_CHECKING

@@ -40,13 +40,13 @@ class InputTokenDetails(TypedDict, total=False):

 Does *not* need to sum to full input token count. Does *not* need to have all keys.

 Example:
-    .. code-block:: python
-
-        {
-            "audio": 10,
-            "cache_creation": 200,
-            "cache_read": 100,
-        }
+    ```python
+    {
+        "audio": 10,
+        "cache_creation": 200,
+        "cache_read": 100,
+    }
+    ```

 !!! version-added "Added in version 0.3.9"
@@ -76,12 +76,12 @@ class OutputTokenDetails(TypedDict, total=False):

 Does *not* need to sum to full output token count. Does *not* need to have all keys.

 Example:
-    .. code-block:: python
-
-        {
-            "audio": 10,
-            "reasoning": 200,
-        }
+    ```python
+    {
+        "audio": 10,
+        "reasoning": 200,
+    }
+    ```

 !!! version-added "Added in version 0.3.9"

@@ -104,25 +104,25 @@ class UsageMetadata(TypedDict):

 This is a standard representation of token usage that is consistent across models.

 Example:
-    .. code-block:: python
-
-        {
-            "input_tokens": 350,
-            "output_tokens": 240,
-            "total_tokens": 590,
-            "input_token_details": {
-                "audio": 10,
-                "cache_creation": 200,
-                "cache_read": 100,
-            },
-            "output_token_details": {
-                "audio": 10,
-                "reasoning": 200,
-            },
-        }
+    ```python
+    {
+        "input_tokens": 350,
+        "output_tokens": 240,
+        "total_tokens": 590,
+        "input_token_details": {
+            "audio": 10,
+            "cache_creation": 200,
+            "cache_read": 100,
+        },
+        "output_token_details": {
+            "audio": 10,
+            "reasoning": 200,
+        },
+    }
+    ```

 !!! warning "Behavior changed in 0.3.9"
-    Added ``input_token_details`` and ``output_token_details``.
+    Added `input_token_details` and `output_token_details`.

 """
@@ -148,27 +148,26 @@ class UsageMetadata(TypedDict):

 class AIMessage(BaseMessage):
     """Message from an AI.

-    AIMessage is returned from a chat model as a response to a prompt.
+    An `AIMessage` is returned from a chat model as a response to a prompt.

     This message represents the output of the model and consists of both
-    the raw output as returned by the model together standardized fields
+    the raw output as returned by the model and standardized fields
     (e.g., tool calls, usage metadata) added by the LangChain framework.

     """

     tool_calls: list[ToolCall] = []
-    """If provided, tool calls associated with the message."""
+    """If present, tool calls associated with the message."""
     invalid_tool_calls: list[InvalidToolCall] = []
-    """If provided, tool calls with parsing errors associated with the message."""
+    """If present, tool calls with parsing errors associated with the message."""
     usage_metadata: UsageMetadata | None = None
-    """If provided, usage metadata for a message, such as token counts.
+    """If present, usage metadata for a message, such as token counts.

     This is a standard representation of token usage that is consistent across models.

     """

     type: Literal["ai"] = "ai"
-    """The type of the message (used for deserialization). Defaults to "ai"."""
+    """The type of the message (used for deserialization)."""

     @overload
     def __init__(

@@ -191,14 +190,14 @@ class AIMessage(BaseMessage):

     content_blocks: list[types.ContentBlock] | None = None,
     **kwargs: Any,
 ) -> None:
-    """Initialize ``AIMessage``.
+    """Initialize an `AIMessage`.

-    Specify ``content`` as positional arg or ``content_blocks`` for typing.
+    Specify `content` as positional arg or `content_blocks` for typing.

     Args:
         content: The content of the message.
         content_blocks: Typed standard content.
-        kwargs: Additional arguments to pass to the parent class.
+        **kwargs: Additional arguments to pass to the parent class.
     """
     if content_blocks is not None:
         # If there are tool calls in content_blocks, but not in tool_calls, add them

@@ -217,7 +216,11 @@ class AIMessage(BaseMessage):

 @property
 def lc_attributes(self) -> dict:
-    """Attrs to be serialized even if they are derived from other init args."""
+    """Attributes to be serialized.
+
+    Includes all attributes, even if they are derived from other initialization
+    arguments.
+    """
     return {
         "tool_calls": self.tool_calls,
         "invalid_tool_calls": self.invalid_tool_calls,

@@ -225,11 +228,11 @@ class AIMessage(BaseMessage):

 @property
 def content_blocks(self) -> list[types.ContentBlock]:
-    """Return content blocks of the message.
+    """Return standard, typed `ContentBlock` dicts from the message.

     If the message has a known model provider, use the provider-specific translator
     first before falling back to best-effort parsing. For details, see the property
-    on ``BaseMessage``.
+    on `BaseMessage`.
     """
     if self.response_metadata.get("output_version") == "v1":
         return cast("list[types.ContentBlock]", self.content)
@@ -331,11 +334,10 @@ class AIMessage(BaseMessage):

 @override
 def pretty_repr(self, html: bool = False) -> str:
-    """Return a pretty representation of the message.
+    """Return a pretty representation of the message for display.

     Args:
         html: Whether to return an HTML-formatted string.
-            Defaults to False.

     Returns:
         A pretty representation of the message.

@@ -372,31 +374,27 @@ class AIMessage(BaseMessage):


 class AIMessageChunk(AIMessage, BaseMessageChunk):
-    """Message chunk from an AI."""
+    """Message chunk from an AI (yielded when streaming)."""

     # Ignoring mypy re-assignment here since we're overriding the value
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for deserialization).
-
-    Defaults to ``AIMessageChunk``.
-
-    """
+    """The type of the message (used for deserialization)."""

     tool_call_chunks: list[ToolCallChunk] = []
     """If provided, tool call chunks associated with the message."""

     chunk_position: Literal["last"] | None = None
-    """Optional span represented by an aggregated AIMessageChunk.
+    """Optional span represented by an aggregated `AIMessageChunk`.

-    If a chunk with ``chunk_position="last"`` is aggregated into a stream,
-    ``tool_call_chunks`` in message content will be parsed into ``tool_calls``.
+    If a chunk with `chunk_position="last"` is aggregated into a stream,
+    `tool_call_chunks` in message content will be parsed into `tool_calls`.
     """

 @property
 def lc_attributes(self) -> dict:
-    """Attrs to be serialized even if they are derived from other init args."""
+    """Attributes to be serialized, even if they are derived from other initialization args."""  # noqa: E501
     return {
         "tool_calls": self.tool_calls,
         "invalid_tool_calls": self.invalid_tool_calls,

@@ -404,7 +402,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

 @property
 def content_blocks(self) -> list[types.ContentBlock]:
-    """Return content blocks of the message."""
+    """Return standard, typed `ContentBlock` dicts from the message."""
     if self.response_metadata.get("output_version") == "v1":
         return cast("list[types.ContentBlock]", self.content)

@@ -545,12 +543,15 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

     and call_id in id_to_tc
 ):
     self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
+    if "extras" in block:
+        # mypy does not account for instance check for dict above
+        self.content[idx]["extras"] = block["extras"]  # type: ignore[index]

 return self

 @model_validator(mode="after")
 def init_server_tool_calls(self) -> Self:
-    """Parse server_tool_call_chunks."""
+    """Parse `server_tool_call_chunks`."""
     if (
         self.chunk_position == "last"
         and self.response_metadata.get("output_version") == "v1"

@@ -596,14 +597,14 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

 def add_ai_message_chunks(
     left: AIMessageChunk, *others: AIMessageChunk
 ) -> AIMessageChunk:
-    """Add multiple ``AIMessageChunk``s together.
+    """Add multiple `AIMessageChunk`s together.

     Args:
-        left: The first ``AIMessageChunk``.
-        *others: Other ``AIMessageChunk``s to add.
+        left: The first `AIMessageChunk`.
+        *others: Other `AIMessageChunk`s to add.

     Returns:
-        The resulting ``AIMessageChunk``.
+        The resulting `AIMessageChunk`.

     """
     content = merge_content(left.content, *(o.content for o in others))
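The `+` operator on chunks delegates to this helper; a minimal sketch:

```python
from langchain_core.messages import AIMessageChunk

# Aggregate streamed chunks, as done internally when consuming stream().
merged = AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")
assert merged.content == "Hello World"
```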
@@ -681,43 +682,42 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM

 """Recursively add two UsageMetadata objects.

 Example:
-    .. code-block:: python
-
-        from langchain_core.messages.ai import add_usage
-
-        left = UsageMetadata(
-            input_tokens=5,
-            output_tokens=0,
-            total_tokens=5,
-            input_token_details=InputTokenDetails(cache_read=3),
-        )
-        right = UsageMetadata(
-            input_tokens=0,
-            output_tokens=10,
-            total_tokens=10,
-            output_token_details=OutputTokenDetails(reasoning=4),
-        )
-
-        add_usage(left, right)
+    ```python
+    from langchain_core.messages.ai import add_usage
+
+    left = UsageMetadata(
+        input_tokens=5,
+        output_tokens=0,
+        total_tokens=5,
+        input_token_details=InputTokenDetails(cache_read=3),
+    )
+    right = UsageMetadata(
+        input_tokens=0,
+        output_tokens=10,
+        total_tokens=10,
+        output_token_details=OutputTokenDetails(reasoning=4),
+    )
+
+    add_usage(left, right)
+    ```

 results in

-    .. code-block:: python
-
-        UsageMetadata(
-            input_tokens=5,
-            output_tokens=10,
-            total_tokens=15,
-            input_token_details=InputTokenDetails(cache_read=3),
-            output_token_details=OutputTokenDetails(reasoning=4),
-        )
+    ```python
+    UsageMetadata(
+        input_tokens=5,
+        output_tokens=10,
+        total_tokens=15,
+        input_token_details=InputTokenDetails(cache_read=3),
+        output_token_details=OutputTokenDetails(reasoning=4),
+    )
+    ```

 Args:
-    left: The first ``UsageMetadata`` object.
-    right: The second ``UsageMetadata`` object.
+    left: The first `UsageMetadata` object.
+    right: The second `UsageMetadata` object.

 Returns:
-    The sum of the two ``UsageMetadata`` objects.
+    The sum of the two `UsageMetadata` objects.

 """
 if not (left or right):
@@ -740,48 +740,47 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM

 def subtract_usage(
     left: UsageMetadata | None, right: UsageMetadata | None
 ) -> UsageMetadata:
-    """Recursively subtract two ``UsageMetadata`` objects.
+    """Recursively subtract two `UsageMetadata` objects.

-    Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.
+    Token counts cannot be negative so the actual operation is `max(left - right, 0)`.

     Example:
-        .. code-block:: python
-
-            from langchain_core.messages.ai import subtract_usage
-
-            left = UsageMetadata(
-                input_tokens=5,
-                output_tokens=10,
-                total_tokens=15,
-                input_token_details=InputTokenDetails(cache_read=4),
-            )
-            right = UsageMetadata(
-                input_tokens=3,
-                output_tokens=8,
-                total_tokens=11,
-                output_token_details=OutputTokenDetails(reasoning=4),
-            )
-
-            subtract_usage(left, right)
+        ```python
+        from langchain_core.messages.ai import subtract_usage
+
+        left = UsageMetadata(
+            input_tokens=5,
+            output_tokens=10,
+            total_tokens=15,
+            input_token_details=InputTokenDetails(cache_read=4),
+        )
+        right = UsageMetadata(
+            input_tokens=3,
+            output_tokens=8,
+            total_tokens=11,
+            output_token_details=OutputTokenDetails(reasoning=4),
+        )
+
+        subtract_usage(left, right)
+        ```

     results in

-        .. code-block:: python
-
-            UsageMetadata(
-                input_tokens=2,
-                output_tokens=2,
-                total_tokens=4,
-                input_token_details=InputTokenDetails(cache_read=4),
-                output_token_details=OutputTokenDetails(reasoning=0),
-            )
+        ```python
+        UsageMetadata(
+            input_tokens=2,
+            output_tokens=2,
+            total_tokens=4,
+            input_token_details=InputTokenDetails(cache_read=4),
+            output_token_details=OutputTokenDetails(reasoning=0),
+        )
+        ```

     Args:
-        left: The first ``UsageMetadata`` object.
-        right: The second ``UsageMetadata`` object.
+        left: The first `UsageMetadata` object.
+        right: The second `UsageMetadata` object.

     Returns:
-        The resulting ``UsageMetadata`` after subtraction.
+        The resulting `UsageMetadata` after subtraction.

     """
     if not (left or right):
@@ -48,13 +48,13 @@ class TextAccessor(str):

 Exists to maintain backward compatibility while transitioning from method-based to
 property-based text access in message objects. In LangChain <v1.0, message text was
-accessed via ``.text()`` method calls. In v1.0+, the preferred pattern is property
-access via ``.text``.
+accessed via `.text()` method calls. In v1.0+, the preferred pattern is property
+access via `.text`.

-Rather than breaking existing code immediately, ``TextAccessor`` allows both
+Rather than breaking existing code immediately, `TextAccessor` allows both
 patterns:

-- Modern property access: ``message.text`` (returns string directly)
-- Legacy method access: ``message.text()`` (callable, emits deprecation warning)
+- Modern property access: `message.text` (returns string directly)
+- Legacy method access: `message.text()` (callable, emits deprecation warning)

 """

@@ -67,12 +67,12 @@ class TextAccessor(str):

 def __call__(self) -> str:
     """Enable method-style text access for backward compatibility.

-    This method exists solely to support legacy code that calls ``.text()``
-    as a method. New code should use property access (``.text``) instead.
+    This method exists solely to support legacy code that calls `.text()`
+    as a method. New code should use property access (`.text`) instead.

     !!! deprecated
-        As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated.
-        Use ``.text`` as a property instead. This method will be removed in 2.0.0.
+        As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
+        Use `.text` as a property instead. This method will be removed in 2.0.0.

     Returns:
         The string content, identical to property access.
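Both access patterns side by side, as a short sketch:

```python
from langchain_core.messages import AIMessage

msg = AIMessage(content="hello")
assert msg.text == "hello"    # preferred: property access
assert msg.text() == "hello"  # legacy: still works, emits a deprecation warning
```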
@@ -92,11 +92,11 @@ class BaseMessage(Serializable):

 class BaseMessage(Serializable):
     """Base abstract message class.

-    Messages are the inputs and outputs of a ``ChatModel``.
+    Messages are the inputs and outputs of a chat model.
     """

     content: str | list[str | dict]
-    """The string contents of the message."""
+    """The contents of the message."""

     additional_kwargs: dict = Field(default_factory=dict)
     """Reserved for additional payload data associated with the message.

@@ -159,14 +159,14 @@ class BaseMessage(Serializable):

     content_blocks: list[types.ContentBlock] | None = None,
     **kwargs: Any,
 ) -> None:
-    """Initialize ``BaseMessage``.
+    """Initialize a `BaseMessage`.

-    Specify ``content`` as positional arg or ``content_blocks`` for typing.
+    Specify `content` as positional arg or `content_blocks` for typing.

     Args:
-        content: The string contents of the message.
+        content: The contents of the message.
         content_blocks: Typed standard content.
-        kwargs: Additional arguments to pass to the parent class.
+        **kwargs: Additional arguments to pass to the parent class.
     """
     if content_blocks is not None:
         super().__init__(content=content_blocks, **kwargs)

@@ -175,7 +175,7 @@ class BaseMessage(Serializable):

 @classmethod
 def is_lc_serializable(cls) -> bool:
-    """``BaseMessage`` is serializable.
+    """`BaseMessage` is serializable.

     Returns:
         True

@@ -184,10 +184,10 @@ class BaseMessage(Serializable):

 @classmethod
 def get_lc_namespace(cls) -> list[str]:
-    """Get the namespace of the langchain object.
+    """Get the namespace of the LangChain object.

     Returns:
-        ``["langchain", "schema", "messages"]``
+        `["langchain", "schema", "messages"]`
     """
     return ["langchain", "schema", "messages"]
@@ -259,11 +259,11 @@ class BaseMessage(Serializable):

 def text(self) -> TextAccessor:
     """Get the text content of the message as a string.

-    Can be used as both property (``message.text``) and method (``message.text()``).
+    Can be used as both property (`message.text`) and method (`message.text()`).

     !!! deprecated
-        As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated.
-        Use ``.text`` as a property instead. This method will be removed in 2.0.0.
+        As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
+        Use `.text` as a property instead. This method will be removed in 2.0.0.

     Returns:
         The text content of the message.

@@ -306,8 +306,8 @@ class BaseMessage(Serializable):

 """Get a pretty representation of the message.

 Args:
-    html: Whether to format the message as HTML. If True, the message will be
-        formatted with HTML tags. Default is False.
+    html: Whether to format the message as HTML. If `True`, the message will be
+        formatted with HTML tags.

 Returns:
     A pretty representation of the message.
@@ -331,8 +331,8 @@ def merge_content(

 """Merge multiple message contents.

 Args:
-    first_content: The first ``content``. Can be a string or a list.
-    contents: The other ``content``s. Can be a string or a list.
+    first_content: The first `content`. Can be a string or a list.
+    contents: The other `content`s. Can be a string or a list.

 Returns:
     The merged content.

@@ -388,9 +388,9 @@ class BaseMessageChunk(BaseMessage):

 For example,

-``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")``
+`AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`

-will give ``AIMessageChunk(content="Hello World")``
+will give `AIMessageChunk(content="Hello World")`

 """
 if isinstance(other, BaseMessageChunk):

@@ -439,8 +439,8 @@ def message_to_dict(message: BaseMessage) -> dict:

 message: Message to convert.

 Returns:
-    Message as a dict. The dict will have a ``type`` key with the message type
-    and a ``data`` key with the message data as a dict.
+    Message as a dict. The dict will have a `type` key with the message type
+    and a `data` key with the message data as a dict.

 """
 return {"type": message.type, "data": message.model_dump()}

@@ -450,7 +450,7 @@ def messages_to_dict(messages: Sequence[BaseMessage]) -> list[dict]:

 """Convert a sequence of Messages to a list of dictionaries.

 Args:
-    messages: Sequence of messages (as ``BaseMessage``s) to convert.
+    messages: Sequence of messages (as `BaseMessage`s) to convert.

 Returns:
     List of messages as dicts.

@@ -464,7 +464,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:

 Args:
     title: The title.
-    bold: Whether to bold the title. Default is False.
+    bold: Whether to bold the title.

 Returns:
     The title representation.
@@ -1,13 +1,13 @@
|
||||
"""Derivations of standard content blocks from provider content.
|
||||
|
||||
``AIMessage`` will first attempt to use a provider-specific translator if
|
||||
``model_provider`` is set in ``response_metadata`` on the message. Consequently, each
|
||||
`AIMessage` will first attempt to use a provider-specific translator if
|
||||
`model_provider` is set in `response_metadata` on the message. Consequently, each
|
||||
provider translator must handle all possible content response types from the provider,
|
||||
including text.
|
||||
|
||||
If no provider is set, or if the provider does not have a registered translator,
|
||||
``AIMessage`` will fall back to best-effort parsing of the content into blocks using
|
||||
the implementation in ``BaseMessage``.
|
||||
`AIMessage` will fall back to best-effort parsing of the content into blocks using
|
||||
the implementation in `BaseMessage`.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -23,15 +23,15 @@ if TYPE_CHECKING:
|
||||
PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {}
|
||||
"""Map model provider names to translator functions.
|
||||
|
||||
The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another
|
||||
The dictionary maps provider names (e.g. `'openai'`, `'anthropic'`) to another
|
||||
dictionary with two keys:
|
||||
- ``'translate_content'``: Function to translate ``AIMessage`` content.
|
||||
- ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content.
|
||||
- `'translate_content'`: Function to translate `AIMessage` content.
|
||||
- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.
|
||||
|
||||
When calling `.content_blocks` on an ``AIMessage`` or ``AIMessageChunk``, if
|
||||
``model_provider`` is set in ``response_metadata``, the corresponding translator
|
||||
When calling `content_blocks` on an `AIMessage` or `AIMessageChunk`, if
|
||||
`model_provider` is set in `response_metadata`, the corresponding translator
|
||||
functions will be used to parse the content into blocks. Otherwise, best-effort parsing
|
||||
in ``BaseMessage`` will be used.
|
||||
in `BaseMessage` will be used.
|
||||
"""
|
||||
|
||||
|
||||
@@ -43,9 +43,9 @@ def register_translator(
|
||||
"""Register content translators for a provider in `PROVIDER_TRANSLATORS`.
|
||||
|
||||
Args:
|
||||
provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``).
|
||||
translate_content: Function to translate ``AIMessage`` content.
|
||||
translate_content_chunk: Function to translate ``AIMessageChunk`` content.
|
||||
provider: The model provider name (e.g. `'openai'`, `'anthropic'`).
|
||||
translate_content: Function to translate `AIMessage` content.
|
||||
translate_content_chunk: Function to translate `AIMessageChunk` content.
|
||||
"""
|
||||
PROVIDER_TRANSLATORS[provider] = {
|
||||
"translate_content": translate_content,
|
||||
@@ -62,9 +62,9 @@ def get_translator(
|
||||
provider: The model provider name.
|
||||
|
||||
Returns:
|
||||
Dictionary with ``'translate_content'`` and ``'translate_content_chunk'``
|
||||
Dictionary with `'translate_content'` and `'translate_content_chunk'`
|
||||
functions, or None if no translator is registered for the provider. In such
|
||||
case, best-effort parsing in ``BaseMessage`` will be used.
|
||||
case, best-effort parsing in `BaseMessage` will be used.
|
||||
"""
|
||||
return PROVIDER_TRANSLATORS.get(provider)
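A hedged sketch of registering a custom translator from an integration package. The import path follows the module shown in this diff, and the positional call signature is assumed; the translator bodies are hypothetical:

```python
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
from langchain_core.messages.block_translators import (
    get_translator,
    register_translator,
)


def _translate(message: AIMessage) -> list[types.ContentBlock]:
    # Hypothetical: expose all content as a single text block
    return [{"type": "text", "text": str(message.content)}]


def _translate_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
    return [{"type": "text", "text": str(message.content)}]


register_translator("my_provider", _translate, _translate_chunk)
assert get_translator("my_provider") is not None
```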
@@ -72,10 +72,10 @@ def get_translator(
def _register_translators() -> None:
"""Register all translators in langchain-core.

A unit test ensures all modules in ``block_translators`` are represented here.
A unit test ensures all modules in `block_translators` are represented here.

For translators implemented outside langchain-core, they can be registered by
calling ``register_translator`` from within the integration package.
calling `register_translator` from within the integration package.
"""
from langchain_core.messages.block_translators.anthropic import ( # noqa: PLC0415
_register_anthropic_translator,

@@ -31,12 +31,12 @@ def _convert_to_v1_from_anthropic_input(
) -> list[types.ContentBlock]:
"""Convert Anthropic format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Anthropic format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -35,12 +35,12 @@ def _convert_to_v1_from_converse_input(
) -> list[types.ContentBlock]:
"""Convert Bedrock Converse format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Converse format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -105,12 +105,12 @@ def _convert_to_v1_from_genai_input(
Called when message isn't an `AIMessage` or `model_provider` isn't set on
`response_metadata`.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be GenAI format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.
@@ -282,7 +282,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
standard content blocks for returning.

Args:
message: The AIMessage or AIMessageChunk to convert.
message: The `AIMessage` or `AIMessageChunk` to convert.

Returns:
List of standard content blocks derived from the message content.
@@ -453,9 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
"status": status, # type: ignore[typeddict-item]
"output": item.get("code_execution_result", ""),
}
server_tool_result_block["extras"] = {"block_type": item_type}
# Preserve original outcome in extras
if outcome is not None:
server_tool_result_block["extras"] = {"outcome": outcome}
server_tool_result_block["extras"]["outcome"] = outcome
converted_blocks.append(server_tool_result_block)
else:
# Unknown type, preserve as non-standard

@@ -1,37 +1,9 @@
"""Derivations of standard content blocks from Google (VertexAI) content."""

import warnings

from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types

WARNED = False


def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message with Google (VertexAI) content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Google "
"VertexAI."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a chunk with Google (VertexAI) content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Google "
"VertexAI."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
from langchain_core.messages.block_translators.google_genai import (
translate_content,
translate_content_chunk,
)


def _register_google_vertexai_translator() -> None:

@@ -1,39 +1,135 @@
"""Derivations of standard content blocks from Groq content."""

import warnings
import json
import re
from typing import Any

from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types

WARNED = False
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs


def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message with Groq content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Groq."
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block

for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value # type: ignore[typeddict-item]

return standard_block
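To make the helper above concrete, a small sketch of how unknown provider fields are folded into `extras` (field values illustrative, not an actual Groq payload):

```python
raw = {"type": "python", "arguments": '{"code": "2 + 2"}', "output": "4", "latency_ms": 12}
result: dict = {"type": "server_tool_result", "tool_call_id": "0", "output": "4", "status": "success"}

_populate_extras(result, raw, {"type", "arguments", "index", "output"})
# Only the key absent from known_fields lands in extras
assert result["extras"] == {"latency_ms": 12}
```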
def _parse_code_json(s: str) -> dict:
"""Extract Python code from Groq built-in tool content.

Extracts the value of the 'code' field from a string of the form:
{"code": some_arbitrary_text_with_unescaped_quotes}

As Groq may not escape quotes in the executed tools, e.g.:
```
'{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
```
""" # noqa: E501
m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
if not m:
msg = (
"Could not extract Python code from Groq tool arguments. "
"Expected a JSON object with a 'code' field."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
raise ValueError(msg)
return {"code": m.group(1)}
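A usage sketch for the extraction above; note the unescaped inner quotes that would make `json.loads` fail:

```python
raw = '{"code": "print("hello")"}'

# json.loads(raw) raises JSONDecodeError here, but the greedy regex
# captures everything between the outermost quotes of the "code" value.
assert _parse_code_json(raw) == {"code": 'print("hello")'}
```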
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message chunk with Groq content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Groq."
def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
"""Convert groq message content to v1 format."""
content_blocks: list[types.ContentBlock] = []

if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
content_blocks.append(reasoning_block)

if executed_tools := message.additional_kwargs.get("executed_tools"):
for idx, executed_tool in enumerate(executed_tools):
args: dict[str, Any] | None = None
if arguments := executed_tool.get("arguments"):
try:
args = json.loads(arguments)
except json.JSONDecodeError:
if executed_tool.get("type") == "python":
try:
args = _parse_code_json(arguments)
except ValueError:
continue
elif (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
# GPT-OSS
args = {"code": arguments}
else:
continue
if isinstance(args, dict):
name = ""
if executed_tool.get("type") == "search":
name = "web_search"
elif executed_tool.get("type") == "python" or (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
name = "code_interpreter"
server_tool_call: types.ServerToolCall = {
"type": "server_tool_call",
"name": name,
"id": str(idx),
"args": args,
}
content_blocks.append(server_tool_call)
if tool_output := executed_tool.get("output"):
tool_result: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": str(idx),
"output": tool_output,
"status": "success",
}
known_fields = {"type", "arguments", "index", "output"}
_populate_extras(tool_result, executed_tool, known_fields)
content_blocks.append(tool_result)

if isinstance(message.content, str) and message.content:
content_blocks.append({"type": "text", "text": message.content})

for tool_call in message.tool_calls:
content_blocks.append( # noqa: PERF401
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError

return content_blocks
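End to end, a sketch of what this conversion yields for a Groq-style payload (field values illustrative, not an actual API response):

```python
from langchain_core.messages import AIMessage

message = AIMessage(
    content="The answer is 4.",
    additional_kwargs={
        "executed_tools": [
            {"type": "python", "arguments": '{"code": "2 + 2"}', "output": "4"}
        ]
    },
)

blocks = _convert_to_v1_from_groq(message)
# Expected ordering: server tool call, its result, then the text
assert [b["type"] for b in blocks] == [
    "server_tool_call",
    "server_tool_result",
    "text",
]
```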
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with groq content."""
return _convert_to_v1_from_groq(message)


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with groq content."""
return _convert_to_v1_from_groq(message)


def _register_groq_translator() -> None:
"""Register the Groq translator with the central registry.
"""Register the groq translator with the central registry.

Run automatically when the module is imported.
"""

@@ -10,12 +10,12 @@ def _convert_v0_multimodal_input_to_v1(
) -> list[types.ContentBlock]:
"""Convert v0 multimodal blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any v0 format
blocks to v1 format.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -18,7 +18,7 @@ if TYPE_CHECKING:


def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
"""Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions."""
"""Convert `ImageContentBlock` to format expected by OpenAI Chat Completions."""
if "url" in block:
return {
"type": "image_url",
@@ -155,12 +155,12 @@ def _convert_to_v1_from_chat_completions_input(
) -> list[types.ContentBlock]:
"""Convert OpenAI Chat Completions format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be OpenAI format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.
@@ -263,7 +263,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"


def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
"""Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
"""Convert v0 AIMessage into `output_version="responses/v1"` format."""
from langchain_core.messages import AIMessageChunk # noqa: PLC0415

# Only update ChatOpenAI v0.3 AIMessages

@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
"""The speaker / role of the Message."""

type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
"""The type of the message (used during serialization)."""


class ChatMessageChunk(ChatMessage, BaseMessageChunk):
@@ -29,11 +29,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
"""The type of the message (used during serialization).

Defaults to ``'ChatMessageChunk'``.

"""
"""The type of the message (used during serialization)."""

@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]

@@ -5,7 +5,7 @@
change in future releases.

This module provides standardized data structures for representing inputs to and
outputs from LLMs. The core abstraction is the **Content Block**, a ``TypedDict``.
outputs from LLMs. The core abstraction is the **Content Block**, a `TypedDict`.

**Rationale**

@@ -20,59 +20,59 @@ blocks into the format required by its API.
**Extensibility**

Data **not yet mapped** to a standard block may be represented using the
``NonStandardContentBlock``, which allows for provider-specific data to be included
`NonStandardContentBlock`, which allows for provider-specific data to be included
without losing the benefits of type checking and validation.

Furthermore, provider-specific fields **within** a standard block are fully supported
by default in the ``extras`` field of each block. This allows for additional metadata
by default in the `extras` field of each block. This allows for additional metadata
to be included without breaking the standard structure.

!!! warning
Do not heavily rely on the ``extras`` field for provider-specific data! This field
Do not heavily rely on the `extras` field for provider-specific data! This field
is subject to deprecation in future releases as we move towards PEP 728.

!!! note
Following widespread adoption of `PEP 728 <https://peps.python.org/pep-0728/>`__, we
will add ``extra_items=Any`` as a param to Content Blocks. This will signify to type
Following widespread adoption of [PEP 728](https://peps.python.org/pep-0728/), we
will add `extra_items=Any` as a param to Content Blocks. This will signify to type
checkers that additional provider-specific fields are allowed outside of the
``extras`` field, and that will become the new standard approach to adding
`extras` field, and that will become the new standard approach to adding
provider-specific metadata.

??? note

**Example with PEP 728 provider-specific fields:**

.. code-block:: python
```python
# Content block definition
# NOTE: `extra_items=Any`
class TextContentBlock(TypedDict, extra_items=Any):
type: Literal["text"]
id: NotRequired[str]
text: str
annotations: NotRequired[list[Annotation]]
index: NotRequired[int]
```

# Content block definition
# NOTE: `extra_items=Any`
class TextContentBlock(TypedDict, extra_items=Any):
type: Literal["text"]
id: NotRequired[str]
text: str
annotations: NotRequired[list[Annotation]]
index: NotRequired[int]
```python
from langchain_core.messages.content import TextContentBlock

.. code-block:: python
# Create a text content block with provider-specific fields
my_block: TextContentBlock = {
# Add required fields
"type": "text",
"text": "Hello, world!",
# Additional fields not specified in the TypedDict
# These are valid with PEP 728 and are typed as Any
"openai_metadata": {"model": "gpt-4", "temperature": 0.7},
"anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
"custom_field": "any value",
}

from langchain_core.messages.content import TextContentBlock
# Mutating an existing block to add provider-specific fields
openai_data = my_block["openai_metadata"] # Type: Any
```

# Create a text content block with provider-specific fields
my_block: TextContentBlock = {
# Add required fields
"type": "text",
"text": "Hello, world!",
# Additional fields not specified in the TypedDict
# These are valid with PEP 728 and are typed as Any
"openai_metadata": {"model": "gpt-4", "temperature": 0.7},
"anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
"custom_field": "any value",
}

# Mutating an existing block to add provider-specific fields
openai_data = my_block["openai_metadata"] # Type: Any

PEP 728 is enabled with ``# type: ignore[call-arg]`` comments to suppress
PEP 728 is enabled with `# type: ignore[call-arg]` comments to suppress
warnings from type checkers that don't yet support it. The functionality works
correctly in Python 3.13+ and will be fully supported as the ecosystem catches
up.
@@ -81,52 +81,51 @@ to be included without breaking the standard structure.

The module defines several types of content blocks, including:

- ``TextContentBlock``: Standard text output.
- ``Citation``: For annotations that link text output to a source document.
- ``ToolCall``: For function calling.
- ``ReasoningContentBlock``: To capture a model's thought process.
- `TextContentBlock`: Standard text output.
- `Citation`: For annotations that link text output to a source document.
- `ToolCall`: For function calling.
- `ReasoningContentBlock`: To capture a model's thought process.
- Multimodal data:
- ``ImageContentBlock``
- ``AudioContentBlock``
- ``VideoContentBlock``
- ``PlainTextContentBlock`` (e.g. .txt or .md files)
- ``FileContentBlock`` (e.g. PDFs, etc.)
- `ImageContentBlock`
- `AudioContentBlock`
- `VideoContentBlock`
- `PlainTextContentBlock` (e.g. .txt or .md files)
- `FileContentBlock` (e.g. PDFs, etc.)

**Example Usage**

.. code-block:: python
```python
# Direct construction:
from langchain_core.messages.content import TextContentBlock, ImageContentBlock

# Direct construction:
from langchain_core.messages.content import TextContentBlock, ImageContentBlock
multimodal_message: AIMessage(
content_blocks=[
TextContentBlock(type="text", text="What is shown in this image?"),
ImageContentBlock(
type="image",
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)

multimodal_message: AIMessage(
content_blocks=[
TextContentBlock(type="text", text="What is shown in this image?"),
ImageContentBlock(
type="image",
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
# Using factories:
from langchain_core.messages.content import create_text_block, create_image_block

# Using factories:
from langchain_core.messages.content import create_text_block, create_image_block

multimodal_message: AIMessage(
content=[
create_text_block("What is shown in this image?"),
create_image_block(
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
multimodal_message: AIMessage(
content=[
create_text_block("What is shown in this image?"),
create_image_block(
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
```

Factory functions offer benefits such as:
- Automatic ID generation (when not provided)
- No need to manually specify the ``type`` field

- No need to manually specify the `type` field
"""

from typing import Any, Literal, get_args, get_type_hints
@@ -140,12 +139,12 @@ class Citation(TypedDict):
"""Annotation for citing data from a document.

!!! note
``start``/``end`` indices refer to the **response text**,
`start`/`end` indices refer to the **response text**,
not the source text. This means that the indices are relative to the model's
response, not the original document (as specified in the ``url``).
response, not the original document (as specified in the `url`).

!!! note
``create_citation`` may also be used as a factory to create a ``Citation``.
!!! note "Factory function"
`create_citation` may also be used as a factory to create a `Citation`.
Benefits include:

* Automatic ID generation (when not provided)
@@ -157,10 +156,12 @@ class Citation(TypedDict):
"""Type of the content block. Used for discrimination."""

id: NotRequired[str]
"""Content block identifier. Either:
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -174,10 +175,10 @@ class Citation(TypedDict):
"""

start_index: NotRequired[int]
"""Start index of the **response text** (``TextContentBlock.text``)."""
"""Start index of the **response text** (`TextContentBlock.text`)."""

end_index: NotRequired[int]
"""End index of the **response text** (``TextContentBlock.text``)"""
"""End index of the **response text** (`TextContentBlock.text`)"""

cited_text: NotRequired[str]
"""Excerpt of source text being cited."""
@@ -202,8 +203,9 @@ class NonStandardAnnotation(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -212,6 +214,7 @@ class NonStandardAnnotation(TypedDict):


Annotation = Citation | NonStandardAnnotation
"""A union of all defined `Annotation` types."""


class TextContentBlock(TypedDict):
@@ -220,9 +223,9 @@ class TextContentBlock(TypedDict):
This typically represents the main text content of a message, such as the response
from a language model or the text of a user message.

!!! note
``create_text_block`` may also be used as a factory to create a
``TextContentBlock``. Benefits include:
!!! note "Factory function"
`create_text_block` may also be used as a factory to create a
`TextContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -236,8 +239,9 @@ class TextContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -245,7 +249,7 @@ class TextContentBlock(TypedDict):
"""Block text."""

annotations: NotRequired[list[Annotation]]
"""``Citation``s and other annotations."""
"""`Citation`s and other annotations."""

index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
@@ -255,20 +259,19 @@ class TextContentBlock(TypedDict):


class ToolCall(TypedDict):
"""Represents a request to call a tool.
"""Represents an AI's request to call a tool.

Example:

.. code-block:: python

{"name": "foo", "args": {"a": 1}, "id": "123"}
```python
{"name": "foo", "args": {"a": 1}, "id": "123"}
```

This represents a request to call the tool named "foo" with arguments {"a": 1}
and an identifier of "123".

!!! note
``create_tool_call`` may also be used as a factory to create a
``ToolCall``. Benefits include:
!!! note "Factory function"
`create_tool_call` may also be used as a factory to create a
`ToolCall`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -301,24 +304,22 @@ class ToolCall(TypedDict):


class ToolCallChunk(TypedDict):
"""A chunk of a tool call (e.g., as part of a stream).
"""A chunk of a tool call (yielded when streaming).

When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``),
When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
all string attributes are concatenated. Chunks are only merged if their
values of ``index`` are equal and not ``None``.
values of `index` are equal and not `None`.

Example:
```python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

.. code-block:: python

left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]

(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
```
"""

# TODO: Consider making fields NotRequired[str] in the future.
@@ -350,7 +351,7 @@ class ToolCallChunk(TypedDict):
class InvalidToolCall(TypedDict):
"""Allowance for errors made by LLM.

Here we add an ``error`` key to surface errors made during generation
Here we add an `error` key to surface errors made during generation
(e.g., invalid JSON arguments.)

"""
@@ -385,7 +386,10 @@ class InvalidToolCall(TypedDict):


class ServerToolCall(TypedDict):
"""Tool call that is executed server-side."""
"""Tool call that is executed server-side.

For example: code execution, web search, etc.
"""

type: Literal["server_tool_call"]
"""Used for discrimination."""
@@ -407,7 +411,7 @@ class ServerToolCall(TypedDict):


class ServerToolCallChunk(TypedDict):
"""A chunk of a tool call (as part of a stream)."""
"""A chunk of a server-side tool call (yielded when streaming)."""

type: Literal["server_tool_call_chunk"]
"""Used for discrimination."""
@@ -456,9 +460,9 @@ class ServerToolResult(TypedDict):
class ReasoningContentBlock(TypedDict):
"""Reasoning output from a LLM.

!!! note
``create_reasoning_block`` may also be used as a factory to create a
``ReasoningContentBlock``. Benefits include:
!!! note "Factory function"
`create_reasoning_block` may also be used as a factory to create a
`ReasoningContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -472,8 +476,9 @@ class ReasoningContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -481,7 +486,7 @@ class ReasoningContentBlock(TypedDict):
"""Reasoning text.

Either the thought summary or the raw reasoning text itself. This is often parsed
from ``<think>`` tags in the model's response.
from `<think>` tags in the model's response.

"""

@@ -498,9 +503,9 @@ class ReasoningContentBlock(TypedDict):
class ImageContentBlock(TypedDict):
"""Image data.

!!! note
``create_image_block`` may also be used as a factory to create a
``ImageContentBlock``. Benefits include:
!!! note "Factory function"
`create_image_block` may also be used as a factory to create a
`ImageContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -514,8 +519,9 @@ class ImageContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -525,7 +531,7 @@ class ImageContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the image. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#image>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#image)

"""

@@ -545,9 +551,9 @@ class ImageContentBlock(TypedDict):
class VideoContentBlock(TypedDict):
"""Video data.

!!! note
``create_video_block`` may also be used as a factory to create a
``VideoContentBlock``. Benefits include:
!!! note "Factory function"
`create_video_block` may also be used as a factory to create a
`VideoContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -561,8 +567,9 @@ class VideoContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -572,7 +579,7 @@ class VideoContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the video. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#video>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#video)

"""

@@ -592,9 +599,9 @@ class VideoContentBlock(TypedDict):
class AudioContentBlock(TypedDict):
"""Audio data.

!!! note
``create_audio_block`` may also be used as a factory to create an
``AudioContentBlock``. Benefits include:
!!! note "Factory function"
`create_audio_block` may also be used as a factory to create an
`AudioContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time

@@ -607,8 +614,9 @@ class AudioContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -618,7 +626,7 @@ class AudioContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the audio. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#audio>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#audio)

"""

@@ -639,18 +647,18 @@ class PlainTextContentBlock(TypedDict):
"""Plaintext data (e.g., from a document).

!!! note
A ``PlainTextContentBlock`` existed in ``langchain-core<1.0.0``. Although the
A `PlainTextContentBlock` existed in `langchain-core<1.0.0`. Although the
name has carried over, the structure has changed significantly. The only shared
keys between the old and new versions are ``type`` and ``text``, though the
``type`` value has changed from ``'text'`` to ``'text-plain'``.
keys between the old and new versions are `type` and `text`, though the
`type` value has changed from `'text'` to `'text-plain'`.

!!! note
Title and context are optional fields that may be passed to the model. See
Anthropic `example <https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content>`__.
Anthropic [example](https://docs.claude.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).

!!! note
``create_plaintext_block`` may also be used as a factory to create a
``PlainTextContentBlock``. Benefits include:
!!! note "Factory function"
`create_plaintext_block` may also be used as a factory to create a
`PlainTextContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -664,8 +672,9 @@ class PlainTextContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -698,18 +707,18 @@ class PlainTextContentBlock(TypedDict):


class FileContentBlock(TypedDict):
"""File data that doesn't fit into other multimodal blocks.
"""File data that doesn't fit into other multimodal block types.

This block is intended for files that are not images, audio, or plaintext. For
example, it can be used for PDFs, Word documents, etc.

If the file is an image, audio, or plaintext, you should use the corresponding
content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``,
``PlainTextContentBlock``).
content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
`PlainTextContentBlock`).

!!! note
``create_file_block`` may also be used as a factory to create a
``FileContentBlock``. Benefits include:
!!! note "Factory function"
`create_file_block` may also be used as a factory to create a
`FileContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -723,8 +732,9 @@ class FileContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -734,7 +744,7 @@ class FileContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the file. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml)

"""

@@ -764,14 +774,14 @@ class NonStandardContentBlock(TypedDict):
The purpose of this block should be to simply hold a provider-specific payload.
If a provider's non-standard output includes reasoning and tool calls, it should be
the adapter's job to parse that payload and emit the corresponding standard
``ReasoningContentBlock`` and ``ToolCalls``.
`ReasoningContentBlock` and `ToolCalls`.

Has no ``extras`` field, as provider-specific data should be included in the
``value`` field.
Has no `extras` field, as provider-specific data should be included in the
`value` field.

!!! note
``create_non_standard_block`` may also be used as a factory to create a
``NonStandardContentBlock``. Benefits include:
!!! note "Factory function"
`create_non_standard_block` may also be used as a factory to create a
`NonStandardContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -785,8 +795,9 @@ class NonStandardContentBlock(TypedDict):
"""Content block identifier.

Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -805,6 +816,7 @@ DataContentBlock = (
| PlainTextContentBlock
| FileContentBlock
)
"""A union of all defined multimodal data `ContentBlock` types."""

ToolContentBlock = (
ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
@@ -818,6 +830,7 @@ ContentBlock = (
| DataContentBlock
| ToolContentBlock
)
"""A union of all defined `ContentBlock` types and aliases."""


KNOWN_BLOCK_TYPES = {
@@ -842,7 +855,7 @@ KNOWN_BLOCK_TYPES = {
"non_standard",
# citation and non_standard_annotation intentionally omitted
}
"""These are block types known to ``langchain-core>=1.0.0``.
"""These are block types known to `langchain-core>=1.0.0`.

If a block has a type not in this set, it is considered to be provider-specific.
"""
@@ -881,7 +894,7 @@ def is_data_content_block(block: dict) -> bool:
block: The content block to check.

Returns:
True if the content block is a data content block, False otherwise.
`True` if the content block is a data content block, `False` otherwise.

"""
if block.get("type") not in _get_data_content_block_types():
@@ -923,20 +936,20 @@ def create_text_block(
index: int | str | None = None,
**kwargs: Any,
) -> TextContentBlock:
"""Create a ``TextContentBlock``.
"""Create a `TextContentBlock`.

Args:
text: The text content of the block.
id: Content block identifier. Generated automatically if not provided.
annotations: ``Citation``s and other annotations for the text.
annotations: `Citation`s and other annotations for the text.
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``TextContentBlock``.
A properly formatted `TextContentBlock`.

!!! note
The ``id`` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
block = TextContentBlock(
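A usage sketch for the factory just documented:

```python
from langchain_core.messages.content import create_text_block

block = create_text_block("Hello, world!")
assert block["type"] == "text"
# LangChain-generated IDs are UUID4s prefixed with 'lc_'
assert block["id"].startswith("lc_")
```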
|
||||
@@ -966,7 +979,7 @@ def create_image_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> ImageContentBlock:
|
||||
"""Create an ``ImageContentBlock``.
|
||||
"""Create an `ImageContentBlock`.
|
||||
|
||||
Args:
|
||||
url: URL of the image.
|
||||
@@ -977,15 +990,15 @@ def create_image_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``ImageContentBlock``.
|
||||
A properly formatted `ImageContentBlock`.
|
||||
|
||||
Raises:
|
||||
ValueError: If no image source is provided or if ``base64`` is used without
|
||||
``mime_type``.
|
||||
ValueError: If no image source is provided or if `base64` is used without
|
||||
`mime_type`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
if not any([url, base64, file_id]):
|
||||
@@ -1022,7 +1035,7 @@ def create_video_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> VideoContentBlock:
|
||||
"""Create a ``VideoContentBlock``.
|
||||
"""Create a `VideoContentBlock`.
|
||||
|
||||
Args:
|
||||
url: URL of the video.
|
||||
@@ -1033,15 +1046,15 @@ def create_video_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``VideoContentBlock``.
|
||||
A properly formatted `VideoContentBlock`.
|
||||
|
||||
Raises:
|
||||
ValueError: If no video source is provided or if ``base64`` is used without
|
||||
``mime_type``.
|
||||
ValueError: If no video source is provided or if `base64` is used without
|
||||
`mime_type`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
if not any([url, base64, file_id]):
|
||||
@@ -1082,7 +1095,7 @@ def create_audio_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> AudioContentBlock:
|
||||
"""Create an ``AudioContentBlock``.
|
||||
"""Create an `AudioContentBlock`.
|
||||
|
||||
Args:
|
||||
url: URL of the audio.
|
||||
@@ -1093,15 +1106,15 @@ def create_audio_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``AudioContentBlock``.
|
||||
A properly formatted `AudioContentBlock`.
|
||||
|
||||
Raises:
|
||||
ValueError: If no audio source is provided or if ``base64`` is used without
|
||||
``mime_type``.
|
||||
ValueError: If no audio source is provided or if `base64` is used without
|
||||
`mime_type`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
if not any([url, base64, file_id]):
|
||||
@@ -1142,7 +1155,7 @@ def create_file_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> FileContentBlock:
|
||||
"""Create a ``FileContentBlock``.
|
||||
"""Create a `FileContentBlock`.
|
||||
|
||||
Args:
|
||||
url: URL of the file.
|
||||
@@ -1153,15 +1166,15 @@ def create_file_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``FileContentBlock``.
|
||||
A properly formatted `FileContentBlock`.
|
||||
|
||||
Raises:
|
||||
ValueError: If no file source is provided or if ``base64`` is used without
|
||||
``mime_type``.
|
||||
ValueError: If no file source is provided or if `base64` is used without
|
||||
`mime_type`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
if not any([url, base64, file_id]):
|
||||
@@ -1203,7 +1216,7 @@ def create_plaintext_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> PlainTextContentBlock:
|
||||
"""Create a ``PlainTextContentBlock``.
|
||||
"""Create a `PlainTextContentBlock`.
|
||||
|
||||
Args:
|
||||
text: The plaintext content.
|
||||
@@ -1216,11 +1229,11 @@ def create_plaintext_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``PlainTextContentBlock``.
|
||||
A properly formatted `PlainTextContentBlock`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = PlainTextContentBlock(
|
||||
@@ -1259,7 +1272,7 @@ def create_tool_call(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> ToolCall:
|
||||
"""Create a ``ToolCall``.
|
||||
"""Create a `ToolCall`.
|
||||
|
||||
Args:
|
||||
name: The name of the tool to be called.
|
||||
@@ -1268,11 +1281,11 @@ def create_tool_call(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``ToolCall``.
|
||||
A properly formatted `ToolCall`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = ToolCall(
|
||||
@@ -1298,7 +1311,7 @@ def create_reasoning_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> ReasoningContentBlock:
|
||||
"""Create a ``ReasoningContentBlock``.
|
||||
"""Create a `ReasoningContentBlock`.
|
||||
|
||||
Args:
|
||||
reasoning: The reasoning text or thought summary.
|
||||
@@ -1306,11 +1319,11 @@ def create_reasoning_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``ReasoningContentBlock``.
|
||||
A properly formatted `ReasoningContentBlock`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = ReasoningContentBlock(
|
||||
@@ -1339,7 +1352,7 @@ def create_citation(
|
||||
id: str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> Citation:
|
||||
"""Create a ``Citation``.
|
||||
"""Create a `Citation`.
|
||||
|
||||
Args:
|
||||
url: URL of the document source.
|
||||
@@ -1350,11 +1363,11 @@ def create_citation(
|
||||
id: Content block identifier. Generated automatically if not provided.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``Citation``.
|
||||
A properly formatted `Citation`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = Citation(type="citation", id=ensure_id(id))
|
||||
@@ -1383,7 +1396,7 @@ def create_non_standard_block(
|
||||
id: str | None = None,
|
||||
index: int | str | None = None,
|
||||
) -> NonStandardContentBlock:
|
||||
"""Create a ``NonStandardContentBlock``.
|
||||
"""Create a `NonStandardContentBlock`.
|
||||
|
||||
Args:
|
||||
value: Provider-specific data.
|
||||
@@ -1391,11 +1404,11 @@ def create_non_standard_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``NonStandardContentBlock``.
|
||||
A properly formatted `NonStandardContentBlock`.
|
||||
|
||||
!!! note
|
||||
The ``id`` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = NonStandardContentBlock(
|
||||
|
||||
@@ -15,11 +15,11 @@ from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
    """Message for passing the result of executing a tool back to a model.

-    ``FunctionMessage`` are an older version of the ``ToolMessage`` schema, and
-    do not contain the ``tool_call_id`` field.
+    `FunctionMessage` are an older version of the `ToolMessage` schema, and
+    do not contain the `tool_call_id` field.

-    The ``tool_call_id`` field is used to associate the tool call request with the
-    tool call response. This is useful in situations where a chat model is able
+    The `tool_call_id` field is used to associate the tool call request with the
+    tool call response. Useful in situations where a chat model is able
    to request multiple tool calls in parallel.

    """
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
    """The name of the function that was executed."""

    type: Literal["function"] = "function"
-    """The type of the message (used for serialization). Defaults to ``'function'``."""
+    """The type of the message (used for serialization)."""


class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,11 +38,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to ``'FunctionMessageChunk'``.
-
-    """
+    """The type of the message (used for serialization)."""

    @override
    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]
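For context on the `tool_call_id` pairing these docstrings describe, a small sketch using the stable message API (the call IDs are made up):

```python
from langchain_core.messages import AIMessage, ToolMessage

# The model requests two tool calls in parallel, each with its own ID...
request = AIMessage(
    content="",
    tool_calls=[
        {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_1"},
        {"name": "mul", "args": {"a": 3, "b": 4}, "id": "call_2"},
    ],
)

# ...and each ToolMessage result is matched back to its request by ID,
# something the older FunctionMessage schema cannot express.
results = [
    ToolMessage(content="3", tool_call_id="call_1"),
    ToolMessage(content="12", tool_call_id="call_2"),
]
```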
@@ -7,33 +7,27 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk


class HumanMessage(BaseMessage):
-    """Message from a human.
+    """Message from the user.

-    ``HumanMessage``s are messages that are passed in from a human to the model.
+    A `HumanMessage` is a message that is passed in from a user to the model.

    Example:
-        .. code-block:: python
-
-            from langchain_core.messages import HumanMessage, SystemMessage
-
-            messages = [
-                SystemMessage(content="You are a helpful assistant! Your name is Bob."),
-                HumanMessage(content="What is your name?"),
-            ]
-
-            # Instantiate a chat model and invoke it with the messages
-            model = ...
-            print(model.invoke(messages))
+        ```python
+        from langchain_core.messages import HumanMessage, SystemMessage
+
+        messages = [
+            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+            HumanMessage(content="What is your name?"),
+        ]
+
+        # Instantiate a chat model and invoke it with the messages
+        model = ...
+        print(model.invoke(messages))
+        ```
    """

    type: Literal["human"] = "human"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'human'``.
-
-    """
+    """The type of the message (used for serialization)."""

    @overload
    def __init__(
@@ -56,7 +50,7 @@ class HumanMessage(BaseMessage):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
-        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
+        """Specify `content` as positional arg or `content_blocks` for typing."""
        if content_blocks is not None:
            super().__init__(
                content=cast("str | list[str | dict]", content_blocks),
@@ -73,5 +67,4 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-    Defaults to "HumanMessageChunk"."""
+    """The type of the message (used for serialization)."""
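A quick sketch of the two construction paths in the `__init__` overloads above. The `content_blocks` keyword is the newer typed API from this diff, so treat the second form as illustrative:

```python
from langchain_core.messages import HumanMessage

# Positional string content: the long-standing form.
msg = HumanMessage("What is your name?")

# Typed content blocks, per the `content_blocks` keyword in this diff;
# the dict shape shown is the standard text block.
typed = HumanMessage(
    content_blocks=[{"type": "text", "text": "What is your name?"}]
)
```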
@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
    """Message responsible for deleting other messages."""

    type: Literal["remove"] = "remove"
-    """The type of the message (used for serialization). Defaults to "remove"."""
+    """The type of the message (used for serialization)."""

    def __init__(
        self,
@@ -20,7 +20,7 @@ class RemoveMessage(BaseMessage):

        Args:
            id: The ID of the message to remove.
-            kwargs: Additional fields to pass to the message.
+            **kwargs: Additional fields to pass to the message.

        Raises:
            ValueError: If the 'content' field is passed in kwargs.
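A short sketch of how `RemoveMessage` is meant to be used; the reducer that actually honors it (for example LangGraph's `add_messages`) lives outside this file:

```python
from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage

history = [
    HumanMessage(content="hi", id="1"),
    AIMessage(content="hello!", id="2"),
]

# A RemoveMessage carries no content of its own; it tells a
# message-list reducer to drop the message with the matching ID.
to_apply = RemoveMessage(id="1")
```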
@@ -13,27 +13,21 @@ class SystemMessage(BaseMessage):
    of input messages.

    Example:
-        .. code-block:: python
-
-            from langchain_core.messages import HumanMessage, SystemMessage
-
-            messages = [
-                SystemMessage(content="You are a helpful assistant! Your name is Bob."),
-                HumanMessage(content="What is your name?"),
-            ]
-
-            # Define a chat model and invoke it with the messages
-            print(model.invoke(messages))
+        ```python
+        from langchain_core.messages import HumanMessage, SystemMessage
+
+        messages = [
+            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+            HumanMessage(content="What is your name?"),
+        ]
+
+        # Define a chat model and invoke it with the messages
+        print(model.invoke(messages))
+        ```
    """

    type: Literal["system"] = "system"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'system'``.
-
-    """
+    """The type of the message (used for serialization)."""

    @overload
    def __init__(
@@ -56,7 +50,7 @@ class SystemMessage(BaseMessage):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
-        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
+        """Specify `content` as positional arg or `content_blocks` for typing."""
        if content_blocks is not None:
            super().__init__(
                content=cast("str | list[str | dict]", content_blocks),
@@ -73,8 +67,4 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to ``'SystemMessageChunk'``.
-
-    """
+    """The type of the message (used for serialization)."""
@@ -16,9 +16,9 @@ from langchain_core.utils._merge import merge_dicts, merge_obj
class ToolOutputMixin:
    """Mixin for objects that tools can return directly.

-    If a custom BaseTool is invoked with a ``ToolCall`` and the output of custom code is
-    not an instance of ``ToolOutputMixin``, the output will automatically be coerced to
-    a string and wrapped in a ``ToolMessage``.
+    If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is
+    not an instance of `ToolOutputMixin`, the output will automatically be coerced to
+    a string and wrapped in a `ToolMessage`.

    """
@@ -26,42 +26,39 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
    """Message for passing the result of executing a tool back to a model.

-    ``ToolMessage``s contain the result of a tool invocation. Typically, the result
-    is encoded inside the ``content`` field.
+    `ToolMessage` objects contain the result of a tool invocation. Typically, the result
+    is encoded inside the `content` field.

-    Example: A ``ToolMessage`` representing a result of ``42`` from a tool call with id
+    Example: A `ToolMessage` representing a result of `42` from a tool call with id

-        .. code-block:: python
-
-            from langchain_core.messages import ToolMessage
-
-            ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
+        ```python
+        from langchain_core.messages import ToolMessage
+
+        ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
+        ```

-    Example: A ``ToolMessage`` where only part of the tool output is sent to the model
-        and the full output is passed in to artifact.
+    Example: A `ToolMessage` where only part of the tool output is sent to the model
+        and the full output is passed in to artifact.

-    !!! version-added "Added in version 0.2.17"
-
-        .. code-block:: python
-
-            from langchain_core.messages import ToolMessage
-
-            tool_output = {
-                "stdout": "From the graph we can see that the correlation between "
-                "x and y is ...",
-                "stderr": None,
-                "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
-            }
-
-            ToolMessage(
-                content=tool_output["stdout"],
-                artifact=tool_output,
-                tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
-            )
+        ```python
+        from langchain_core.messages import ToolMessage
+
+        tool_output = {
+            "stdout": "From the graph we can see that the correlation between "
+            "x and y is ...",
+            "stderr": None,
+            "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
+        }
+
+        ToolMessage(
+            content=tool_output["stdout"],
+            artifact=tool_output,
+            tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
+        )
+        ```

-    The ``tool_call_id`` field is used to associate the tool call request with the
-    tool call response. This is useful in situations where a chat model is able
+    The `tool_call_id` field is used to associate the tool call request with the
+    tool call response. Useful in situations where a chat model is able
    to request multiple tool calls in parallel.

    """
@@ -70,11 +67,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
    """Tool call that this message is responding to."""

    type: Literal["tool"] = "tool"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'tool'``.
-
-    """
+    """The type of the message (used for serialization)."""

    artifact: Any = None
    """Artifact of the Tool execution which is not meant to be sent to the model.
@@ -83,21 +76,15 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
    a subset of the full tool output is being passed as message content but the full
    output is needed in other parts of the code.

-    !!! version-added "Added in version 0.2.17"
-
    """

    status: Literal["success", "error"] = "success"
-    """Status of the tool invocation.
-
-    !!! version-added "Added in version 0.2.24"
-
-    """
+    """Status of the tool invocation."""

    additional_kwargs: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage, but not used."""
+    """Currently inherited from `BaseMessage`, but not used."""
    response_metadata: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage, but not used."""
+    """Currently inherited from `BaseMessage`, but not used."""

    @model_validator(mode="before")
    @classmethod
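The `status` field touched above is how a failed invocation is signaled back to the model; a minimal sketch:

```python
from langchain_core.messages import ToolMessage

# A successful result (status defaults to "success").
ok = ToolMessage(content="3", tool_call_id="call_1")

# A failed invocation: the error text goes in content, and
# status="error" lets the model or an agent loop react accordingly.
failed = ToolMessage(
    content="ZeroDivisionError: division by zero",
    tool_call_id="call_2",
    status="error",
)
```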
@@ -165,12 +152,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
-        """Initialize ``ToolMessage``.
+        """Initialize a `ToolMessage`.

-        Specify ``content`` as positional arg or ``content_blocks`` for typing.
+        Specify `content` as positional arg or `content_blocks` for typing.

        Args:
-            content: The string contents of the message.
+            content: The contents of the message.
            content_blocks: Typed standard content.
            **kwargs: Additional fields.
        """
@@ -216,16 +203,15 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):


class ToolCall(TypedDict):
-    """Represents a request to call a tool.
+    """Represents an AI's request to call a tool.

    Example:
-        .. code-block:: python
-
-            {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```python
+        {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```

-    This represents a request to call the tool named ``'foo'`` with arguments
-    ``{"a": 1}`` and an identifier of ``'123'``.
+    This represents a request to call the tool named `'foo'` with arguments
+    `{"a": 1}` and an identifier of `'123'`.

    """
@@ -263,24 +249,22 @@ def tool_call(


class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
+    """A chunk of a tool call (yielded when streaming).

-    When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``),
+    When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
    all string attributes are concatenated. Chunks are only merged if their
-    values of ``index`` are equal and not None.
+    values of `index` are equal and not None.

    Example:
-        .. code-block:: python
-
-            left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
-            right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
-
-            (
-                AIMessageChunk(content="", tool_call_chunks=left_chunks)
-                + AIMessageChunk(content="", tool_call_chunks=right_chunks)
-            ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
+        ```python
+        left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
+        right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
+
+        (
+            AIMessageChunk(content="", tool_call_chunks=left_chunks)
+            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
+        ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
+        ```
    """

    name: str | None
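The docstring's merging claim can be checked directly; a runnable version of that example with imports added (IDs are made up):

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.messages.tool import ToolCallChunk

left = AIMessageChunk(
    content="",
    tool_call_chunks=[ToolCallChunk(name="foo", args='{"a":', id="call_1", index=0)],
)
right = AIMessageChunk(
    content="",
    tool_call_chunks=[ToolCallChunk(name=None, args="1}", id=None, index=0)],
)

# Chunks with the same non-None index merge; string fields concatenate.
merged = (left + right).tool_call_chunks
print(merged[0]["name"], merged[0]["args"])  # foo {"a":1}
```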
File diff suppressed because it is too large
@@ -1,17 +1,4 @@
-"""**OutputParser** classes parse the output of an LLM call.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser  # ListOutputParser, PydanticOutputParser
-
-**Main helpers:**
-
-.. code-block::
-
-    Serializable, Generation, PromptValue
-"""  # noqa: E501
+"""**OutputParser** classes parse the output of an LLM call."""

from typing import TYPE_CHECKING
@@ -31,13 +31,13 @@ class BaseLLMOutputParser(ABC, Generic[T]):

    @abstractmethod
    def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
-        """Parse a list of candidate model Generations into a specific format.
+        """Parse a list of candidate model `Generation` objects into a specific format.

        Args:
-            result: A list of Generations to be parsed. The Generations are assumed
-                to be different candidate outputs for a single model input.
+            result: A list of `Generation` to be parsed. The `Generation` objects are
+                assumed to be different candidate outputs for a single model input.
            partial: Whether to parse the output as a partial result. This is useful
-                for parsers that can parse partial results. Default is False.
+                for parsers that can parse partial results.

        Returns:
            Structured output.
@@ -46,17 +46,17 @@ class BaseLLMOutputParser(ABC, Generic[T]):
    async def aparse_result(
        self, result: list[Generation], *, partial: bool = False
    ) -> T:
-        """Async parse a list of candidate model Generations into a specific format.
+        """Async parse a list of candidate model `Generation` objects into a specific format.

        Args:
-            result: A list of Generations to be parsed. The Generations are assumed
+            result: A list of `Generation` to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.
            partial: Whether to parse the output as a partial result. This is useful
-                for parsers that can parse partial results. Default is False.
+                for parsers that can parse partial results.

        Returns:
            Structured output.
-        """
+        """  # noqa: E501
        return await run_in_executor(None, self.parse_result, result, partial=partial)
@@ -134,29 +134,28 @@ class BaseOutputParser(
    Output parsers help structure language model responses.

    Example:
-        .. code-block:: python
-
-            class BooleanOutputParser(BaseOutputParser[bool]):
-                true_val: str = "YES"
-                false_val: str = "NO"
-
-                def parse(self, text: str) -> bool:
-                    cleaned_text = text.strip().upper()
-                    if cleaned_text not in (
-                        self.true_val.upper(),
-                        self.false_val.upper(),
-                    ):
-                        raise OutputParserException(
-                            f"BooleanOutputParser expected output value to either be "
-                            f"{self.true_val} or {self.false_val} (case-insensitive). "
-                            f"Received {cleaned_text}."
-                        )
-                    return cleaned_text == self.true_val.upper()
-
-                @property
-                def _type(self) -> str:
-                    return "boolean_output_parser"
+        ```python
+        class BooleanOutputParser(BaseOutputParser[bool]):
+            true_val: str = "YES"
+            false_val: str = "NO"
+
+            def parse(self, text: str) -> bool:
+                cleaned_text = text.strip().upper()
+                if cleaned_text not in (
+                    self.true_val.upper(),
+                    self.false_val.upper(),
+                ):
+                    raise OutputParserException(
+                        f"BooleanOutputParser expected output value to either be "
+                        f"{self.true_val} or {self.false_val} (case-insensitive). "
+                        f"Received {cleaned_text}."
+                    )
+                return cleaned_text == self.true_val.upper()
+
+            @property
+            def _type(self) -> str:
+                return "boolean_output_parser"
+        ```
    """

    @property
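A usage note for the example parser in this hunk; the class is reproduced below so the snippet runs on its own:

```python
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser


class BooleanOutputParser(BaseOutputParser[bool]):
    # Same parser as in the docstring above, reproduced for a runnable demo.
    true_val: str = "YES"
    false_val: str = "NO"

    def parse(self, text: str) -> bool:
        cleaned_text = text.strip().upper()
        if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):
            msg = (
                f"BooleanOutputParser expected output value to either be "
                f"{self.true_val} or {self.false_val} (case-insensitive). "
                f"Received {cleaned_text}."
            )
            raise OutputParserException(msg)
        return cleaned_text == self.true_val.upper()

    @property
    def _type(self) -> str:
        return "boolean_output_parser"


parser = BooleanOutputParser()
print(parser.parse("YES"))   # True
print(parser.parse(" no "))  # False: case- and whitespace-insensitive
```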
@@ -173,7 +172,7 @@ class BaseOutputParser(
        This property is inferred from the first type argument of the class.

        Raises:
-            TypeError: If the class doesn't have an inferable OutputType.
+            TypeError: If the class doesn't have an inferable `OutputType`.
        """
        for base in self.__class__.mro():
            if hasattr(base, "__pydantic_generic_metadata__"):
@@ -235,16 +234,16 @@ class BaseOutputParser(

    @override
    def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
-        """Parse a list of candidate model Generations into a specific format.
+        """Parse a list of candidate model `Generation` objects into a specific format.

-        The return value is parsed from only the first Generation in the result, which
-        is assumed to be the highest-likelihood Generation.
+        The return value is parsed from only the first `Generation` in the result, which
+        is assumed to be the highest-likelihood `Generation`.

        Args:
-            result: A list of Generations to be parsed. The Generations are assumed
-                to be different candidate outputs for a single model input.
+            result: A list of `Generation` to be parsed. The `Generation` objects are
+                assumed to be different candidate outputs for a single model input.
            partial: Whether to parse the output as a partial result. This is useful
-                for parsers that can parse partial results. Default is False.
+                for parsers that can parse partial results.

        Returns:
            Structured output.
@@ -265,20 +264,20 @@ class BaseOutputParser(
    async def aparse_result(
        self, result: list[Generation], *, partial: bool = False
    ) -> T:
-        """Async parse a list of candidate model Generations into a specific format.
+        """Async parse a list of candidate model `Generation` objects into a specific format.

-        The return value is parsed from only the first Generation in the result, which
-        is assumed to be the highest-likelihood Generation.
+        The return value is parsed from only the first `Generation` in the result, which
+        is assumed to be the highest-likelihood `Generation`.

        Args:
-            result: A list of Generations to be parsed. The Generations are assumed
-                to be different candidate outputs for a single model input.
+            result: A list of `Generation` to be parsed. The `Generation` objects are
+                assumed to be different candidate outputs for a single model input.
            partial: Whether to parse the output as a partial result. This is useful
-                for parsers that can parse partial results. Default is False.
+                for parsers that can parse partial results.

        Returns:
            Structured output.
-        """
+        """  # noqa: E501
        return await run_in_executor(None, self.parse_result, result, partial=partial)

    async def aparse(self, text: str) -> T:
@@ -300,13 +299,13 @@ class BaseOutputParser(
    ) -> Any:
        """Parse the output of an LLM call with the input prompt for context.

-        The prompt is largely provided in the event the OutputParser wants
+        The prompt is largely provided in the event the `OutputParser` wants
        to retry or fix the output in some way, and needs information from
        the prompt to do so.

        Args:
            completion: String output of a language model.
-            prompt: Input PromptValue.
+            prompt: Input `PromptValue`.

        Returns:
            Structured output.
@@ -40,7 +40,7 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):

    pydantic_object: Annotated[type[TBaseModel] | None, SkipValidation()] = None  # type: ignore[valid-type]
    """The Pydantic object to use for validation.
-    If None, no validation is performed."""
+    If `None`, no validation is performed."""

    @override
    def _diff(self, prev: Any | None, next: Any) -> Any:
@@ -59,10 +59,9 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON objects.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `False`, the output will be the full JSON object.

        Returns:
            The parsed JSON object.
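A minimal sketch of the `partial` behavior documented above, using `JsonOutputParser` directly (the truncated JSON is illustrative):

```python
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation

parser = JsonOutputParser()

# A half-streamed JSON payload: with partial=True the parser completes
# what it can instead of raising on the truncated input.
gen = [Generation(text='{"setup": "Why did the chicken')]
print(parser.parse_result(gen, partial=True))  # {'setup': 'Why did the chicken'}
```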
@@ -146,10 +146,10 @@ class CommaSeparatedListOutputParser(ListOutputParser):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

        Returns:
-            ``["langchain", "output_parsers", "list"]``
+            `["langchain", "output_parsers", "list"]`
        """
        return ["langchain", "output_parsers", "list"]
@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

        Args:
            result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.

        Returns:
            The parsed JSON object.

        Raises:
-            OutputParserException: If the output is not valid JSON.
+            `OutputParserException`: If the output is not valid JSON.
        """
        generation = result[0]
        if not isinstance(generation, ChatGeneration):
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):


class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
-    """Parse an output as the Json object."""
+    """Parse an output as the JSON object."""

    strict: bool = False
    """Whether to allow non-JSON-compliant strings.
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):

        Args:
            result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.

        Returns:
            The parsed JSON object.

        Raises:
-            OutputParserException: If the output is not valid JSON.
+            `OutputParserException`: If the output is not valid JSON.
        """
        if len(result) != 1:
            msg = f"Expected exactly one result, but got {len(result)}"
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):


class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
-    """Parse an output as the element of the Json object."""
+    """Parse an output as the element of the JSON object."""

    key_name: str
    """The name of the key to return."""
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):

        Args:
            result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.

        Returns:
            The parsed JSON object.
@@ -177,48 +177,50 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):


class PydanticOutputFunctionsParser(OutputFunctionsParser):
-    """Parse an output as a pydantic object.
+    """Parse an output as a Pydantic object.

-    This parser is used to parse the output of a ChatModel that uses
-    OpenAI function format to invoke functions.
+    This parser is used to parse the output of a chat model that uses OpenAI function
+    format to invoke functions.

-    The parser extracts the function call invocation and matches
-    them to the pydantic schema provided.
+    The parser extracts the function call invocation and matches them to the Pydantic
+    schema provided.

-    An exception will be raised if the function call does not match
-    the provided schema.
+    An exception will be raised if the function call does not match the provided schema.

    Example:
-        ... code-block:: python
-
-            message = AIMessage(
-                content="This is a test message",
-                additional_kwargs={
-                    "function_call": {
-                        "name": "cookie",
-                        "arguments": json.dumps({"name": "value", "age": 10}),
-                    }
-                },
-            )
-            chat_generation = ChatGeneration(message=message)
-
-            class Cookie(BaseModel):
-                name: str
-                age: int
-
-            class Dog(BaseModel):
-                species: str
-
-            # Full output
-            parser = PydanticOutputFunctionsParser(
-                pydantic_schema={"cookie": Cookie, "dog": Dog}
-            )
-            result = parser.parse_result([chat_generation])
+        ```python
+        message = AIMessage(
+            content="This is a test message",
+            additional_kwargs={
+                "function_call": {
+                    "name": "cookie",
+                    "arguments": json.dumps({"name": "value", "age": 10}),
+                }
+            },
+        )
+        chat_generation = ChatGeneration(message=message)
+
+
+        class Cookie(BaseModel):
+            name: str
+            age: int
+
+
+        class Dog(BaseModel):
+            species: str
+
+
+        # Full output
+        parser = PydanticOutputFunctionsParser(
+            pydantic_schema={"cookie": Cookie, "dog": Dog}
+        )
+        result = parser.parse_result([chat_generation])
+        ```

    """

    pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
-    """The pydantic schema to parse the output with.
+    """The Pydantic schema to parse the output with.

    If multiple schemas are provided, then the function name will be used to
    determine which schema to use.
@@ -227,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
    @model_validator(mode="before")
    @classmethod
    def validate_schema(cls, values: dict) -> Any:
-        """Validate the pydantic schema.
+        """Validate the Pydantic schema.

        Args:
            values: The values to validate.
@@ -236,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
            The validated values.

        Raises:
-            ValueError: If the schema is not a pydantic schema.
+            ValueError: If the schema is not a Pydantic schema.
        """
        schema = values["pydantic_schema"]
        if "args_only" not in values:
@@ -259,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

        Args:
            result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.

        Raises:
-            ValueError: If the pydantic schema is not valid.
+            ValueError: If the Pydantic schema is not valid.

        Returns:
            The parsed JSON object.
@@ -285,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
        elif issubclass(pydantic_schema, BaseModelV1):
            pydantic_args = pydantic_schema.parse_raw(args)
        else:
-            msg = f"Unsupported pydantic schema: {pydantic_schema}"
+            msg = f"Unsupported Pydantic schema: {pydantic_schema}"
            raise ValueError(msg)
        return pydantic_args


class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
-    """Parse an output as an attribute of a pydantic object."""
+    """Parse an output as an attribute of a Pydantic object."""

    attr_name: str
    """The name of the attribute to return."""
@@ -302,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):

        Args:
            result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.

        Returns:
            The parsed JSON object.
@@ -31,10 +31,9 @@ def parse_tool_call(

    Args:
        raw_tool_call: The raw tool call to parse.
-        partial: Whether to parse partial JSON. Default is False.
+        partial: Whether to parse partial JSON.
        strict: Whether to allow non-JSON-compliant strings.
-            Default is False.
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.

    Returns:
        The parsed tool call.
@@ -105,10 +104,9 @@ def parse_tool_calls(

    Args:
        raw_tool_calls: The raw tool calls to parse.
-        partial: Whether to parse partial JSON. Default is False.
+        partial: Whether to parse partial JSON.
        strict: Whether to allow non-JSON-compliant strings.
-            Default is False.
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.

    Returns:
        The parsed tool calls.
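For context, a sketch of `parse_tool_call` on a raw OpenAI-format tool call; the field values are made up, and the exact keys of the returned dict may vary by version:

```python
from langchain_core.output_parsers.openai_tools import parse_tool_call

# Raw tool call in OpenAI's wire format (illustrative values).
raw = {
    "id": "call_1",
    "type": "function",
    "function": {"name": "add", "arguments": '{"a": 1, "b": 2}'},
}

parsed = parse_tool_call(raw, return_id=True)
print(parsed)  # roughly: {'name': 'add', 'args': {'a': 1, 'b': 2}, 'id': 'call_1', ...}
```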
@@ -148,7 +146,7 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
    first_tool_only: bool = False
    """Whether to return only the first tool call.

-    If False, the result will be a list of tool calls, or an empty list
+    If `False`, the result will be a list of tool calls, or an empty list
    if no tool calls are found.

    If true, and multiple tool calls are found, only the first one will be returned,
@@ -162,10 +160,9 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `False`, the output will be the full JSON object.

        Returns:
            The parsed tool calls.
@@ -226,10 +223,9 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `False`, the output will be the full JSON object.

        Raises:
            OutputParserException: If the generation is not a chat generation.
@@ -310,10 +306,9 @@ class PydanticToolsParser(JsonOutputToolsParser):
        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `False`, the output will be the full JSON object.

        Returns:
            The parsed Pydantic objects.
@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (


class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
-    """Parse an output using a pydantic model."""
+    """Parse an output using a Pydantic model."""

    pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
-    """The pydantic model to parse."""
+    """The Pydantic model to parse."""

    def _parse_obj(self, obj: dict) -> TBaseModel:
        try:
@@ -45,21 +45,20 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
    def parse_result(
        self, result: list[Generation], *, partial: bool = False
    ) -> TBaseModel | None:
-        """Parse the result of an LLM call to a pydantic object.
+        """Parse the result of an LLM call to a Pydantic object.

        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON objects.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                all the keys that have been returned so far.
-                Defaults to False.

        Raises:
-            OutputParserException: If the result is not valid JSON
-                or does not conform to the pydantic model.
+            `OutputParserException`: If the result is not valid JSON
+                or does not conform to the Pydantic model.

        Returns:
-            The parsed pydantic object.
+            The parsed Pydantic object.
        """
        try:
            json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
            raise

    def parse(self, text: str) -> TBaseModel:
-        """Parse the output of an LLM call to a pydantic object.
+        """Parse the output of an LLM call to a Pydantic object.

        Args:
            text: The output of the LLM call.

        Returns:
-            The parsed pydantic object.
+            The parsed Pydantic object.
        """
        return super().parse(text)
@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
    @property
    @override
    def OutputType(self) -> type[TBaseModel]:
-        """Return the pydantic model."""
+        """Return the Pydantic model."""
        return self.pydantic_object
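A minimal end-to-end sketch of `PydanticOutputParser` (the model and JSON text are illustrative):

```python
from pydantic import BaseModel

from langchain_core.output_parsers import PydanticOutputParser


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputParser(pydantic_object=Joke)

# Valid JSON that conforms to the model parses into a Joke instance;
# invalid or non-conforming output raises OutputParserException.
text = '{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}'
joke = parser.parse(text)
print(joke.punchline)  # To get to the other side.
```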
@@ -19,10 +19,10 @@ class StrOutputParser(BaseTransformOutputParser[str]):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

        Returns:
-            ``["langchain", "schema", "output_parser"]``
+            `["langchain", "schema", "output_parser"]`
        """
        return ["langchain", "schema", "output_parser"]
@@ -64,7 +64,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
        Args:
            input: The input to transform.
            config: The configuration to use for the transformation.
-            kwargs: Additional keyword arguments.
+            **kwargs: Additional keyword arguments.

        Yields:
            The transformed output.
@@ -85,7 +85,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
        Args:
            input: The input to transform.
            config: The configuration to use for the transformation.
-            kwargs: Additional keyword arguments.
+            **kwargs: Additional keyword arguments.

        Yields:
            The transformed output.
@@ -82,7 +82,7 @@ class _StreamingParser:
            chunk: A chunk of text to parse. This can be a string or a BaseMessage.

        Yields:
-            AddableDict: A dictionary representing the parsed XML element.
+            A dictionary representing the parsed XML element.

        Raises:
            xml.etree.ElementTree.ParseError: If the XML is not well-formed.
@@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc.)
- LLMs will return regular text strings.

In addition, users can access the raw output of either LLMs or chat models via
-callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
+callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.
@@ -15,14 +15,14 @@ from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
    """A single chat generation output.

-    A subclass of ``Generation`` that represents the response from a chat model
+    A subclass of `Generation` that represents the response from a chat model
    that generates chat messages.

-    The ``message`` attribute is a structured representation of the chat message.
-    Most of the time, the message will be of type ``AIMessage``.
+    The `message` attribute is a structured representation of the chat message.
+    Most of the time, the message will be of type `AIMessage`.

    Users working with chat models will usually access information via either
-    ``AIMessage`` (returned from runnable interfaces) or ``LLMResult`` (available
+    `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks).
    """
@@ -70,9 +70,9 @@ class ChatGeneration(Generation):


class ChatGenerationChunk(ChatGeneration):
-    """``ChatGeneration`` chunk.
+    """`ChatGeneration` chunk.

-    ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
+    `ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
    """

    message: BaseMessageChunk
@@ -84,18 +84,18 @@ class ChatGenerationChunk(ChatGeneration):
    def __add__(
        self, other: ChatGenerationChunk | list[ChatGenerationChunk]
    ) -> ChatGenerationChunk:
-        """Concatenate two ``ChatGenerationChunk``s.
+        """Concatenate two `ChatGenerationChunk`s.

        Args:
-            other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``
+            other: The other `ChatGenerationChunk` or list of `ChatGenerationChunk`
                to concatenate.

        Raises:
-            TypeError: If other is not a ``ChatGenerationChunk`` or list of
-                ``ChatGenerationChunk``.
+            TypeError: If other is not a `ChatGenerationChunk` or list of
+                `ChatGenerationChunk`.

        Returns:
-            A new ``ChatGenerationChunk`` concatenated from self and other.
+            A new `ChatGenerationChunk` concatenated from self and other.
        """
        if isinstance(other, ChatGenerationChunk):
            generation_info = merge_dicts(
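A runnable sketch of the concatenation this method documents:

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

left = ChatGenerationChunk(message=AIMessageChunk(content="Hello, "))
right = ChatGenerationChunk(message=AIMessageChunk(content="world!"))

# __add__ merges the messages and generation_info of the two chunks.
combined = left + right
print(combined.message.content)  # Hello, world!
```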
@@ -124,13 +124,13 @@ class ChatGenerationChunk(ChatGeneration):
def merge_chat_generation_chunks(
    chunks: list[ChatGenerationChunk],
) -> ChatGenerationChunk | None:
-    """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.
+    """Merge a list of `ChatGenerationChunk`s into a single `ChatGenerationChunk`.

    Args:
-        chunks: A list of ``ChatGenerationChunk`` to merge.
+        chunks: A list of `ChatGenerationChunk` to merge.

    Returns:
-        A merged ``ChatGenerationChunk``, or None if the input list is empty.
+        A merged `ChatGenerationChunk`, or None if the input list is empty.
    """
    if not chunks:
        return None
@@ -44,10 +44,10 @@ class Generation(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

        Returns:
-            ``["langchain", "schema", "output"]``
+            `["langchain", "schema", "output"]`
        """
        return ["langchain", "schema", "output"]
@@ -56,16 +56,16 @@ class GenerationChunk(Generation):
    """Generation chunk, which can be concatenated with other Generation chunks."""

    def __add__(self, other: GenerationChunk) -> GenerationChunk:
-        """Concatenate two ``GenerationChunk``s.
+        """Concatenate two `GenerationChunk`s.

        Args:
-            other: Another ``GenerationChunk`` to concatenate with.
+            other: Another `GenerationChunk` to concatenate with.

        Raises:
-            TypeError: If other is not a ``GenerationChunk``.
+            TypeError: If other is not a `GenerationChunk`.

        Returns:
-            A new ``GenerationChunk`` concatenated from self and other.
+            A new `GenerationChunk` concatenated from self and other.
        """
        if isinstance(other, GenerationChunk):
            generation_info = merge_dicts(
@@ -30,8 +30,8 @@ class LLMResult(BaseModel):
    The second dimension of the list represents different candidate generations for a
    given prompt.

-    - When returned from **an LLM**, the type is ``list[list[Generation]]``.
-    - When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
+    - When returned from **an LLM**, the type is `list[list[Generation]]`.
+    - When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.

    ChatGeneration is a subclass of Generation that has a field for a structured chat
    message.
@@ -91,13 +91,13 @@ class LLMResult(BaseModel):
        return llm_results

    def __eq__(self, other: object) -> bool:
-        """Check for ``LLMResult`` equality by ignoring any metadata related to runs.
+        """Check for `LLMResult` equality by ignoring any metadata related to runs.

        Args:
-            other: Another ``LLMResult`` object to compare against.
+            other: Another `LLMResult` object to compare against.

        Returns:
-            True if the generations and ``llm_output`` are equal, False otherwise.
+            `True` if the generations and `llm_output` are equal, `False` otherwise.
        """
        if not isinstance(other, LLMResult):
            return NotImplemented
@@ -24,8 +24,8 @@ from langchain_core.messages import (
class PromptValue(Serializable, ABC):
    """Base abstract class for inputs to any language model.

-    PromptValues can be converted to both LLM (pure text-generation) inputs and
-    ChatModel inputs.
+    `PromptValues` can be converted to both LLM (pure text-generation) inputs and
+    chat model inputs.
    """

    @classmethod
@@ -35,12 +35,12 @@ class PromptValue(Serializable, ABC):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

        This is used to determine the namespace of the object when serializing.

        Returns:
-            ``["langchain", "schema", "prompt"]``
+            `["langchain", "schema", "prompt"]`
        """
        return ["langchain", "schema", "prompt"]
@@ -62,12 +62,12 @@ class StringPromptValue(PromptValue):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

        This is used to determine the namespace of the object when serializing.

        Returns:
-            ``["langchain", "prompts", "base"]``
+            `["langchain", "prompts", "base"]`
        """
        return ["langchain", "prompts", "base"]
@@ -99,12 +99,12 @@ class ChatPromptValue(PromptValue):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

        This is used to determine the namespace of the object when serializing.

        Returns:
-            ``["langchain", "prompts", "chat"]``
+            `["langchain", "prompts", "chat"]`
        """
        return ["langchain", "prompts", "chat"]
@@ -113,11 +113,11 @@ class ImageURL(TypedDict, total=False):
    """Image URL."""

    detail: Literal["auto", "low", "high"]
-    """Specifies the detail level of the image. Defaults to ``'auto'``.
-    Can be ``'auto'``, ``'low'``, or ``'high'``.
+    """Specifies the detail level of the image.
+
+    Can be `'auto'`, `'low'`, or `'high'`.

    This follows OpenAI's Chat Completion API's image URL format.

    """

    url: str
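A small sketch of an `ImageURL` value as this TypedDict describes it (the URL is illustrative):

```python
from langchain_core.prompt_values import ImageURL

image: ImageURL = {
    "url": "https://example.com/diagram.png",
    "detail": "low",  # one of 'auto', 'low', 'high', per OpenAI's format
}
```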
Some files were not shown because too many files have changed in this diff