Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-05 16:50:03 +00:00)

Compare commits: wrap_tool_... to redirect-n (90 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 2200524dc6 |  |
|  | a743f14fe2 |  |
|  | c6b3f5b888 |  |
|  | 15db024811 |  |
|  | b4b48e8ab4 |  |
|  | 6d73003b17 |  |
|  | 13259a109a |  |
|  | aa78be574a |  |
|  | d0dd1b30d1 |  |
|  | 0338a15192 |  |
|  | e10d99b728 |  |
|  | c9018f81ec |  |
|  | 31718492c7 |  |
|  | 2209878f48 |  |
|  | dd77dbe3ab |  |
|  | eb19e12527 |  |
|  | 551e86a517 |  |
|  | 8734c05f64 |  |
|  | 0c8cbfb7de |  |
|  | 89c3428d85 |  |
|  | 707e96c541 |  |
|  | 26e0a00c4c |  |
|  | d0f8f00e7e |  |
|  | a39132787c |  |
|  | 296994ebf0 |  |
|  | b5b31eec88 |  |
|  | 8f6851c349 |  |
|  | 0788461abd |  |
|  | 3bfd1f6d8a |  |
|  | d83c3a12bf |  |
|  | 79200cf3c2 |  |
|  | bcb6789888 |  |
|  | 89b7933ef1 |  |
|  | 4da5a8081f |  |
|  | 53e9f00804 |  |
|  | 6e25e185f6 |  |
|  | 68ceeb64f6 |  |
|  | edae976b81 |  |
|  | 9f4366bc9d |  |
|  | 99e0a60aab |  |
|  | d38729fbac |  |
|  | ff0d21cfd5 |  |
|  | 9140a7cb86 |  |
|  | 41fe18bc80 |  |
|  | 9105573cb3 |  |
|  | fff87e95d1 |  |
|  | 9beb29a34c |  |
|  | ca00f5aed9 |  |
|  | 637777b8e7 |  |
|  | 1cf851e054 |  |
|  | 961f965f0c |  |
|  | 760fc3bc12 |  |
|  | e3fc7d8aa6 |  |
|  | 2b3b209e40 |  |
|  | 78903ac285 |  |
|  | f361acc11c |  |
|  | ed185c0026 |  |
|  | 6dc34beb71 |  |
|  | c2205f88e6 |  |
|  | abdbe185c5 |  |
|  | c1b816cb7e |  |
|  | 0559558715 |  |
|  | 75965474fc |  |
|  | 5dc014fdf4 |  |
|  | 291a9fcea1 |  |
|  | dd994b9d7f |  |
|  | 83901b30e3 |  |
|  | bcfa21a6e7 |  |
|  | af1da28459 |  |
|  | ed2ee4e8cc |  |
|  | f293c8ffd6 |  |
|  | 714c370191 |  |
|  | a29d4e9c3a |  |
|  | 74983f8a96 |  |
|  | 11c5b86981 |  |
|  | 383f4c0ee9 |  |
|  | 045e7ad4a1 |  |
|  | 0e80291804 |  |
|  | c99773b652 |  |
|  | 5f9e3e33cd |  |
|  | 6fc21afbc9 |  |
|  | 50445d4a27 |  |
|  | 11a2efe49b |  |
|  | d8a680ee57 |  |
|  | f405a2c57d |  |
|  | 3576e690fa |  |
|  | 057ac361ef |  |
|  | d9675a4a20 |  |
|  | a7ae0e627f |  |
|  | f75dd1c17e |  |
````diff
@@ -1,18 +0,0 @@
-{
-  "permissions": {
-    "allow": [
-      "Bash(uv run:*)",
-      "Bash(make:*)",
-      "WebSearch",
-      "WebFetch(domain:ai.pydantic.dev)",
-      "WebFetch(domain:openai.github.io)",
-      "Bash(uv run:*)",
-      "Bash(python3:*)",
-      "WebFetch(domain:github.com)",
-      "Bash(gh pr view:*)",
-      "Bash(gh pr diff:*)"
-    ],
-    "deny": [],
-    "ask": []
-  }
-}
````
.github/workflows/check_diffs.yml (vendored, 2 changes)

````diff
@@ -186,7 +186,7 @@ jobs:
 
       # We have to use 3.12 as 3.13 is not yet supported
       - name: "📦 Install UV Package Manager"
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
         with:
           python-version: "3.12"
 
````
.github/workflows/integration_tests.yml (vendored, 23 changes)

````diff
@@ -23,10 +23,8 @@ permissions:
   contents: read
 
 env:
-  POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
   DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
-  POETRY_LIBS: ("libs/partners/aws")
 
 jobs:
   # Generate dynamic test matrix based on input parameters or defaults
@@ -60,7 +58,6 @@ jobs:
           echo $matrix
           echo "matrix=$matrix" >> $GITHUB_OUTPUT
   # Run integration tests against partner libraries with live API credentials
-  # Tests are run with Poetry or UV depending on the library's setup
   build:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"
@@ -95,17 +92,7 @@
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
           mv langchain-aws/libs/aws langchain/libs/partners/aws
 
-      - name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        uses: "./langchain/.github/actions/poetry_setup"
-        with:
-          python-version: ${{ matrix.python-version }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: langchain/${{ matrix.working-directory }}
-          cache-key: scheduled
-
       - name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
         uses: "./langchain/.github/actions/uv_setup"
         with:
           python-version: ${{ matrix.python-version }}
@@ -123,15 +110,7 @@
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           aws-region: ${{ secrets.AWS_REGION }}
 
-      - name: "📦 Install Dependencies (Poetry)"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        run: |
-          echo "Running scheduled tests, installing dependencies with poetry..."
-          cd langchain/${{ matrix.working-directory }}
-          poetry install --with=test_integration,test
-
-      - name: "📦 Install Dependencies (UV)"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
+      - name: "📦 Install Dependencies"
         run: |
          echo "Running scheduled tests, installing dependencies with uv..."
          cd langchain/${{ matrix.working-directory }}
````
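The matrix job above consumes `DEFAULT_LIBS` as a plain JSON string. A minimal sketch of that parsing step outside of any workflow, using only the standard library (the variable names are illustrative, not taken from the workflow's scripts):

```python
import json

# DEFAULT_LIBS exactly as declared in the workflow's env block above.
default_libs = (
    '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", '
    '"libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", '
    '"libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
)

# The matrix-generation step ultimately yields one job per working directory.
for working_directory in json.loads(default_libs):
    print(working_directory)
```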
AGENTS.md (10 changes)

````diff
@@ -149,23 +149,25 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
     Args:
         to: The email address of the recipient.
         msg: The message body to send.
-        priority: Email priority level (`'low'`, ``'normal'``, `'high'`).
+        priority: Email priority level (`'low'`, `'normal'`, `'high'`).
 
     Returns:
-        True if email was sent successfully, False otherwise.
+        `True` if email was sent successfully, `False` otherwise.
 
     Raises:
-        InvalidEmailError: If the email address format is invalid.
-        SMTPConnectionError: If unable to connect to email server.
+        `InvalidEmailError`: If the email address format is invalid.
+        `SMTPConnectionError`: If unable to connect to email server.
     """
 ```
 
 **Documentation Guidelines:**
 
 - Types go in function signatures, NOT in docstrings
+- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
 - Focus on "why" rather than "what" in descriptions
 - Document all parameters, return values, and exceptions
 - Keep descriptions concise but clear
 - Ensure American English spelling (e.g., "behavior", not "behaviour")
 
+📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
 
````
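The guideline block above is easier to apply with a concrete case. Below is a minimal sketch of a docstring written to the updated conventions; the function and its exception are hypothetical, invented only for illustration:

```python
def archive_report(report_id: str, *, compress: bool = True) -> bool:
    """Archive a generated report for long-term storage.

    Args:
        report_id: Identifier of the report to archive.
        compress: Whether to gzip the report before upload.

    Returns:
        `True` if the report was archived, `False` otherwise.

    Raises:
        `ReportNotFoundError`: If no report matches `report_id`.
    """
    ...  # hypothetical body, omitted
```

Note that the default for `compress` is not restated in the docstring, literals and exception names are backticked, and types live only in the signature.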
CLAUDE.md (10 changes)

The CLAUDE.md hunk is identical to the AGENTS.md hunk above: the same `@@ -149,23 +149,25 @@` changes to the `send_email` docstring example and the documentation guidelines.
README.md (30 changes)

````diff
@@ -12,13 +12,16 @@
 <p align="center">
   <a href="https://opensource.org/licenses/MIT" target="_blank">
-    <img src="https://img.shields.io/pypi/l/langchain-core?style=flat-square" alt="PyPI - License">
+    <img src="https://img.shields.io/pypi/l/langchain" alt="PyPI - License">
   </a>
-  <a href="https://pypistats.org/packages/langchain-core" target="_blank">
+  <a href="https://pypistats.org/packages/langchain" target="_blank">
     <img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads">
   </a>
+  <a href="https://pypi.org/project/langchain/#history" target="_blank">
+    <img src="https://img.shields.io/pypi/v/langchain?label=%20" alt="Version">
+  </a>
   <a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank">
-    <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square" alt="Open in Dev Containers">
+    <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode" alt="Open in Dev Containers">
   </a>
   <a href="https://codespaces.new/langchain-ai/langchain" target="_blank">
     <img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20">
   </a>
@@ -34,14 +37,14 @@
 LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.
 
 ```bash
-pip install -U langchain
+pip install langchain
 ```
 
 ---
 
-**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
+**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/oss/python/langchain/overview).
 
-If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
+If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.
 
 > [!NOTE]
 > Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -62,16 +65,13 @@ While the LangChain framework can be used standalone, it also integrates seamles
 To improve your LLM application development, pair LangChain with:
 
 - [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
-- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
-- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
+- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
+- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio).
 
 ## Additional resources
 
-- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key
-concepts behind the LangChain framework.
-- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with
-guided examples on getting started with LangChain.
-- [API Reference](https://reference.langchain.com/python/): Detailed reference on
+- [Learn](https://docs.langchain.com/oss/python/learn): Use cases, conceptual overviews, and more.
+- [API Reference](https://reference.langchain.com/python): Detailed reference on
 navigating base packages and integrations for LangChain.
-- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
-- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
+- [LangChain Forum](https://forum.langchain.com): Connect with the community and share all of your technical questions, ideas, and feedback.
+- [Chat LangChain](https://chat.langchain.com): Ask questions & chat with our documentation.
````
````diff
@@ -1,6 +1,30 @@
 # langchain-cli
 
-This package implements the official CLI for LangChain. Right now, it is most useful
-for getting started with LangChain Templates!
+[](https://pypi.org/project/langchain-cli/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-cli)
+[](https://twitter.com/langchainai)
 
 ## Quick Install
+
+```bash
+pip install langchain-cli
+```
+
+## 🤔 What is this?
+
+This package implements the official CLI for LangChain. Right now, it is most useful for getting started with LangChain Templates!
+
+## 📖 Documentation
+
+[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md)
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
````
````diff
@@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab
 ```python
 from __module_name__ import Chat__ModuleName__
 
-llm = Chat__ModuleName__()
-llm.invoke("Sing a ballad of LangChain.")
+model = Chat__ModuleName__()
+model.invoke("Sing a ballad of LangChain.")
 ```
 
 ## Embeddings
@@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from __module_name__ import __ModuleName__LLM
 
-llm = __ModuleName__LLM()
-llm.invoke("The meaning of life is")
+model = __ModuleName__LLM()
+model.invoke("The meaning of life is")
 ```
````
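The hunks above standardize the template snippets on a `model` variable name. For illustration only, here is the renamed pattern with a concrete provider substituted for the `__ModuleName__` placeholder; it assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set:

```python
# Hypothetical instantiation of the renamed template with a real provider.
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini")  # any chat model integration fits here
print(model.invoke("Sing a ballad of LangChain.").content)
```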
````diff
@@ -1,262 +1,264 @@
 {
  "cells": [
   {
    "cell_type": "raw",
    "id": "afaf8039",
    "metadata": {},
    "source": [
     "---\n",
     "sidebar_label: __ModuleName__\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e49f1e0d",
    "metadata": {},
    "source": [
     "# Chat__ModuleName__\n",
     "\n",
     "- TODO: Make sure API reference link is correct.\n",
     "\n",
     "This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
     "\n",
     "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
     "\n",
     "## Overview\n",
     "### Integration details\n",
     "\n",
     "- TODO: Fill in table features.\n",
     "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
     "- TODO: Make sure API reference links are correct.\n",
     "\n",
     "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
     "| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
     "\n",
     "### Model features\n",
     "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
     "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
     "| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
     "\n",
     "## Setup\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
     "\n",
     "### Credentials\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
    "metadata": {},
    "outputs": [],
    "source": [
     "import getpass\n",
     "import os\n",
     "\n",
     "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
     "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
     "        \"Enter your __ModuleName__ API key: \"\n",
     "    )"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
    "metadata": {},
    "outputs": [],
    "source": [
     "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
     "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
    "metadata": {},
    "source": [
     "### Installation\n",
     "\n",
     "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
    "metadata": {},
    "outputs": [],
    "source": [
     "%pip install -qU __package_name__"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "a38cde65-254d-4219-a441-068766c0d4b5",
    "metadata": {},
    "source": [
     "## Instantiation\n",
     "\n",
     "Now we can instantiate our model object and generate chat completions:\n",
     "\n",
     "- TODO: Update model instantiation with relevant params."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
    "metadata": {},
    "outputs": [],
    "source": [
     "from __module_name__ import Chat__ModuleName__\n",
     "\n",
-    "llm = Chat__ModuleName__(\n",
+    "model = Chat__ModuleName__(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
     "    timeout=None,\n",
     "    max_retries=2,\n",
     "    # other params...\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "2b4f3e15",
    "metadata": {},
    "source": [
     "## Invocation\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "62e0dbc3",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
     "messages = [\n",
     "    (\n",
     "        \"system\",\n",
     "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
     "    ),\n",
     "    (\"human\", \"I love programming.\"),\n",
     "]\n",
-    "ai_msg = llm.invoke(messages)\n",
+    "ai_msg = model.invoke(messages)\n",
     "ai_msg"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
    "metadata": {},
    "outputs": [],
    "source": [
     "print(ai_msg.content)"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
    "metadata": {},
    "source": [
     "## Chaining\n",
     "\n",
     "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "\n",
     "prompt = ChatPromptTemplate(\n",
     "    [\n",
     "        (\n",
     "            \"system\",\n",
     "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
     "        ),\n",
     "        (\"human\", \"{input}\"),\n",
     "    ]\n",
     ")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"input_language\": \"English\",\n",
     "        \"output_language\": \"German\",\n",
     "        \"input\": \"I love programming.\",\n",
     "    }\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
    "metadata": {},
    "source": [
     "## TODO: Any functionality specific to this model provider\n",
     "\n",
     "E.g. creating/using finetuned models via this provider. Delete if not relevant."
    ]
   },
   {
    "cell_type": "markdown",
    "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
    "metadata": {},
    "source": [
     "## API reference\n",
     "\n",
     "For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.11.9"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 5
 }
````
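The chat template above renames `llm` to `model` and normalizes the tracing cell's `source` to list form. As a runnable illustration of the renamed chaining cell, with ChatOpenAI standing in for the `Chat__ModuleName__` placeholder (`gpt-4o-mini` is an arbitrary model choice; assumes `langchain-openai` and credentials are set up):

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Same prompt shape as the template's chaining cell.
prompt = ChatPromptTemplate(
    [
        ("system", "You are a helpful assistant that translates {input_language} to {output_language}."),
        ("human", "{input}"),
    ]
)
model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

chain = prompt | model
result = chain.invoke(
    {"input_language": "English", "output_language": "German", "input": "I love programming."}
)
print(result.content)
```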
````diff
@@ -1,236 +1,238 @@
 {
  "cells": [
   {
    "cell_type": "raw",
    "id": "67db2992",
    "metadata": {},
    "source": [
     "---\n",
     "sidebar_label: __ModuleName__\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "9597802c",
    "metadata": {},
    "source": [
     "# __ModuleName__LLM\n",
     "\n",
     "- [ ] TODO: Make sure API reference link is correct\n",
     "\n",
     "This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
     "\n",
     "## Overview\n",
     "### Integration details\n",
     "\n",
     "- TODO: Fill in table features.\n",
     "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
     "- TODO: Make sure API reference links are correct.\n",
     "\n",
     "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
     "| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
     "\n",
     "## Setup\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
     "\n",
     "### Credentials\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "bc51e756",
    "metadata": {},
    "outputs": [],
    "source": [
     "import getpass\n",
     "import os\n",
     "\n",
     "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
     "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
     "        \"Enter your __ModuleName__ API key: \"\n",
     "    )"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "4b6e1ca6",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "196c2b41",
    "metadata": {},
    "outputs": [],
    "source": [
     "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
     "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "809c6577",
    "metadata": {},
    "source": [
     "### Installation\n",
     "\n",
     "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "59c710c4",
    "metadata": {},
    "outputs": [],
    "source": [
     "%pip install -qU __package_name__"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0a760037",
    "metadata": {},
    "source": [
     "## Instantiation\n",
     "\n",
     "Now we can instantiate our model object and generate chat completions:\n",
     "\n",
     "- TODO: Update model instantiation with relevant params."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "a0562a13",
    "metadata": {},
    "outputs": [],
    "source": [
     "from __module_name__ import __ModuleName__LLM\n",
     "\n",
-    "llm = __ModuleName__LLM(\n",
+    "model = __ModuleName__LLM(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
     "    timeout=None,\n",
     "    max_retries=2,\n",
     "    # other params...\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "0ee90032",
    "metadata": {},
    "source": [
     "## Invocation\n",
     "\n",
     "- [ ] TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "035dea0f",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
     "input_text = \"__ModuleName__ is an AI company that \"\n",
     "\n",
-    "completion = llm.invoke(input_text)\n",
+    "completion = model.invoke(input_text)\n",
     "completion"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "add38532",
    "metadata": {},
    "source": [
     "## Chaining\n",
     "\n",
     "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
     "\n",
     "- TODO: Run cells so output can be seen."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "078e9db2",
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"output_language\": \"German\",\n",
     "        \"input\": \"I love programming.\",\n",
     "    }\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e99eef30",
    "metadata": {},
    "source": [
     "## TODO: Any functionality specific to this model provider\n",
     "\n",
     "E.g. creating/using finetuned models via this provider. Delete if not relevant"
    ]
   },
   {
    "cell_type": "markdown",
    "id": "e9bdfcef",
    "metadata": {},
    "source": [
     "## API reference\n",
     "\n",
     "For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
    "display_name": "Python 3.11.1 64-bit",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.9.7"
   },
   "vscode": {
    "interpreter": {
     "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
    }
   }
  },
  "nbformat": 4,
  "nbformat_minor": 5
 }
````
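The completion-model template follows the same rename. In the sketch below, `OpenAI` from `langchain-openai` stands in for the `__ModuleName__LLM` placeholder, and the prompt is built with `PromptTemplate.from_template` rather than the template's positional call; the model name and credentials setup are assumptions for illustration:

```python
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

# Equivalent prompt to the template's chaining cell.
prompt = PromptTemplate.from_template("How to say {input} in {output_language}:\n")
model = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0)

chain = prompt | model
print(chain.invoke({"output_language": "German", "input": "I love programming."}))
```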
````diff
@@ -155,7 +155,7 @@
     "\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
    ]
   },
   {
@@ -185,7 +185,7 @@
     "chain = (\n",
     "    {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
     "    | prompt\n",
-    "    | llm\n",
+    "    | model\n",
     "    | StrOutputParser()\n",
     ")"
    ]
````
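The second hunk edits an LCEL retrieval chain. Here is a self-contained sketch of the same chain shape, with a stand-in retriever function replacing the notebook's vector-store retriever (which is not shown in this diff); apart from the `langchain_core` and `langchain_openai` imports, the names and data are illustrative:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI

# Stand-in retriever: a real notebook would use a vector store retriever here.
def fake_retriever(question: str) -> list[str]:
    return ["LangChain is a framework for building LLM-powered applications."]

def format_docs(docs: list[str]) -> str:
    return "\n\n".join(docs)

prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)
model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)

# Same shape as the notebook's chain, with `model` in place of `llm`.
chain = (
    {"context": RunnableLambda(fake_retriever) | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

print(chain.invoke("What is LangChain?"))
```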
````diff
@@ -1,204 +1,204 @@
 {
  "cells": [
   {
    "cell_type": "raw",
    "metadata": {
     "vscode": {
      "languageId": "raw"
     }
    },
    "source": [
     "---\n",
     "sidebar_label: __ModuleName__ByteStore\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "# __ModuleName__ByteStore\n",
     "\n",
     "- TODO: Make sure API reference link is correct.\n",
     "\n",
     "This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
     "\n",
     "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
     "\n",
     "## Overview\n",
     "\n",
     "- TODO: (Optional) A short introduction to the underlying technology/API.\n",
     "\n",
     "### Integration details\n",
     "\n",
     "- TODO: Fill in table features.\n",
     "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
     "- TODO: Make sure API reference links are correct.\n",
     "\n",
     "| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: |\n",
     "| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ |  |  |\n",
     "\n",
     "## Setup\n",
     "\n",
     "- TODO: Update with relevant info.\n",
     "\n",
     "To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
     "\n",
     "### Credentials\n",
     "\n",
     "- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
     "\n",
     "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import getpass\n",
     "import os\n",
     "\n",
     "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
     "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
     "        \"Enter your __ModuleName__ API key: \"\n",
     "    )"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "### Installation\n",
     "\n",
     "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "%pip install -qU __package_name__"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## Instantiation\n",
     "\n",
     "Now we can instantiate our byte store:\n",
     "\n",
     "- TODO: Update model instantiation with relevant params."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "from __module_name__ import __ModuleName__ByteStore\n",
     "\n",
     "kv_store = __ModuleName__ByteStore(\n",
     "    # params...\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## Usage\n",
     "\n",
     "- TODO: Run cells so output can be seen.\n",
     "\n",
     "You can set data under keys like this using the `mset` method:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "kv_store.mset(\n",
     "    [\n",
     "        [\"key1\", b\"value1\"],\n",
     "        [\"key2\", b\"value2\"],\n",
     "    ]\n",
     ")\n",
     "\n",
     "kv_store.mget(\n",
     "    [\n",
     "        \"key1\",\n",
     "        \"key2\",\n",
     "    ]\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "And you can delete data using the `mdelete` method:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "kv_store.mdelete(\n",
     "    [\n",
     "        \"key1\",\n",
     "        \"key2\",\n",
     "    ]\n",
     ")\n",
     "\n",
     "kv_store.mget(\n",
     "    [\n",
     "        \"key1\",\n",
     "        \"key2\",\n",
     "    ]\n",
     ")"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## TODO: Any functionality specific to this key-value store provider\n",
     "\n",
     "E.g. extra initialization. Delete if not relevant."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## API reference\n",
     "\n",
     "For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
    "display_name": "Python 3",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "name": "python",
    "version": "3.10.5"
   }
  },
````
|
||||
"kv_store.mdelete(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"kv_store.mget(\n",
|
||||
" [\n",
|
||||
" \"key1\",\n",
|
||||
" \"key2\",\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## TODO: Any functionality specific to this key-value store provider\n",
|
||||
"\n",
|
||||
"E.g. extra initialization. Delete if not relevant."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
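For a concrete, runnable counterpart to the templated `__ModuleName__ByteStore` usage above, a minimal sketch against `InMemoryByteStore` (a real langchain-core store exposing the same `mset`/`mget`/`mdelete` interface) might look like this:

```python
from langchain_core.stores import InMemoryByteStore

kv_store = InMemoryByteStore()

# Set values under keys, read them back, then delete them.
kv_store.mset([("key1", b"value1"), ("key2", b"value2")])
print(kv_store.mget(["key1", "key2"]))  # [b'value1', b'value2']

kv_store.mdelete(["key1", "key2"])
print(kv_store.mget(["key1", "key2"]))  # [None, None]
```
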
@@ -1,271 +1,271 @@

{
 "cells": [
  {
   "cell_type": "raw",
   "id": "10238e62-3465-4973-9279-606cbb7ccf16",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a6f91f20",
   "metadata": {},
   "source": [
    "# __ModuleName__\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about underlying API, etc.\n",
    "\n",
    "## Overview\n",
    "\n",
    "### Integration details\n",
    "\n",
    "- TODO: Make sure links and features are correct\n",
    "\n",
    "| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-community?style=flat-square&label=%20) |\n",
    "\n",
    "### Tool features\n",
    "\n",
    "- TODO: Add feature table if it makes sense\n",
    "\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Add any additional deps\n",
    "\n",
    "The integration lives in the `langchain-community` package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f85b4089",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install --quiet -U langchain-community"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b15e9266",
   "metadata": {},
   "source": [
    "### Credentials\n",
    "\n",
    "- TODO: Add any credentials that are needed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
    "#     os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bc5ab717-fd27-4c59-b912-bdd099541478",
   "metadata": {},
   "source": [
    "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "- TODO: Fill in instantiation params\n",
    "\n",
    "Here we show how to instantiate an instance of the __ModuleName__ tool, with "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.tools import __ModuleName__\n",
    "\n",
    "\n",
    "tool = __ModuleName__(...)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "74147a1a",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
    "\n",
    "- TODO: Describe what the tool args are, fill them in, run cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "tool.invoke({...})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d6e73897",
   "metadata": {},
   "source": [
    "### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
    "\n",
    "We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
    "\n",
    "- TODO: Fill in tool args and run cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f90e33a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
    "model_generated_tool_call = {\n",
    "    \"args\": {...},  # TODO: FILL IN\n",
    "    \"id\": \"1\",\n",
    "    \"name\": tool.name,\n",
    "    \"type\": \"tool_call\",\n",
    "}\n",
    "tool.invoke(model_generated_tool_call)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
   "metadata": {},
   "source": [
    "## Use within an agent\n",
    "\n",
    "- TODO: Add user question and run cells\n",
    "\n",
    "We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
    "\n",
    "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
    "\n",
    "<ChatModelTabs customVarName=\"llm\" />\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | output: false\n",
    "# | echo: false\n",
    "\n",
    "# !pip install -qU langchain langchain-openai\n",
    "from langchain.chat_models import init_chat_model\n",
    "\n",
    "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bea35fa1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langgraph.prebuilt import create_react_agent\n",
    "\n",
    "tools = [tool]\n",
    "agent = create_react_agent(llm, tools)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
   "metadata": {},
   "outputs": [],
   "source": [
    "example_query = \"...\"\n",
    "\n",
    "events = agent.stream(\n",
    "    {\"messages\": [(\"user\", example_query)]},\n",
    "    stream_mode=\"values\",\n",
    ")\n",
    "for event in events:\n",
    "    event[\"messages\"][-1].pretty_print()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4ac8146c",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "poetry-venv-311",
   "language": "python",
   "name": "poetry-venv-311"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

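The invocation patterns in the template above work for any `BaseTool`; here is a self-contained sketch using the `@tool` decorator from langchain-core (the `multiply` tool is a stand-in invented for illustration, not part of the template):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# Invoke directly with args.
print(multiply.invoke({"a": 6, "b": 7}))  # 42

# Invoke with a model-style ToolCall dict; a ToolMessage is returned.
model_generated_tool_call = {
    "args": {"a": 6, "b": 7},
    "id": "1",
    "name": multiply.name,
    "type": "tool_call",
}
print(multiply.invoke(model_generated_tool_call))
```
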
@@ -26,13 +26,13 @@ class Chat__ModuleName__(BaseChatModel):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args — completion params:

@@ -57,216 +57,214 @@ class Chat__ModuleName__(BaseChatModel):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __module_name__ import Chat__ModuleName__

        model = Chat__ModuleName__(
            model="...",
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
            # api_key="...",
            # other params...
        )
        ```

    Invoke:
        ```python
        messages = [
            ("system", "You are a helpful translator. Translate the user sentence to French."),
            ("human", "I love programming."),
        ]
        model.invoke(messages)
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if token-level streaming isn't supported.
    Stream:
        ```python
        for chunk in model.stream(messages):
            print(chunk.text, end="")
        ```

        ```python
        # TODO: Example output.
        ```

        ```python
        stream = model.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
        full
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if native async isn't supported.
    Async:
        ```python
        await model.ainvoke(messages)

        # stream:
        # async for chunk in (await model.astream(messages))

        # batch:
        # await model.abatch([messages])
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if .bind_tools() isn't supported.
    Tool calling:
        ```python
        from pydantic import BaseModel, Field

        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

        class GetPopulation(BaseModel):
            '''Get the current population in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

        model_with_tools = model.bind_tools([GetWeather, GetPopulation])
        ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
        ai_msg.tool_calls
        ```

        ```python
        # TODO: Example output.
        ```

    See `Chat__ModuleName__.bind_tools()` method for more.

    # TODO: Delete if .with_structured_output() isn't supported.
    Structured output:
        ```python
        from typing import Optional

        from pydantic import BaseModel, Field

        class Joke(BaseModel):
            '''Joke to tell user.'''

            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")
            rating: int | None = Field(description="How funny the joke is, from 1 to 10")

        structured_model = model.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats")
        ```

        ```python
        # TODO: Example output.
        ```

    See `Chat__ModuleName__.with_structured_output()` for more.

    # TODO: Delete if JSON mode response format isn't supported.
    JSON mode:
        ```python
        # TODO: Replace with appropriate bind arg.
        json_model = model.bind(response_format={"type": "json_object"})
        ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
        ai_msg.content
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if image inputs aren't supported.
    Image input:
        ```python
        import base64
        import httpx
        from langchain_core.messages import HumanMessage

        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        # TODO: Replace with appropriate message content format.
        message = HumanMessage(
            content=[
                {"type": "text", "text": "describe the weather in this image"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                },
            ],
        )
        ai_msg = model.invoke([message])
        ai_msg.content
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if audio inputs aren't supported.
    Audio input:
        ```python
        # TODO: Example input
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Delete if video inputs aren't supported.
    Video input:
        ```python
        # TODO: Example input
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Delete if token usage metadata isn't supported.
    Token usage:
        ```python
        ai_msg = model.invoke(messages)
        ai_msg.usage_metadata
        ```

        ```python
        {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
        ```

    # TODO: Delete if logprobs aren't supported.
    Logprobs:
        ```python
        # TODO: Replace with appropriate bind arg.
        logprobs_model = model.bind(logprobs=True)
        ai_msg = logprobs_model.invoke(messages)
        ai_msg.response_metadata["logprobs"]
        ```

        ```python
        # TODO: Example output.
        ```

    Response metadata:
        ```python
        ai_msg = model.invoke(messages)
        ai_msg.response_metadata
        ```

        ```python
        # TODO: Example output.
        ```
    """ # noqa: E501

    model_name: str = Field(alias="model")

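Since `Chat__ModuleName__` is only a template, the docstring's `invoke` pattern can be exercised end to end with the fake chat model that ships with langchain-core; a minimal sketch (the canned French response is invented for illustration):

```python
from langchain_core.language_models import FakeListChatModel

# A fake model that replays canned responses, standing in for Chat__ModuleName__.
model = FakeListChatModel(responses=["J'adore la programmation."])

messages = [
    ("system", "You are a helpful translator. Translate the user sentence to French."),
    ("human", "I love programming."),
]
print(model.invoke(messages).content)  # J'adore la programmation.
```
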
@@ -314,11 +312,11 @@ class Chat__ModuleName__(BaseChatModel):

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                If generation stops due to a stop token, the stop token itself
                SHOULD BE INCLUDED as part of the output. This is not enforced
                across models right now, but it's a good practice to follow since
                it makes it much easier to parse the output of the model
                downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.
        """
        # Replace this with actual logic to generate a response from a list

@@ -362,11 +360,11 @@ class Chat__ModuleName__(BaseChatModel):

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                If generation stops due to a stop token, the stop token itself
                SHOULD BE INCLUDED as part of the output. This is not enforced
                across models right now, but it's a good practice to follow since
                it makes it much easier to parse the output of the model
                downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.
        """
        last_message = messages[-1]

@@ -14,55 +14,55 @@ class __ModuleName__Loader(BaseLoader):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from langchain_community.document_loaders import __ModuleName__Loader

        loader = __ModuleName__Loader(
            # required params = ...
            # other params = ...
        )
        ```

    Lazy load:
        ```python
        docs = []
        docs_lazy = loader.lazy_load()

        # async variant:
        # docs_lazy = await loader.alazy_load()

        for doc in docs_lazy:
            docs.append(doc)
        print(docs[0].page_content[:100])
        print(docs[0].metadata)
        ```

        ```python
        TODO: Example output
        ```

    # TODO: Delete if async load is not implemented
    Async load:
        ```python
        docs = await loader.aload()
        print(docs[0].page_content[:100])
        print(docs[0].metadata)
        ```

        ```python
        TODO: Example output
        ```
    """

    # TODO: This method must be implemented to load documents.

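The loader template's `lazy_load` contract can be seen in a self-contained sketch; `ToyLoader` below is hypothetical, but the `BaseLoader` base class and `Document` type are real langchain-core APIs:

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class ToyLoader(BaseLoader):
    """Hypothetical loader: yields one Document per input string."""

    def __init__(self, texts: list[str]) -> None:
        self.texts = texts

    def lazy_load(self) -> Iterator[Document]:
        for text in self.texts:
            yield Document(page_content=text, metadata={"length": len(text)})


docs = list(ToyLoader(["hello", "world"]).lazy_load())
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
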
@@ -8,13 +8,13 @@ class __ModuleName__Embeddings(Embeddings):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args — completion params:

@@ -25,50 +25,50 @@ class __ModuleName__Embeddings(Embeddings):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __module_name__ import __ModuleName__Embeddings

        embed = __ModuleName__Embeddings(
            model="...",
            # api_key="...",
            # other params...
        )
        ```

    Embed single text:
        ```python
        input_text = "The meaning of life is 42"
        embed.embed_query(input_text)
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if token-level streaming isn't supported.
    Embed multiple text:
        ```python
        input_texts = ["Document 1...", "Document 2..."]
        embed.embed_documents(input_texts)
        ```

        ```python
        # TODO: Example output.
        ```

    # TODO: Delete if native async isn't supported.
    Async:
        ```python
        await embed.aembed_query(input_text)

        # multiple:
        # await embed.aembed_documents(input_texts)
        ```

        ```python
        # TODO: Example output.
        ```
    """

    def __init__(self, model: str):

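To try the embeddings interface shown above without a provider account, langchain-core's deterministic fake embeddings expose the same `embed_query`/`embed_documents` methods; a sketch:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding

# Deterministic fake vectors; swap in a real provider class in practice.
embed = DeterministicFakeEmbedding(size=8)

vector = embed.embed_query("The meaning of life is 42")
vectors = embed.embed_documents(["Document 1...", "Document 2..."])
print(len(vector), len(vectors))  # 8 2
```
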
@@ -14,13 +14,13 @@ class __ModuleName__Retriever(BaseRetriever):

    # TODO: Replace with relevant packages, env vars, etc.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args:

@@ -31,58 +31,58 @@ class __ModuleName__Retriever(BaseRetriever):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __package_name__ import __ModuleName__Retriever

        retriever = __ModuleName__Retriever(
            # ...
        )
        ```

    Usage:
        ```python
        query = "..."

        retriever.invoke(query)
        ```

        ```txt
        # TODO: Example output.
        ```

    Use within a chain:
        ```python
        from langchain_core.output_parsers import StrOutputParser
        from langchain_core.prompts import ChatPromptTemplate
        from langchain_core.runnables import RunnablePassthrough
        from langchain_openai import ChatOpenAI

        prompt = ChatPromptTemplate.from_template(
            \"\"\"Answer the question based only on the context provided.

        Context: {context}

        Question: {question}\"\"\"
        )

        model = ChatOpenAI(model="gpt-3.5-turbo-0125")

        def format_docs(docs):
            return "\\n\\n".join(doc.page_content for doc in docs)

        chain = (
            {"context": retriever | format_docs, "question": RunnablePassthrough()}
            | prompt
            | model
            | StrOutputParser()
        )

        chain.invoke("...")
        ```

        ```txt
        # TODO: Example output.
        ```

    """

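The retriever template boils down to implementing `_get_relevant_documents`; a minimal self-contained sketch (`ToyRetriever` is hypothetical, while the base class and hook are langchain-core's):

```python
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class ToyRetriever(BaseRetriever):
    """Hypothetical retriever: returns stored documents containing the query."""

    docs: list[Document]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        return [doc for doc in self.docs if query in doc.page_content]


retriever = ToyRetriever(docs=[Document(page_content="thud"), Document(page_content="foo")])
print(retriever.invoke("thud"))
```
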
@@ -12,13 +12,13 @@ class __ModuleName__Toolkit(BaseToolkit):

    # TODO: Replace with relevant packages, env vars, etc.
    Setup:
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args:

@@ -29,42 +29,42 @@ class __ModuleName__Toolkit(BaseToolkit):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __package_name__ import __ModuleName__Toolkit

        toolkit = __ModuleName__Toolkit(
            # ...
        )
        ```

    Tools:
        ```python
        toolkit.get_tools()
        ```

        ```txt
        # TODO: Example output.
        ```

    Use within an agent:
        ```python
        from langgraph.prebuilt import create_react_agent

        agent_executor = create_react_agent(llm, tools)

        example_query = "..."

        events = agent_executor.stream(
            {"messages": [("user", example_query)]},
            stream_mode="values",
        )
        for event in events:
            event["messages"][-1].pretty_print()
        ```

        ```txt
        # TODO: Example output.
        ```

    """

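A toolkit is just a bundle of tools behind `get_tools()`; here is a self-contained sketch with langchain-core's `BaseToolkit` (the `add` tool and `ToyToolkit` are invented for illustration):

```python
from langchain_core.tools import BaseTool, BaseToolkit, tool


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


class ToyToolkit(BaseToolkit):
    """Hypothetical toolkit exposing a single tool."""

    def get_tools(self) -> list[BaseTool]:
        return [add]


print([t.name for t in ToyToolkit().get_tools()])  # ['add']
```
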
@@ -27,42 +27,42 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]

    Setup:
        # TODO: Replace with relevant packages, env vars.
        Install `__package_name__` and set environment variable
        `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    Instantiation:
        ```python
        tool = __ModuleName__Tool(
            # TODO: init params
        )
        ```

    Invocation with args:
        ```python
        # TODO: invoke args
        tool.invoke({...})
        ```

        ```python
        # TODO: output of invocation
        ```

    Invocation with ToolCall:
        ```python
        # TODO: invoke args
        tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
        ```

        ```python
        # TODO: output of invocation
        ```
    """ # noqa: E501

    # TODO: Set tool name and description

@@ -28,12 +28,12 @@ class __ModuleName__VectorStore(VectorStore):

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install `__package_name__` and set environment variable `__MODULE_NAME___API_KEY`.

        ```bash
        pip install -U __package_name__
        export __MODULE_NAME___API_KEY="your-api-key"
        ```

    # TODO: Populate with relevant params.
    Key init args — indexing params:

@@ -51,110 +51,110 @@ class __ModuleName__VectorStore(VectorStore):

    # TODO: Replace with relevant init params.
    Instantiate:
        ```python
        from __module_name__.vectorstores import __ModuleName__VectorStore
        from langchain_openai import OpenAIEmbeddings

        vector_store = __ModuleName__VectorStore(
            collection_name="foo",
            embedding_function=OpenAIEmbeddings(),
            connection_args={"uri": "./foo.db"},
            # other params...
        )
        ```

    # TODO: Populate with relevant variables.
    Add Documents:
        ```python
        from langchain_core.documents import Document

        document_1 = Document(page_content="foo", metadata={"baz": "bar"})
        document_2 = Document(page_content="thud", metadata={"bar": "baz"})
        document_3 = Document(page_content="i will be deleted :(")

        documents = [document_1, document_2, document_3]
        ids = ["1", "2", "3"]
        vector_store.add_documents(documents=documents, ids=ids)
        ```

    # TODO: Populate with relevant variables.
    Delete Documents:
        ```python
        vector_store.delete(ids=["3"])
        ```

    # TODO: Fill out with relevant variables and example output.
    Search:
        ```python
        results = vector_store.similarity_search(query="thud", k=1)
        for doc in results:
            print(f"* {doc.page_content} [{doc.metadata}]")
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Fill out with relevant variables and example output.
    Search with filter:
        ```python
        results = vector_store.similarity_search(query="thud", k=1, filter={"bar": "baz"})
        for doc in results:
            print(f"* {doc.page_content} [{doc.metadata}]")
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Fill out with relevant variables and example output.
    Search with score:
        ```python
        results = vector_store.similarity_search_with_score(query="qux", k=1)
        for doc, score in results:
            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Fill out with relevant variables and example output.
    Async:
        ```python
        # add documents
        # await vector_store.aadd_documents(documents=documents, ids=ids)

        # delete documents
        # await vector_store.adelete(ids=["3"])

        # search
        # results = vector_store.asimilarity_search(query="thud", k=1)

        # search with score
        results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
        for doc, score in results:
            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
        ```

        ```python
        # TODO: Example output
        ```

    # TODO: Fill out with relevant variables and example output.
    Use as Retriever:
        ```python
        retriever = vector_store.as_retriever(
            search_type="mmr",
            search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
        )
        retriever.invoke("thud")
        ```

        ```python
        # TODO: Example output
        ```
    """ # noqa: E501

    def __init__(self, embedding: Embeddings) -> None:

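The vector store template's add/search flow can be exercised without external services using langchain-core's in-memory store and fake embeddings; a sketch:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# Fake embeddings keep the sketch self-contained; use a real provider in practice.
vector_store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=16))
vector_store.add_documents(
    [Document(page_content="foo"), Document(page_content="thud")]
)

for doc in vector_store.similarity_search("thud", k=1):
    print(doc.page_content)
```
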
@@ -24,7 +24,7 @@ def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
    This code works

    Args:
        pkg_name: The name of the partner package.

    Returns:
        List of 2-tuples containing old and new import paths.

@@ -65,7 +65,7 @@ def is_subclass(class_obj: type, classes_: list[type]) -> bool:
        classes_: A list of classes to check against.

    Returns:
        True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
    """
    return any(
        issubclass(class_obj, kls)

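The `is_subclass` contract above is easy to check in isolation; a sketch with the predicate restated inline (the helper lives in an internal migrations module, so it is reimplemented here rather than imported, and the example classes are invented):

```python
def is_subclass(class_obj: type, classes_: list[type]) -> bool:
    # True if class_obj is a subclass of any class in classes_, False otherwise.
    return any(issubclass(class_obj, kls) for kls in classes_)


class Animal: ...


class Dog(Animal): ...


print(is_subclass(Dog, [Animal, int]))  # True: Dog subclasses Animal
print(is_subclass(str, [Animal]))  # False: str is unrelated
```
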
@@ -1,7 +1,14 @@
# 🦜🍎️ LangChain Core

[![PyPI - License](https://img.shields.io/pypi/l/langchain-core?style=flat-square)](https://opensource.org/licenses/MIT)
[![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-core?style=flat-square)](https://pypistats.org/packages/langchain-core)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)

Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.

## Quick Install

@@ -9,16 +16,14 @@
pip install langchain-core
```

## 🤔 What is this?

LangChain Core contains the base abstractions that power the LangChain ecosystem.

These abstractions are designed to be as modular and simple as possible.

The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.

## ⛰️ Why build on top of LangChain Core?

The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:

@@ -27,12 +32,16 @@ The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
- **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.

## 📖 Documentation

For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).

## 📕 Releases & Versioning

See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.

## 💁 Contributing

As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.

For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).

@@ -6,7 +6,6 @@ This module is only relevant for LangChain developers, not for users.

This module and its submodules are for internal use only. Do not use them in your
own code. We may change the API at any time with no warning.

"""

from typing import TYPE_CHECKING
@@ -51,29 +51,26 @@ def beta(
        own (annotation-emitting) `C.__init__`).

    Args:
        message : str, optional
        message:
            Override the default beta message. The %(since)s,
            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
            and %(removal)s format specifiers will be replaced by the
            values of the respective arguments passed to this function.
        name : str, optional
        name:
            The name of the beta object.
        obj_type : str, optional
        obj_type:
            The object type being beta.
        addendum : str, optional
        addendum:
            Additional text appended directly to the final message.

    Returns:
        A decorator which can be used to mark functions or classes as beta.

    Examples:

        .. code-block:: python

            @beta
            def the_function_to_annotate():
                pass

        ```python
        @beta
        def the_function_to_annotate():
            pass
        ```
    """

    def beta(
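To see what the decorator actually does at call time, here is a hedged sketch (assuming `beta` is importable from `langchain_core._api` and emits a `LangChainBetaWarning` on use; the decorator is keyword-only in this sketch, so it is applied with parentheses):

```python
import warnings

from langchain_core._api import beta


@beta()
def experimental_fn() -> str:
    return "ok"


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    experimental_fn()

# Expected to print something like "LangChainBetaWarning"
print(caught[0].category.__name__)
```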
@@ -97,47 +97,44 @@ def deprecated(
        property, and 'function' otherwise.

    Args:
        since : str
        since:
            The release at which this API became deprecated.
        message : str, optional
        message:
            Override the default deprecation message. The %(since)s,
            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
            and %(removal)s format specifiers will be replaced by the
            values of the respective arguments passed to this function.
        name : str, optional
        name:
            The name of the deprecated object.
        alternative : str, optional
        alternative:
            An alternative API that the user may use in place of the
            deprecated API. The deprecation warning will tell the user
            about this alternative if provided.
        alternative_import: str, optional
        alternative_import:
            An alternative import that the user may use instead.
        pending : bool, optional
        pending:
            If `True`, uses a `PendingDeprecationWarning` instead of a
            DeprecationWarning. Cannot be used together with removal.
        obj_type : str, optional
        obj_type:
            The object type being deprecated.
        addendum : str, optional
        addendum:
            Additional text appended directly to the final message.
        removal : str, optional
        removal:
            The expected removal version. With the default (an empty
            string), a removal version is automatically computed from
            since. Set to other Falsy values to not schedule a removal
            date. Cannot be used together with pending.
        package: str, optional
        package:
            The package of the deprecated object.

    Returns:
        A decorator to mark a function or class as deprecated.

    Examples:

        .. code-block:: python

            @deprecated("1.4.0")
            def the_function_to_deprecate():
                pass

        ```python
        @deprecated("1.4.0")
        def the_function_to_deprecate():
            pass
        ```
    """
    _validate_deprecation_params(
        removal, alternative, alternative_import, pending=pending
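`deprecated` behaves the same way but folds `since`, `alternative`, and `removal` into the warning text. A minimal sketch, assuming the decorator is importable from `langchain_core._api`:

```python
import warnings

from langchain_core._api import deprecated


@deprecated("0.1.0", alternative="new_fn", removal="1.0.0")
def old_fn() -> str:
    return "ok"


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_fn()

# The message should mention 0.1.0, new_fn, and the 1.0.0 removal target.
print(caught[0].message)
```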
@@ -550,12 +547,10 @@ def rename_parameter(
        A decorator indicating that a parameter was renamed.

    Example:

        .. code-block:: python

            @_api.rename_parameter("3.1", "bad_name", "good_name")
            def func(good_name): ...

        ```python
        @_api.rename_parameter("3.1", "bad_name", "good_name")
        def func(good_name): ...
        ```
    """

    def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:
@@ -1,6 +1,7 @@
"""Schema definitions for representing agent actions, observations, and return values.

**ATTENTION** The schema definitions are provided for backwards compatibility.
!!! warning
    The schema definitions are provided for backwards compatibility.

!!! warning
    New agents should be built using the
@@ -16,10 +17,10 @@ Agents use language models to choose a sequence of actions to take.
A basic agent works in the following manner:

1. Given a prompt an agent uses an LLM to request an action to take
(e.g., a tool to run).
   (e.g., a tool to run).
2. The agent executes the action (e.g., runs the tool), and receives an observation.
3. The agent returns the observation to the LLM, which can then be used to generate
the next action.
   the next action.
4. When the agent reaches a stopping condition, it returns a final return value.

The schemas for the agents themselves are defined in langchain.agents.agent.
@@ -83,10 +84,10 @@ class AgentAction(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.
        """Get the namespace of the LangChain object.

        Returns:
            ``["langchain", "schema", "agent"]``
            `["langchain", "schema", "agent"]`
        """
        return ["langchain", "schema", "agent"]

@@ -111,7 +112,7 @@ class AgentActionMessageLog(AgentAction):
    if (tool, tool_input) cannot be used to fully recreate the LLM
    prediction, and you need that LLM prediction (for future agent iteration).
    Compared to `log`, this is useful when the underlying LLM is a
    ChatModel (and therefore returns messages rather than a string)."""
    chat model (and therefore returns messages rather than a string)."""
    # Ignoring type because we're overriding the type from AgentAction.
    # And this is the correct thing to do in this case.
    # The type literal is used for serialization purposes.
@@ -160,10 +161,10 @@ class AgentFinish(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.
        """Get the namespace of the LangChain object.

        Returns:
            ``["langchain", "schema", "agent"]``
            `["langchain", "schema", "agent"]`
        """
        return ["langchain", "schema", "agent"]
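A short sketch of the two schema objects in the loop described above (field names per the `langchain_core.agents` module; the values are made up):

```python
from langchain_core.agents import AgentAction, AgentFinish

# One step of the loop: the model picks a tool and an input for it...
step = AgentAction(tool="search", tool_input="weather in SF", log="Searching...")
print(step.tool, step.tool_input)

# ...and, at a stopping condition, emits a final return value.
done = AgentFinish(return_values={"output": "It is sunny."}, log="Done.")
print(done.return_values["output"])
```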
@@ -1,24 +1,15 @@
"""Cache classes.
"""`caches` provides an optional caching layer for language models.

!!! warning
    Beta Feature!
    This is a beta feature! Please be wary of deploying experimental code to production
    unless you've taken appropriate precautions.

**Cache** provides an optional caching layer for LLMs.
A cache is useful for two reasons:

Cache is useful for two reasons:

- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
to the LLM provider.

Cache directly competes with Memory. See documentation for Pros and Cons.

**Class hierarchy:**

.. code-block::

    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
1. It can save you money by reducing the number of API calls you make to the LLM
   provider if you're often requesting the same completion multiple times.
2. It can speed up your application by reducing the number of API calls you make to the
   LLM provider.
"""

from __future__ import annotations
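Wiring the cache in is a one-liner via the global hook; a minimal sketch, assuming `set_llm_cache` from `langchain_core.globals`:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Identical (prompt, model-configuration) invocations are now served from
# memory instead of triggering repeat API calls.
set_llm_cache(InMemoryCache())
```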
@@ -40,8 +31,8 @@ class BaseCache(ABC):

    The cache interface consists of the following methods:

    - lookup: Look up a value based on a prompt and llm_string.
    - update: Update the cache based on a prompt and llm_string.
    - lookup: Look up a value based on a prompt and `llm_string`.
    - update: Update the cache based on a prompt and `llm_string`.
    - clear: Clear the cache.

    In addition, the cache interface provides an async version of each method.
@@ -53,14 +44,14 @@ class BaseCache(ABC):

    @abstractmethod
    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Look up based on prompt and llm_string.
        """Look up based on `prompt` and `llm_string`.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
@@ -69,27 +60,27 @@ class BaseCache(ABC):
                representation.

        Returns:
            On a cache miss, return None. On a cache hit, return the cached value.
            The cached value is a list of Generations (or subclasses).
            On a cache miss, return `None`. On a cache hit, return the cached value.
            The cached value is a list of `Generation` (or subclasses).
        """

    @abstractmethod
    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string.
        """Update cache based on `prompt` and `llm_string`.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the lookup method.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
                These invocation parameters are serialized into a string
                representation.
            return_val: The value to be cached. The value is a list of Generations
            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """

@@ -98,14 +89,14 @@ class BaseCache(ABC):
        """Clear cache that can take additional keyword arguments."""

    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Async look up based on prompt and llm_string.
        """Async look up based on `prompt` and `llm_string`.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
@@ -114,29 +105,29 @@ class BaseCache(ABC):
                representation.

        Returns:
            On a cache miss, return None. On a cache hit, return the cached value.
            The cached value is a list of Generations (or subclasses).
            On a cache miss, return `None`. On a cache hit, return the cached value.
            The cached value is a list of `Generation` (or subclasses).
        """
        return await run_in_executor(None, self.lookup, prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
        """Async update cache based on prompt and llm_string.
        """Async update cache based on `prompt` and `llm_string`.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the look up method.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
                These invocation parameters are serialized into a string
                representation.
            return_val: The value to be cached. The value is a list of Generations
            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        return await run_in_executor(None, self.update, prompt, llm_string, return_val)
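Since the interface is just `lookup`/`update`/`clear` keyed on the `(prompt, llm_string)` 2-tuple, a custom backend stays small. A sketch of a dict-backed implementation (assuming `RETURN_VAL_TYPE` is importable from `langchain_core.caches`):

```python
from __future__ import annotations

from typing import Any

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache


class DictCache(BaseCache):
    """Toy cache keyed on the (prompt, llm_string) 2-tuple, per the contract above."""

    def __init__(self) -> None:
        self._store: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        # Cache miss returns None; hit returns the cached list of generations.
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store.clear()
```

The async `alookup`/`aupdate` defaults shown above delegate to these sync methods via an executor, so a sync-only subclass is enough to start with.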
@@ -156,10 +147,9 @@ class InMemoryCache(BaseCache):
            maxsize: The maximum number of items to store in the cache.
                If `None`, the cache has no maximum size.
                If the cache exceeds the maximum size, the oldest items are removed.
                Default is None.

        Raises:
            ValueError: If maxsize is less than or equal to 0.
            ValueError: If `maxsize` is less than or equal to `0`.
        """
        self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
        if maxsize is not None and maxsize <= 0:
@@ -168,28 +158,28 @@ class InMemoryCache(BaseCache):
        self._maxsize = maxsize

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Look up based on prompt and llm_string.
        """Look up based on `prompt` and `llm_string`.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.

        Returns:
            On a cache miss, return None. On a cache hit, return the cached value.
            On a cache miss, return `None`. On a cache hit, return the cached value.
        """
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string.
        """Update cache based on `prompt` and `llm_string`.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
            return_val: The value to be cached. The value is a list of Generations
            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        if self._maxsize is not None and len(self._cache) == self._maxsize:
@@ -202,30 +192,30 @@ class InMemoryCache(BaseCache):
        self._cache = {}

    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Async look up based on prompt and llm_string.
        """Async look up based on `prompt` and `llm_string`.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.

        Returns:
            On a cache miss, return None. On a cache hit, return the cached value.
            On a cache miss, return `None`. On a cache hit, return the cached value.
        """
        return self.lookup(prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
        """Async update cache based on prompt and llm_string.
        """Async update cache based on `prompt` and `llm_string`.

        Args:
            prompt: a string representation of the prompt.
                In the case of a Chat model, the prompt is a non-trivial
            prompt: A string representation of the prompt.
                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
            return_val: The value to be cached. The value is a list of Generations
            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        self.update(prompt, llm_string, return_val)
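A usage sketch of the key semantics documented above: a hit requires both the prompt and the `llm_string` to match (the output comments are illustrative):

```python
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache(maxsize=2)
cache.update("prompt-1", "model-config", [Generation(text="hello")])

print(cache.lookup("prompt-1", "model-config")[0].text)  # hello
print(cache.lookup("prompt-1", "other-config"))  # None: the key includes llm_string
```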
@@ -1,11 +1,4 @@
"""**Callback handlers** allow listening to events in LangChain.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
"""
"""**Callback handlers** allow listening to events in LangChain."""

from typing import TYPE_CHECKING

@@ -35,10 +35,10 @@ class RetrieverManagerMixin:
        """Run when Retriever errors.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_retriever_end(
@@ -52,10 +52,10 @@ class RetrieverManagerMixin:
        """Run when Retriever ends running.

        Args:
            documents (Sequence[Document]): The documents retrieved.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            documents: The documents retrieved.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

@@ -76,12 +76,11 @@ class LLMManagerMixin:
        For both chat models and non-chat models (legacy LLMs).

        Args:
            token (str): The new token.
            chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
                containing content and other information.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            token: The new token.
            chunk: The new generated chunk, containing content and other information.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_llm_end(
@@ -95,10 +94,10 @@ class LLMManagerMixin:
        """Run when LLM ends running.

        Args:
            response (LLMResult): The response which was generated.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            response: The response which was generated.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_llm_error(
@@ -112,10 +111,10 @@ class LLMManagerMixin:
        """Run when LLM errors.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

@@ -133,10 +132,10 @@ class ChainManagerMixin:
        """Run when chain ends running.

        Args:
            outputs (dict[str, Any]): The outputs of the chain.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            outputs: The outputs of the chain.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_chain_error(
@@ -150,10 +149,10 @@ class ChainManagerMixin:
        """Run when chain errors.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_agent_action(
@@ -167,10 +166,10 @@ class ChainManagerMixin:
        """Run on agent action.

        Args:
            action (AgentAction): The agent action.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            action: The agent action.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_agent_finish(
@@ -184,10 +183,10 @@ class ChainManagerMixin:
        """Run on the agent end.

        Args:
            finish (AgentFinish): The agent finish.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            finish: The agent finish.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

@@ -205,10 +204,10 @@ class ToolManagerMixin:
        """Run when the tool ends running.

        Args:
            output (Any): The output of the tool.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            output: The output of the tool.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_tool_error(
@@ -222,10 +221,10 @@ class ToolManagerMixin:
        """Run when tool errors.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
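Concretely, a handler subclasses `BaseCallbackHandler` and overrides only the hooks it needs; a minimal sketch (note `run_id`/`parent_run_id` are keyword-only, as in the signatures above):

```python
from typing import Any
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler


class ChainLoggingHandler(BaseCallbackHandler):
    """Sketch: log chain starts and ends; all other events use the no-op defaults."""

    def on_chain_start(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: UUID,
        **kwargs: Any,
    ) -> None:
        print(f"chain {run_id} started with inputs {list(inputs)}")

    def on_chain_end(
        self, outputs: dict[str, Any], *, run_id: UUID, **kwargs: Any
    ) -> None:
        print(f"chain {run_id} finished")
```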
@@ -248,16 +247,16 @@ class CallbackManagerMixin:
        !!! warning
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.
            `on_chat_model_start` instead.

        Args:
            serialized (dict[str, Any]): The serialized LLM.
            prompts (list[str]): The prompts.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized LLM.
            prompts: The prompts.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    def on_chat_model_start(
@@ -275,16 +274,16 @@ class CallbackManagerMixin:

        !!! warning
            This method is called for chat models. If you're implementing a handler for
            a non-chat model, you should use ``on_llm_start`` instead.
            a non-chat model, you should use `on_llm_start` instead.

        Args:
            serialized (dict[str, Any]): The serialized chat model.
            messages (list[list[BaseMessage]]): The messages.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized chat model.
            messages: The messages.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
        # NotImplementedError is thrown intentionally
        # Callback handler will fall back to on_llm_start if this exception is thrown
@@ -305,13 +304,13 @@ class CallbackManagerMixin:
        """Run when the Retriever starts running.

        Args:
            serialized (dict[str, Any]): The serialized Retriever.
            query (str): The query.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized Retriever.
            query: The query.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    def on_chain_start(
@@ -328,13 +327,13 @@ class CallbackManagerMixin:
        """Run when a chain starts running.

        Args:
            serialized (dict[str, Any]): The serialized chain.
            inputs (dict[str, Any]): The inputs.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized chain.
            inputs: The inputs.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    def on_tool_start(
@@ -352,14 +351,14 @@ class CallbackManagerMixin:
        """Run when the tool starts running.

        Args:
            serialized (dict[str, Any]): The serialized tool.
            input_str (str): The input string.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            inputs (dict[str, Any] | None): The inputs.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized tool.
            input_str: The input string.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            inputs: The inputs.
            **kwargs: Additional keyword arguments.
        """

@@ -377,10 +376,10 @@ class RunManagerMixin:
        """Run on an arbitrary text.

        Args:
            text (str): The text.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            text: The text.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_retry(
@@ -394,10 +393,10 @@ class RunManagerMixin:
        """Run on a retry event.

        Args:
            retry_state (RetryCallState): The retry state.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            retry_state: The retry state.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    def on_custom_event(
@@ -415,7 +414,7 @@ class RunManagerMixin:
        Args:
            name: The name of the custom event.
            data: The data for the custom event. Format will match
            the format specified by the user.
                the format specified by the user.
            run_id: The ID of the run.
            tags: The tags associated with the custom event
                (includes inherited tags).
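A sketch of the custom-event path end to end, assuming `adispatch_custom_event` from `langchain_core.callbacks.manager` (on Python versions before 3.11, the runnable's `config` would need to be forwarded to the dispatch call explicitly):

```python
import asyncio
from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.callbacks.manager import adispatch_custom_event
from langchain_core.runnables import RunnableLambda


class CustomEventHandler(AsyncCallbackHandler):
    async def on_custom_event(
        self, name: str, data: Any, *, run_id: UUID, **kwargs: Any
    ) -> None:
        print(f"custom event {name!r}: {data}")


async def step(x: int) -> int:
    # Events dispatched inside a runnable propagate to attached handlers.
    await adispatch_custom_event("progress", {"value": x})
    return x + 1


asyncio.run(
    RunnableLambda(step).ainvoke(1, config={"callbacks": [CustomEventHandler()]})
)
```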
@@ -497,16 +496,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        !!! warning
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.
            `on_chat_model_start` instead.

        Args:
            serialized (dict[str, Any]): The serialized LLM.
            prompts (list[str]): The prompts.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized LLM.
            prompts: The prompts.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    async def on_chat_model_start(
@@ -524,16 +523,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):

        !!! warning
            This method is called for chat models. If you're implementing a handler for
            a non-chat model, you should use ``on_llm_start`` instead.
            a non-chat model, you should use `on_llm_start` instead.

        Args:
            serialized (dict[str, Any]): The serialized chat model.
            messages (list[list[BaseMessage]]): The messages.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized chat model.
            messages: The messages.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
        # NotImplementedError is thrown intentionally
        # Callback handler will fall back to on_llm_start if this exception is thrown
@@ -555,13 +554,12 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        For both chat models and non-chat models (legacy LLMs).

        Args:
            token (str): The new token.
            chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
                containing content and other information.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            token: The new token.
            chunk: The new generated chunk, containing content and other information.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_llm_end(
@@ -576,11 +574,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when the model ends running.

        Args:
            response (LLMResult): The response which was generated.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            response: The response which was generated.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_llm_error(
@@ -599,7 +597,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            kwargs (Any): Additional keyword arguments.
            **kwargs: Additional keyword arguments.
                - response (LLMResult): The response which was generated before
                    the error occurred.
        """
@@ -618,13 +616,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when a chain starts running.

        Args:
            serialized (dict[str, Any]): The serialized chain.
            inputs (dict[str, Any]): The inputs.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized chain.
            inputs: The inputs.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    async def on_chain_end(
@@ -639,11 +637,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when a chain ends running.

        Args:
            outputs (dict[str, Any]): The outputs of the chain.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            outputs: The outputs of the chain.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_chain_error(
@@ -658,11 +656,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when chain errors.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_tool_start(
@@ -680,14 +678,14 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when the tool starts running.

        Args:
            serialized (dict[str, Any]): The serialized tool.
            input_str (str): The input string.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            inputs (dict[str, Any] | None): The inputs.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized tool.
            input_str: The input string.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            inputs: The inputs.
            **kwargs: Additional keyword arguments.
        """

    async def on_tool_end(
@@ -702,11 +700,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when the tool ends running.

        Args:
            output (Any): The output of the tool.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            output: The output of the tool.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_tool_error(
@@ -721,11 +719,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when tool errors.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_text(
@@ -740,11 +738,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on an arbitrary text.

        Args:
            text (str): The text.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            text: The text.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_retry(
@@ -758,10 +756,10 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on a retry event.

        Args:
            retry_state (RetryCallState): The retry state.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            kwargs (Any): Additional keyword arguments.
            retry_state: The retry state.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    async def on_agent_action(
@@ -776,11 +774,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on agent action.

        Args:
            action (AgentAction): The agent action.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            action: The agent action.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_agent_finish(
@@ -795,11 +793,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on the agent end.

        Args:
            finish (AgentFinish): The agent finish.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            finish: The agent finish.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_retriever_start(
@@ -816,13 +814,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on the retriever start.

        Args:
            serialized (dict[str, Any]): The serialized retriever.
            query (str): The query.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            metadata (dict[str, Any] | None): The metadata.
            kwargs (Any): Additional keyword arguments.
            serialized: The serialized retriever.
            query: The query.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    async def on_retriever_end(
@@ -837,11 +835,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on the retriever end.

        Args:
            documents (Sequence[Document]): The documents retrieved.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            documents: The documents retrieved.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_retriever_error(
@@ -856,11 +854,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run on retriever error.

        Args:
            error (BaseException): The error that occurred.
            run_id (UUID): The run ID. This is the ID of the current run.
            parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
            tags (list[str] | None): The tags.
            kwargs (Any): Additional keyword arguments.
            error: The error that occurred.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_custom_event(
@@ -878,7 +876,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        Args:
            name: The name of the custom event.
            data: The data for the custom event. Format will match
            the format specified by the user.
                the format specified by the user.
            run_id: The ID of the run.
            tags: The tags associated with the custom event
                (includes inherited tags).
@@ -943,35 +941,29 @@ class BaseCallbackManager(CallbackManagerMixin):
            within merge_configs.

        Returns:
            BaseCallbackManager: The merged callback manager of the same type
            as the current object.
            The merged callback manager of the same type as the current object.

        Example: Merging two callback managers.

            .. code-block:: python
            ```python
            from langchain_core.callbacks.manager import (
                CallbackManager,
                trace_as_chain_group,
            )
            from langchain_core.callbacks.stdout import StdOutCallbackHandler

            from langchain_core.callbacks.manager import (
                CallbackManager,
                trace_as_chain_group,
            )
            from langchain_core.callbacks.stdout import StdOutCallbackHandler

            manager = CallbackManager(
                handlers=[StdOutCallbackHandler()], tags=["tag2"]
            )
            with trace_as_chain_group(
                "My Group Name", tags=["tag1"]
            ) as group_manager:
                merged_manager = group_manager.merge(manager)
                print(merged_manager.handlers)
                # [
                #    <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
                #    <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
                # ]

                print(merged_manager.tags)
                # ['tag2', 'tag1']
            manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
            with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
                merged_manager = group_manager.merge(manager)
                print(merged_manager.handlers)
                # [
                #    <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
                #    <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
                # ]

                print(merged_manager.tags)
                # ['tag2', 'tag1']
            ```
        """  # noqa: E501
        manager = self.__class__(
            parent_run_id=self.parent_run_id or other.parent_run_id,
@@ -1008,8 +1000,8 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Add a handler to the callback manager.

        Args:
            handler (BaseCallbackHandler): The handler to add.
            inherit (bool): Whether to inherit the handler. Default is True.
            handler: The handler to add.
            inherit: Whether to inherit the handler.
        """
        if handler not in self.handlers:
            self.handlers.append(handler)
@@ -1020,7 +1012,7 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Remove a handler from the callback manager.

        Args:
            handler (BaseCallbackHandler): The handler to remove.
            handler: The handler to remove.
        """
        if handler in self.handlers:
            self.handlers.remove(handler)
@@ -1035,8 +1027,8 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Set handlers as the only handlers on the callback manager.

        Args:
            handlers (list[BaseCallbackHandler]): The handlers to set.
            inherit (bool): Whether to inherit the handlers. Default is True.
            handlers: The handlers to set.
            inherit: Whether to inherit the handlers.
        """
        self.handlers = []
        self.inheritable_handlers = []
@@ -1051,8 +1043,8 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Set handler as the only handler on the callback manager.

        Args:
            handler (BaseCallbackHandler): The handler to set.
            inherit (bool): Whether to inherit the handler. Default is True.
            handler: The handler to set.
            inherit: Whether to inherit the handler.
        """
        self.set_handlers([handler], inherit=inherit)

@@ -1064,8 +1056,8 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Add tags to the callback manager.

        Args:
            tags (list[str]): The tags to add.
            inherit (bool): Whether to inherit the tags. Default is True.
            tags: The tags to add.
            inherit: Whether to inherit the tags.
        """
        for tag in tags:
            if tag in self.tags:
@@ -1078,7 +1070,7 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Remove tags from the callback manager.

        Args:
            tags (list[str]): The tags to remove.
            tags: The tags to remove.
        """
        for tag in tags:
            if tag in self.tags:
@@ -1094,8 +1086,8 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Add metadata to the callback manager.

        Args:
            metadata (dict[str, Any]): The metadata to add.
            inherit (bool): Whether to inherit the metadata. Default is True.
            metadata: The metadata to add.
            inherit: Whether to inherit the metadata.
        """
        self.metadata.update(metadata)
        if inherit:
@@ -1105,7 +1097,7 @@ class BaseCallbackManager(CallbackManagerMixin):
        """Remove metadata from the callback manager.

        Args:
            keys (list[str]): The keys to remove.
            keys: The keys to remove.
        """
        for key in keys:
            self.metadata.pop(key, None)
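Taken together, the mutation API reads like this; a usage sketch (the handler, tag, and metadata values are made up):

```python
from langchain_core.callbacks import CallbackManager, StdOutCallbackHandler

manager = CallbackManager(handlers=[])
handler = StdOutCallbackHandler()

# Handlers, tags, and metadata can all be marked inheritable for child runs.
manager.add_handler(handler, inherit=True)
manager.add_tags(["experiment-1"])
manager.add_metadata({"run": 1})
print(manager.handlers, manager.tags, manager.metadata)

# Each has a matching removal method.
manager.remove_handler(handler)
manager.remove_tags(["experiment-1"])
manager.remove_metadata(["run"])
```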
@@ -27,27 +27,27 @@ class FileCallbackHandler(BaseCallbackHandler):
    Examples:
        Using as a context manager (recommended):

        .. code-block:: python

            with FileCallbackHandler("output.txt") as handler:
                # Use handler with your chain/agent
                chain.invoke(inputs, config={"callbacks": [handler]})
        ```python
        with FileCallbackHandler("output.txt") as handler:
            # Use handler with your chain/agent
            chain.invoke(inputs, config={"callbacks": [handler]})
        ```

        Direct instantiation (deprecated):

        .. code-block:: python

            handler = FileCallbackHandler("output.txt")
            # File remains open until handler is garbage collected
            try:
                chain.invoke(inputs, config={"callbacks": [handler]})
            finally:
                handler.close()  # Explicit cleanup recommended
        ```python
        handler = FileCallbackHandler("output.txt")
        # File remains open until handler is garbage collected
        try:
            chain.invoke(inputs, config={"callbacks": [handler]})
        finally:
            handler.close()  # Explicit cleanup recommended
        ```

    Args:
        filename: The file path to write to.
        mode: The file open mode. Defaults to `'a'` (append).
        color: Default color for text output. Defaults to `None`.
        color: Default color for text output.

    !!! note
        When not used as a context manager, a deprecation warning will be issued
@@ -64,7 +64,7 @@ class FileCallbackHandler(BaseCallbackHandler):
        Args:
            filename: Path to the output file.
            mode: File open mode (e.g., `'w'`, `'a'`, `'x'`). Defaults to `'a'`.
            color: Default text color for output. Defaults to `None`.
            color: Default text color for output.

        """
        self.filename = filename
@@ -132,7 +132,7 @@ class FileCallbackHandler(BaseCallbackHandler):
        Args:
            text: The text to write to the file.
            color: Optional color for the text. Defaults to `self.color`.
            end: String appended after the text. Defaults to `""`.
            end: String appended after the text.
            file: Optional file to write to. Defaults to `self.file`.

        Raises:
@@ -239,7 +239,7 @@ class FileCallbackHandler(BaseCallbackHandler):
            text: The text to write.
            color: Color override for this specific output. If `None`, uses
                `self.color`.
            end: String appended after the text. Defaults to `""`.
            end: String appended after the text.
            **kwargs: Additional keyword arguments.

        """
@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Initialize callback handler.

Args:
    color: The color to use for the text. Defaults to `None`.
    color: The color to use for the text.
"""
self.color = color

@@ -31,9 +31,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Print out that we are entering a chain.

Args:
    serialized (dict[str, Any]): The serialized chain.
    inputs (dict[str, Any]): The inputs to the chain.
    **kwargs (Any): Additional keyword arguments.
    serialized: The serialized chain.
    inputs: The inputs to the chain.
    **kwargs: Additional keyword arguments.
"""
if "name" in kwargs:
    name = kwargs["name"]
@@ -48,8 +48,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Print out that we finished a chain.

Args:
    outputs (dict[str, Any]): The outputs of the chain.
    **kwargs (Any): Additional keyword arguments.
    outputs: The outputs of the chain.
    **kwargs: Additional keyword arguments.
"""
print("\n\033[1m> Finished chain.\033[0m")  # noqa: T201

@@ -60,9 +60,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run on agent action.

Args:
    action (AgentAction): The agent action.
    color (str | None): The color to use for the text. Defaults to `None`.
    **kwargs (Any): Additional keyword arguments.
    action: The agent action.
    color: The color to use for the text.
    **kwargs: Additional keyword arguments.
"""
print_text(action.log, color=color or self.color)

@@ -78,12 +78,11 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""If not the final action, print out observation.

Args:
    output (Any): The output to print.
    color (str | None): The color to use for the text. Defaults to `None`.
    observation_prefix (str | None): The observation prefix.
        Defaults to `None`.
    llm_prefix (str | None): The LLM prefix. Defaults to `None`.
    **kwargs (Any): Additional keyword arguments.
    output: The output to print.
    color: The color to use for the text.
    observation_prefix: The observation prefix.
    llm_prefix: The LLM prefix.
    **kwargs: Additional keyword arguments.
"""
output = str(output)
if observation_prefix is not None:
@@ -103,10 +102,10 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run when the agent ends.

Args:
    text (str): The text to print.
    color (str | None): The color to use for the text. Defaults to `None`.
    end (str): The end character to use. Defaults to "".
    **kwargs (Any): Additional keyword arguments.
    text: The text to print.
    color: The color to use for the text.
    end: The end character to use.
    **kwargs: Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end)

@@ -117,8 +116,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
"""Run on the agent end.

Args:
    finish (AgentFinish): The agent finish.
    color (str | None): The color to use for the text. Defaults to `None`.
    **kwargs (Any): Additional keyword arguments.
    finish: The agent finish.
    color: The color to use for the text.
    **kwargs: Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n")

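For reference, a minimal runnable sketch of these hooks firing; the chain below is illustrative (not from the diff), and any `Runnable` invoked with a callbacks config behaves the same way:

```python
# Illustrative sketch: RunnableLambda stands in for a real chain.
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.runnables import RunnableLambda

handler = StdOutCallbackHandler(color="green")
chain = RunnableLambda(lambda text: text.upper())

# Triggers on_chain_start ("> Entering new ... chain.") and on_chain_end.
chain.invoke("hello", config={"callbacks": [handler]})
```
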
@@ -24,9 +24,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM starts running.

Args:
    serialized (dict[str, Any]): The serialized LLM.
    prompts (list[str]): The prompts to run.
    **kwargs (Any): Additional keyword arguments.
    serialized: The serialized LLM.
    prompts: The prompts to run.
    **kwargs: Additional keyword arguments.
"""

def on_chat_model_start(
@@ -38,9 +38,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM starts running.

Args:
    serialized (dict[str, Any]): The serialized LLM.
    messages (list[list[BaseMessage]]): The messages to run.
    **kwargs (Any): Additional keyword arguments.
    serialized: The serialized LLM.
    messages: The messages to run.
    **kwargs: Additional keyword arguments.
"""

@override
@@ -48,8 +48,8 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run on new LLM token. Only available when streaming is enabled.

Args:
    token (str): The new token.
    **kwargs (Any): Additional keyword arguments.
    token: The new token.
    **kwargs: Additional keyword arguments.
"""
sys.stdout.write(token)
sys.stdout.flush()
@@ -58,16 +58,16 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when LLM ends running.

Args:
    response (LLMResult): The response from the LLM.
    **kwargs (Any): Additional keyword arguments.
    response: The response from the LLM.
    **kwargs: Additional keyword arguments.
"""

def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors.

Args:
    error (BaseException): The error that occurred.
    **kwargs (Any): Additional keyword arguments.
    error: The error that occurred.
    **kwargs: Additional keyword arguments.
"""

def on_chain_start(
@@ -76,25 +76,25 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when a chain starts running.

Args:
    serialized (dict[str, Any]): The serialized chain.
    inputs (dict[str, Any]): The inputs to the chain.
    **kwargs (Any): Additional keyword arguments.
    serialized: The serialized chain.
    inputs: The inputs to the chain.
    **kwargs: Additional keyword arguments.
"""

def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Run when a chain ends running.

Args:
    outputs (dict[str, Any]): The outputs of the chain.
    **kwargs (Any): Additional keyword arguments.
    outputs: The outputs of the chain.
    **kwargs: Additional keyword arguments.
"""

def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors.

Args:
    error (BaseException): The error that occurred.
    **kwargs (Any): Additional keyword arguments.
    error: The error that occurred.
    **kwargs: Additional keyword arguments.
"""

def on_tool_start(
@@ -103,47 +103,47 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Run when the tool starts running.

Args:
    serialized (dict[str, Any]): The serialized tool.
    input_str (str): The input string.
    **kwargs (Any): Additional keyword arguments.
    serialized: The serialized tool.
    input_str: The input string.
    **kwargs: Additional keyword arguments.
"""

def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action.

Args:
    action (AgentAction): The agent action.
    **kwargs (Any): Additional keyword arguments.
    action: The agent action.
    **kwargs: Additional keyword arguments.
"""

def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running.

Args:
    output (Any): The output of the tool.
    **kwargs (Any): Additional keyword arguments.
    output: The output of the tool.
    **kwargs: Additional keyword arguments.
"""

def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors.

Args:
    error (BaseException): The error that occurred.
    **kwargs (Any): Additional keyword arguments.
    error: The error that occurred.
    **kwargs: Additional keyword arguments.
"""

def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on an arbitrary text.

Args:
    text (str): The text to print.
    **kwargs (Any): Additional keyword arguments.
    text: The text to print.
    **kwargs: Additional keyword arguments.
"""

def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on the agent end.

Args:
    finish (AgentFinish): The agent finish.
    **kwargs (Any): Additional keyword arguments.
    finish: The agent finish.
    **kwargs: Additional keyword arguments.
"""

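A hedged sketch of the streaming path: `on_llm_new_token` writes each token to stdout as it arrives. `GenericFakeChatModel` is the offline test model shipped in `langchain_core`; any streaming-capable chat model works the same way:

```python
# Illustrative sketch (not part of the diff); the fake model streams word by word.
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

model = GenericFakeChatModel(messages=iter([AIMessage(content="tokens stream to stdout")]))
for _chunk in model.stream("hi", config={"callbacks": [StreamingStdOutCallbackHandler()]}):
    pass  # each token has already been written by on_llm_new_token
```
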
@@ -19,30 +19,29 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.

Example:
    .. code-block:: python

        from langchain.chat_models import init_chat_model
        from langchain_core.callbacks import UsageMetadataCallbackHandler

        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

        callback = UsageMetadataCallbackHandler()
        result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
        result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
        callback.usage_metadata

    .. code-block::

        {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
         'output_tokens': 10,
         'total_tokens': 18,
         'input_token_details': {'audio': 0, 'cache_read': 0},
         'output_token_details': {'audio': 0, 'reasoning': 0}},
         'claude-3-5-haiku-20241022': {'input_tokens': 8,
         'output_tokens': 21,
         'total_tokens': 29,
         'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}

    ```python
    from langchain.chat_models import init_chat_model
    from langchain_core.callbacks import UsageMetadataCallbackHandler

    llm_1 = init_chat_model(model="openai:gpt-4o-mini")
    llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

    callback = UsageMetadataCallbackHandler()
    result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
    result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
    callback.usage_metadata
    ```
    ```txt
    {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
     'output_tokens': 10,
     'total_tokens': 18,
     'input_token_details': {'audio': 0, 'cache_read': 0},
     'output_token_details': {'audio': 0, 'reasoning': 0}},
     'claude-3-5-haiku-20241022': {'input_tokens': 8,
     'output_tokens': 21,
     'total_tokens': 29,
     'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
    ```

!!! version-added "Added in version 0.3.49"

@@ -96,40 +95,44 @@ def get_usage_metadata_callback(
"""Get usage metadata callback.

Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
`AIMessage.usage_metadata`.

Args:
    name (str): The name of the context variable. Defaults to
        ``'usage_metadata_callback'``.
    name: The name of the context variable.

Yields:
    The usage metadata callback.

Example:
    .. code-block:: python

        from langchain.chat_models import init_chat_model
        from langchain_core.callbacks import get_usage_metadata_callback

        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

        with get_usage_metadata_callback() as cb:
            llm_1.invoke("Hello")
            llm_2.invoke("Hello")
            print(cb.usage_metadata)

    .. code-block::

        {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
         'output_tokens': 10,
         'total_tokens': 18,
         'input_token_details': {'audio': 0, 'cache_read': 0},
         'output_token_details': {'audio': 0, 'reasoning': 0}},
         'claude-3-5-haiku-20241022': {'input_tokens': 8,
         'output_tokens': 21,
         'total_tokens': 29,
         'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}

    ```python
    from langchain.chat_models import init_chat_model
    from langchain_core.callbacks import get_usage_metadata_callback

    llm_1 = init_chat_model(model="openai:gpt-4o-mini")
    llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")

    with get_usage_metadata_callback() as cb:
        llm_1.invoke("Hello")
        llm_2.invoke("Hello")
        print(cb.usage_metadata)
    ```
    ```txt
    {
        "gpt-4o-mini-2024-07-18": {
            "input_tokens": 8,
            "output_tokens": 10,
            "total_tokens": 18,
            "input_token_details": {"audio": 0, "cache_read": 0},
            "output_token_details": {"audio": 0, "reasoning": 0},
        },
        "claude-3-5-haiku-20241022": {
            "input_tokens": 8,
            "output_tokens": 21,
            "total_tokens": 29,
            "input_token_details": {"cache_read": 0, "cache_creation": 0},
        },
    }
    ```

!!! version-added "Added in version 0.3.49"

@@ -1,18 +1,4 @@
"""**Chat message history** stores a history of the message interactions in a chat.

**Class hierarchy:**

.. code-block::

    BaseChatMessageHistory --> <name>ChatMessageHistory  # Examples: FileChatMessageHistory, PostgresChatMessageHistory

**Main helpers:**

.. code-block::

    AIMessage, HumanMessage, BaseMessage

"""  # noqa: E501
"""**Chat message history** stores a history of the message interactions in a chat."""

from __future__ import annotations

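For orientation, the in-memory implementation that ships with `langchain_core` exercises the interface this module describes (illustrative, not part of the diff):

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()
history.add_messages([HumanMessage("Hi!"), AIMessage("Hello! How can I help?")])
print(history.messages)  # [HumanMessage(...), AIMessage(...)]
history.clear()
```
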
@@ -63,46 +49,45 @@ class BaseChatMessageHistory(ABC):

Example: Shows a default implementation.

    .. code-block:: python

        import json
        import os
        from langchain_core.messages import messages_from_dict, message_to_dict


        class FileChatMessageHistory(BaseChatMessageHistory):
            storage_path: str
            session_id: str

            @property
            def messages(self) -> list[BaseMessage]:
                try:
                    with open(
                        os.path.join(self.storage_path, self.session_id),
                        "r",
                        encoding="utf-8",
                    ) as f:
                        messages_data = json.load(f)
                    return messages_from_dict(messages_data)
                except FileNotFoundError:
                    return []

            def add_messages(self, messages: Sequence[BaseMessage]) -> None:
                all_messages = list(self.messages)  # Existing messages
                all_messages.extend(messages)  # Add new messages

                serialized = [message_to_dict(message) for message in all_messages]
                file_path = os.path.join(self.storage_path, self.session_id)
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
                with open(file_path, "w", encoding="utf-8") as f:
                    json.dump(serialized, f)

            def clear(self) -> None:
                file_path = os.path.join(self.storage_path, self.session_id)
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
                with open(file_path, "w", encoding="utf-8") as f:
                    json.dump([], f)

    ```python
    import json
    import os
    from langchain_core.messages import messages_from_dict, message_to_dict


    class FileChatMessageHistory(BaseChatMessageHistory):
        storage_path: str
        session_id: str

        @property
        def messages(self) -> list[BaseMessage]:
            try:
                with open(
                    os.path.join(self.storage_path, self.session_id),
                    "r",
                    encoding="utf-8",
                ) as f:
                    messages_data = json.load(f)
                return messages_from_dict(messages_data)
            except FileNotFoundError:
                return []

        def add_messages(self, messages: Sequence[BaseMessage]) -> None:
            all_messages = list(self.messages)  # Existing messages
            all_messages.extend(messages)  # Add new messages

            serialized = [message_to_dict(message) for message in all_messages]
            file_path = os.path.join(self.storage_path, self.session_id)
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(serialized, f)

        def clear(self) -> None:
            file_path = os.path.join(self.storage_path, self.session_id)
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump([], f)
    ```
"""

messages: list[BaseMessage]
@@ -130,7 +115,7 @@ class BaseChatMessageHistory(ABC):
"""Convenience method for adding a human message string to the store.

!!! note
    This is a convenience method. Code should favor the bulk ``add_messages``
    This is a convenience method. Code should favor the bulk `add_messages`
    interface instead to save on round-trips to the persistence layer.

    This method may be deprecated in a future release.
@@ -147,7 +132,7 @@ class BaseChatMessageHistory(ABC):
"""Convenience method for adding an AI message string to the store.

!!! note
    This is a convenience method. Code should favor the bulk ``add_messages``
    This is a convenience method. Code should favor the bulk `add_messages`
    interface instead to save on round-trips to the persistence layer.

    This method may be deprecated in a future release.
@@ -168,7 +153,7 @@ class BaseChatMessageHistory(ABC):

Raises:
    NotImplementedError: If the sub-class has not implemented an efficient
        add_messages method.
        `add_messages` method.
"""
if type(self).add_messages != BaseChatMessageHistory.add_messages:
    # This means that the sub-class has implemented an efficient add_messages
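A small sketch of the round-trip difference the notes above describe, assuming the in-memory history (illustrative):

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()

# Convenience methods: one persistence round-trip per message.
history.add_user_message("What's 2 + 2?")
history.add_ai_message("4")

# Preferred bulk interface: a single round-trip for the whole batch.
history.add_messages([HumanMessage("What's 2 + 2?"), AIMessage("4")])
```
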
@@ -35,38 +35,38 @@ class BaseLoader(ABC):  # noqa: B024
# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
    """Load data into Document objects.
    """Load data into `Document` objects.

    Returns:
        the documents.
        The documents.
    """
    return list(self.lazy_load())

async def aload(self) -> list[Document]:
    """Load data into Document objects.
    """Load data into `Document` objects.

    Returns:
        the documents.
        The documents.
    """
    return [document async for document in self.alazy_load()]

def load_and_split(
    self, text_splitter: TextSplitter | None = None
) -> list[Document]:
    """Load Documents and split into chunks. Chunks are returned as Documents.
    """Load Documents and split into chunks. Chunks are returned as `Document`.

    Do not override this method. It should be considered to be deprecated!

    Args:
        text_splitter: TextSplitter instance to use for splitting documents.
            Defaults to RecursiveCharacterTextSplitter.
        text_splitter: `TextSplitter` instance to use for splitting documents.
            Defaults to `RecursiveCharacterTextSplitter`.

    Raises:
        ImportError: If langchain-text-splitters is not installed
            and no text_splitter is provided.
        ImportError: If `langchain-text-splitters` is not installed
            and no `text_splitter` is provided.

    Returns:
        List of Documents.
        List of `Document`.
    """
    if text_splitter is None:
        if not _HAS_TEXT_SPLITTERS:
@@ -86,10 +86,10 @@ class BaseLoader(ABC):  # noqa: B024
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
    """A lazy loader for Documents.
    """A lazy loader for `Document`.

    Yields:
        the documents.
        The `Document` objects.
    """
    if type(self).load != BaseLoader.load:
        return iter(self.load())
@@ -97,10 +97,10 @@ class BaseLoader(ABC):  # noqa: B024
    raise NotImplementedError(msg)

async def alazy_load(self) -> AsyncIterator[Document]:
    """A lazy loader for Documents.
    """A lazy loader for `Document`.

    Yields:
        the documents.
        The `Document` objects.
    """
    iterator = await run_in_executor(None, self.lazy_load)
    done = object()
@@ -115,7 +115,7 @@ class BaseBlobParser(ABC):
"""Abstract interface for blob parsers.

A blob parser provides a way to parse raw data stored in a blob into one
or more documents.
or more `Document` objects.

The parser can be composed with blob loaders, making it easy to reuse
a parser independent of how the blob was originally loaded.
@@ -128,25 +128,25 @@ class BaseBlobParser(ABC):
Subclasses are required to implement this method.

Args:
    blob: Blob instance
    blob: `Blob` instance

Returns:
    Generator of documents
    Generator of `Document` objects
"""

def parse(self, blob: Blob) -> list[Document]:
    """Eagerly parse the blob into a document or documents.
    """Eagerly parse the blob into a `Document` or `Document` objects.

    This is a convenience method for interactive development environment.

    Production applications should favor the lazy_parse method instead.
    Production applications should favor the `lazy_parse` method instead.

    Subclasses should generally not over-ride this parse method.

    Args:
        blob: Blob instance
        blob: `Blob` instance

    Returns:
        List of documents
        List of `Document` objects
    """
    return list(self.lazy_parse(blob))
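A minimal custom-loader sketch against the contract above: implement `lazy_load` and the eager `load()`/`aload()` derive from it (the class and data here are illustrative):

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class ListLoader(BaseLoader):
    """Illustrative loader wrapping an in-memory list of strings."""

    def __init__(self, texts: list[str]) -> None:
        self.texts = texts

    def lazy_load(self) -> Iterator[Document]:
        for i, text in enumerate(self.texts):
            yield Document(page_content=text, metadata={"index": i})


docs = ListLoader(["first doc", "second doc"]).load()  # two Document objects
```
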
@@ -22,22 +22,22 @@ class LangSmithLoader(BaseLoader):

??? note "Lazy load"

    .. code-block:: python

        from langchain_core.document_loaders import LangSmithLoader

        loader = LangSmithLoader(dataset_id="...", limit=100)
        docs = []
        for doc in loader.lazy_load():
            docs.append(doc)

    .. code-block:: python

        # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]

    ```python
    from langchain_core.document_loaders import LangSmithLoader

    loader = LangSmithLoader(dataset_id="...", limit=100)
    docs = []
    for doc in loader.lazy_load():
        docs.append(doc)
    ```
    ```python
    # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
    ```

!!! version-added "Added in version 0.2.34"

"""  # noqa: E501
"""

def __init__(
    self,
@@ -60,15 +60,15 @@ class LangSmithLoader(BaseLoader):
"""Create a LangSmith loader.

Args:
    dataset_id: The ID of the dataset to filter by. Defaults to `None`.
    dataset_name: The name of the dataset to filter by. Defaults to `None`.
    dataset_id: The ID of the dataset to filter by.
    dataset_name: The name of the dataset to filter by.
    content_key: The inputs key to set as Document page content. `'.'` characters
        are interpreted as nested keys. E.g. `content_key="first.second"` will
        result in
        `Document(page_content=format_content(example.inputs["first"]["second"]))`
    format_content: Function for converting the content extracted from the example
        inputs into a string. Defaults to JSON-encoding the contents.
    example_ids: The IDs of the examples to filter by. Defaults to `None`.
    example_ids: The IDs of the examples to filter by.
    as_of: The dataset version tag OR
        timestamp to retrieve the examples as of.
        Response examples will only be those that were present at the time
@@ -76,10 +76,10 @@ class LangSmithLoader(BaseLoader):
    splits: A list of dataset splits, which are
        divisions of your dataset such as 'train', 'test', or 'validation'.
        Returns examples only from the specified splits.
    inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
    offset: The offset to start from. Defaults to 0.
    inline_s3_urls: Whether to inline S3 URLs.
    offset: The offset to start from.
    limit: The maximum number of examples to return.
    metadata: Metadata to filter by. Defaults to `None`.
    metadata: Metadata to filter by.
    filter: A structured filter string to apply to the examples.
    client: LangSmith Client. If not provided will be initialized from below args.
    client_kwargs: Keyword args to pass to LangSmith client init. Should only be
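An illustrative sketch of the nested `content_key` behavior documented above; the dataset name is hypothetical and a configured LangSmith client (API key) is assumed:

```python
from langchain_core.document_loaders import LangSmithLoader

loader = LangSmithLoader(
    dataset_name="my-dataset",  # hypothetical dataset
    content_key="question.text",  # reads example.inputs["question"]["text"]
    limit=10,
)
docs = list(loader.lazy_load())
```
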
@@ -2,7 +2,6 @@

**Document** module is a collection of classes that handle documents
and their transformations.

"""

from typing import TYPE_CHECKING

@@ -57,52 +57,51 @@ class Blob(BaseMedia):

Example: Initialize a blob from in-memory data

    .. code-block:: python

        from langchain_core.documents import Blob

        blob = Blob.from_data("Hello, world!")

        # Read the blob as a string
        print(blob.as_string())

        # Read the blob as bytes
        print(blob.as_bytes())

        # Read the blob as a byte stream
        with blob.as_bytes_io() as f:
            print(f.read())

    ```python
    from langchain_core.documents import Blob

    blob = Blob.from_data("Hello, world!")

    # Read the blob as a string
    print(blob.as_string())

    # Read the blob as bytes
    print(blob.as_bytes())

    # Read the blob as a byte stream
    with blob.as_bytes_io() as f:
        print(f.read())
    ```

Example: Load from memory and specify mime-type and metadata

    .. code-block:: python

        from langchain_core.documents import Blob

        blob = Blob.from_data(
            data="Hello, world!",
            mime_type="text/plain",
            metadata={"source": "https://example.com"},
        )

    ```python
    from langchain_core.documents import Blob

    blob = Blob.from_data(
        data="Hello, world!",
        mime_type="text/plain",
        metadata={"source": "https://example.com"},
    )
    ```

Example: Load the blob from a file

    .. code-block:: python

        from langchain_core.documents import Blob

        blob = Blob.from_path("path/to/file.txt")

        # Read the blob as a string
        print(blob.as_string())

        # Read the blob as bytes
        print(blob.as_bytes())

        # Read the blob as a byte stream
        with blob.as_bytes_io() as f:
            print(f.read())

    ```python
    from langchain_core.documents import Blob

    blob = Blob.from_path("path/to/file.txt")

    # Read the blob as a string
    print(blob.as_string())

    # Read the blob as bytes
    print(blob.as_bytes())

    # Read the blob as a byte stream
    with blob.as_bytes_io() as f:
        print(f.read())
    ```
"""

data: bytes | str | None = None
@@ -112,7 +111,7 @@ class Blob(BaseMedia):
encoding: str = "utf-8"
"""Encoding to use if decoding the bytes into a string.

Use utf-8 as default encoding, if decoding to string.
Use `utf-8` as default encoding, if decoding to string.
"""
path: PathLike | None = None
"""Location where the original content was found."""
@@ -128,7 +127,7 @@ class Blob(BaseMedia):

If a path is associated with the blob, it will default to the path location.

Unless explicitly set via a metadata field called "source", in which
Unless explicitly set via a metadata field called `"source"`, in which
case that value will be used instead.
"""
if self.metadata and "source" in self.metadata:
@@ -212,11 +211,11 @@ class Blob(BaseMedia):
"""Load the blob from a path like object.

Args:
    path: path like object to file to be read
    path: Path-like object to file to be read
    encoding: Encoding to use if decoding the bytes into a string
    mime_type: if provided, will be set as the mime-type of the data
    mime_type: If provided, will be set as the mime-type of the data
    guess_type: If `True`, the mimetype will be guessed from the file extension,
        if a mime-type was not provided
        if a mime-type was not provided
    metadata: Metadata to associate with the blob

Returns:
@@ -249,10 +248,10 @@ class Blob(BaseMedia):
"""Initialize the blob from in-memory data.

Args:
    data: the in-memory data associated with the blob
    data: The in-memory data associated with the blob
    encoding: Encoding to use if decoding the bytes into a string
    mime_type: if provided, will be set as the mime-type of the data
    path: if provided, will be set as the source from which the data came
    mime_type: If provided, will be set as the mime-type of the data
    path: If provided, will be set as the source from which the data came
    metadata: Metadata to associate with the blob

Returns:
@@ -278,15 +277,13 @@ class Document(BaseMedia):
"""Class for storing a piece of text and associated metadata.

Example:
    .. code-block:: python

        from langchain_core.documents import Document

        document = Document(
            page_content="Hello, world!", metadata={"source": "https://example.com"}
        )

    ```python
    from langchain_core.documents import Document

    document = Document(
        page_content="Hello, world!", metadata={"source": "https://example.com"}
    )
    ```
"""

page_content: str
@@ -306,7 +303,7 @@ class Document(BaseMedia):

@classmethod
def get_lc_namespace(cls) -> list[str]:
    """Get the namespace of the langchain object.
    """Get the namespace of the LangChain object.

    Returns:
        ["langchain", "schema", "document"]
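A short sketch of the `source` precedence described in the hunk above: an explicit `metadata["source"]` wins over the path the blob came from (values illustrative):

```python
from langchain_core.documents import Blob

blob = Blob.from_data(
    "payload",
    path="local/cache.txt",
    metadata={"source": "https://example.com/original"},
)
assert blob.source == "https://example.com/original"
```
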
@@ -20,35 +20,34 @@ class BaseDocumentTransformer(ABC):
sequence of transformed Documents.

Example:
    .. code-block:: python

        class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
            embeddings: Embeddings
            similarity_fn: Callable = cosine_similarity
            similarity_threshold: float = 0.95

            class Config:
                arbitrary_types_allowed = True

            def transform_documents(
                self, documents: Sequence[Document], **kwargs: Any
            ) -> Sequence[Document]:
                stateful_documents = get_stateful_documents(documents)
                embedded_documents = _get_embeddings_from_stateful_docs(
                    self.embeddings, stateful_documents
                )
                included_idxs = _filter_similar_embeddings(
                    embedded_documents,
                    self.similarity_fn,
                    self.similarity_threshold,
                )
                return [stateful_documents[i] for i in sorted(included_idxs)]

            async def atransform_documents(
                self, documents: Sequence[Document], **kwargs: Any
            ) -> Sequence[Document]:
                raise NotImplementedError

    ```python
    class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
        embeddings: Embeddings
        similarity_fn: Callable = cosine_similarity
        similarity_threshold: float = 0.95

        class Config:
            arbitrary_types_allowed = True

        def transform_documents(
            self, documents: Sequence[Document], **kwargs: Any
        ) -> Sequence[Document]:
            stateful_documents = get_stateful_documents(documents)
            embedded_documents = _get_embeddings_from_stateful_docs(
                self.embeddings, stateful_documents
            )
            included_idxs = _filter_similar_embeddings(
                embedded_documents,
                self.similarity_fn,
                self.similarity_threshold,
            )
            return [stateful_documents[i] for i in sorted(included_idxs)]

        async def atransform_documents(
            self, documents: Sequence[Document], **kwargs: Any
        ) -> Sequence[Document]:
            raise NotImplementedError
    ```
"""

@abstractmethod

@@ -18,40 +18,38 @@ class FakeEmbeddings(Embeddings, BaseModel):

This embedding model creates embeddings by sampling from a normal distribution.

Do not use this outside of testing, as it is not a real embedding model.
!!! warning
    Do not use this outside of testing, as it is not a real embedding model.

Instantiate:
    .. code-block:: python

        from langchain_core.embeddings import FakeEmbeddings

        embed = FakeEmbeddings(size=100)

    ```python
    from langchain_core.embeddings import FakeEmbeddings

    embed = FakeEmbeddings(size=100)
    ```

Embed single text:
    .. code-block:: python

        input_text = "The meaning of life is 42"
        vector = embed.embed_query(input_text)
        print(vector[:3])

    .. code-block:: python

        [-0.700234640213188, -0.581266257710429, -1.1328482266445354]

    ```python
    input_text = "The meaning of life is 42"
    vector = embed.embed_query(input_text)
    print(vector[:3])
    ```
    ```python
    [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
    ```

Embed multiple texts:
    .. code-block:: python

        input_texts = ["Document 1...", "Document 2..."]
        vectors = embed.embed_documents(input_texts)
        print(len(vectors))
        # The first 3 coordinates for the first vector
        print(vectors[0][:3])

    .. code-block:: python

        2
        [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]

    ```python
    input_texts = ["Document 1...", "Document 2..."]
    vectors = embed.embed_documents(input_texts)
    print(len(vectors))
    # The first 3 coordinates for the first vector
    print(vectors[0][:3])
    ```
    ```python
    2
    [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
    ```
"""

size: int
@@ -75,40 +73,38 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.

Do not use this outside of testing, as it is not a real embedding model.
!!! warning
    Do not use this outside of testing, as it is not a real embedding model.

Instantiate:
    .. code-block:: python

        from langchain_core.embeddings import DeterministicFakeEmbedding

        embed = DeterministicFakeEmbedding(size=100)

    ```python
    from langchain_core.embeddings import DeterministicFakeEmbedding

    embed = DeterministicFakeEmbedding(size=100)
    ```

Embed single text:
    .. code-block:: python

        input_text = "The meaning of life is 42"
        vector = embed.embed_query(input_text)
        print(vector[:3])

    .. code-block:: python

        [-0.700234640213188, -0.581266257710429, -1.1328482266445354]

    ```python
    input_text = "The meaning of life is 42"
    vector = embed.embed_query(input_text)
    print(vector[:3])
    ```
    ```python
    [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
    ```

Embed multiple texts:
    .. code-block:: python

        input_texts = ["Document 1...", "Document 2..."]
        vectors = embed.embed_documents(input_texts)
        print(len(vectors))
        # The first 3 coordinates for the first vector
        print(vectors[0][:3])

    .. code-block:: python

        2
        [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]

    ```python
    input_texts = ["Document 1...", "Document 2..."]
    vectors = embed.embed_documents(input_texts)
    print(len(vectors))
    # The first 3 coordinates for the first vector
    print(vectors[0][:3])
    ```
    ```python
    2
    [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
    ```
"""

size: int

@@ -154,7 +154,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
    instead of all variables.
example_keys: If provided, keys to filter examples to.
@@ -198,7 +198,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
    instead of all variables.
example_keys: If provided, keys to filter examples to.
@@ -285,9 +285,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
    Default is 20.
input_keys: If provided, the search is based on the input variables
    instead of all variables.
example_keys: If provided, keys to filter examples to.
@@ -333,9 +332,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select. Default is 4.
k: Number of examples to select.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
    Default is 20.
input_keys: If provided, the search is based on the input variables
    instead of all variables.
example_keys: If provided, keys to filter examples to.
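A runnable sketch of the arguments documented above, using the deterministic fake embeddings and in-memory vector store from `langchain_core` so no API key is needed (example data illustrative):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.vectorstores import InMemoryVectorStore

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    DeterministicFakeEmbedding(size=64),
    InMemoryVectorStore,
    k=1,
)
print(selector.select_examples({"input": "cheerful"}))  # the single closest example
```
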
@@ -16,7 +16,7 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
"""Exception that output parsers should raise to signify a parsing error.

This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser. OutputParserExceptions will be
that also may arise inside the output parser. `OutputParserException` will be
available to catch and handle in ways to fix the parsing error, while other
errors will be raised.
"""
@@ -28,24 +28,23 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
llm_output: str | None = None,
send_to_llm: bool = False,  # noqa: FBT001,FBT002
):
"""Create an OutputParserException.
"""Create an `OutputParserException`.

Args:
    error: The error that's being re-raised or an error message.
    observation: String explanation of error which can be passed to a
        model to try and remediate the issue. Defaults to `None`.
        model to try and remediate the issue.
    llm_output: String model output which is error-ing.
        Defaults to `None`.

    send_to_llm: Whether to send the observation and llm_output back to an Agent
        after an OutputParserException has been raised.
        after an `OutputParserException` has been raised.
        This gives the underlying model driving the agent the context that the
        previous output was improperly structured, in the hopes that it will
        update the output to the correct format.
        Defaults to `False`.

Raises:
    ValueError: If ``send_to_llm`` is True but either observation or
        ``llm_output`` are not provided.
    ValueError: If `send_to_llm` is True but either observation or
        `llm_output` are not provided.
"""
if isinstance(error, str):
    error = create_message(
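A sketch of the `send_to_llm` contract above: when it is `True`, both `observation` and `llm_output` must be supplied (strings illustrative):

```python
from langchain_core.exceptions import OutputParserException

raw_output = "Answer: forty-two"  # model text that failed to parse
raise OutputParserException(
    "Could not parse the output as JSON.",
    observation="Reply with valid JSON only.",
    llm_output=raw_output,
    send_to_llm=True,  # requires observation and llm_output to be set
)
```
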
@@ -326,8 +326,8 @@ def index(
record_manager: Timestamped set to keep track of which documents were
    updated.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.

    - incremental: Cleans up all documents that haven't been updated AND
      that are associated with source ids that were seen during indexing.
@@ -342,15 +342,12 @@ def index(
      source ids that were seen during indexing.
    - None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
    of the document. Default is None.
    of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
    Default is 1_000.
force_update: Force update documents even if they are present in the
    record manager. Useful if you are re-indexing with updated embeddings.
    Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
    metadata. Default is "sha1".
    Other options include "blake2b", "sha256", and "sha512".
    metadata. Options include "blake2b", "sha256", and "sha512".

!!! version-added "Added in version 0.3.66"

@@ -667,8 +664,8 @@ async def aindex(
record_manager: Timestamped set to keep track of which documents were
    updated.
vector_store: VectorStore or DocumentIndex to index the documents into.
batch_size: Batch size to use when indexing. Default is 100.
cleanup: How to handle clean up of documents. Default is None.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.

    - incremental: Cleans up all documents that haven't been updated AND
      that are associated with source ids that were seen during indexing.
@@ -683,15 +680,12 @@ async def aindex(
      source ids that were seen during indexing.
    - None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
    of the document. Default is None.
    of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
    Default is 1_000.
force_update: Force update documents even if they are present in the
    record manager. Useful if you are re-indexing with updated embeddings.
    Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
    metadata. Default is "sha1".
    Other options include "blake2b", "sha256", and "sha512".
    metadata. Options include "blake2b", "sha256", and "sha512".

!!! version-added "Added in version 0.3.66"

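A runnable sketch of an `incremental` indexing pass per the arguments above, using the in-memory record manager and vector store (document contents illustrative):

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.indexing import InMemoryRecordManager, index
from langchain_core.vectorstores import InMemoryVectorStore

record_manager = InMemoryRecordManager(namespace="demo")
record_manager.create_schema()
vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=64))

docs = [Document(page_content="hello", metadata={"source": "a.txt"})]
result = index(docs, record_manager, vector_store, cleanup="incremental", source_id_key="source")
print(result)  # e.g. {'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```
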
@@ -61,7 +61,7 @@ class RecordManager(ABC):
"""Initialize the record manager.

Args:
    namespace (str): The namespace for the record manager.
    namespace: The namespace for the record manager.
"""
self.namespace = namespace

@@ -244,7 +244,7 @@ class InMemoryRecordManager(RecordManager):
"""Initialize the in-memory record manager.

Args:
    namespace (str): The namespace for the record manager.
    namespace: The namespace for the record manager.
"""
super().__init__(namespace)
# Each key points to a dictionary
@@ -278,10 +278,10 @@ class InMemoryRecordManager(RecordManager):
Args:
    keys: A list of record keys to upsert.
    group_ids: A list of group IDs corresponding to the keys.
        Defaults to `None`.

    time_at_least: Optional timestamp. Implementation can use this
        to optionally verify that the timestamp IS at least this time
        in the system that stores. Defaults to `None`.
        in the system that stores.
        E.g., use to validate that the time in the postgres database
        is equal to or larger than the given timestamp, if not
        raise an error.
@@ -315,10 +315,10 @@ class InMemoryRecordManager(RecordManager):
Args:
    keys: A list of record keys to upsert.
    group_ids: A list of group IDs corresponding to the keys.
        Defaults to `None`.

    time_at_least: Optional timestamp. Implementation can use this
        to optionally verify that the timestamp IS at least this time
        in the system that stores. Defaults to `None`.
        in the system that stores.
        E.g., use to validate that the time in the postgres database
        is equal to or larger than the given timestamp, if not
        raise an error.
@@ -361,13 +361,13 @@ class InMemoryRecordManager(RecordManager):

Args:
    before: Filter to list records updated before this time.
        Defaults to `None`.

    after: Filter to list records updated after this time.
        Defaults to `None`.

    group_ids: Filter to list records with specific group IDs.
        Defaults to `None`.

    limit: optional limit on the number of records to return.
        Defaults to `None`.


Returns:
    A list of keys for the matching records.
@@ -397,13 +397,13 @@ class InMemoryRecordManager(RecordManager):

Args:
    before: Filter to list records updated before this time.
        Defaults to `None`.

    after: Filter to list records updated after this time.
        Defaults to `None`.

    group_ids: Filter to list records with specific group IDs.
        Defaults to `None`.

    limit: optional limit on the number of records to return.
        Defaults to `None`.


Returns:
    A list of keys for the matching records.
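An illustrative sketch of the bookkeeping calls documented above, on the in-memory implementation:

```python
from langchain_core.indexing import InMemoryRecordManager

manager = InMemoryRecordManager(namespace="demo")
manager.create_schema()
manager.update(["doc-1", "doc-2"], group_ids=["src-a", "src-a"])

print(manager.exists(["doc-1", "missing"]))    # [True, False]
print(manager.list_keys(group_ids=["src-a"]))  # ['doc-1', 'doc-2']
```
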
@@ -529,7 +529,7 @@ class DocumentIndex(BaseRetriever):
**kwargs: Additional keyword arguments.

Returns:
    UpsertResponse: A response object that contains the list of IDs that were
    A response object that contains the list of IDs that were
        successfully added or updated in the vectorstore and the list of IDs that
        failed to be added or updated.
"""
@@ -552,7 +552,7 @@ class DocumentIndex(BaseRetriever):
**kwargs: Additional keyword arguments.

Returns:
    UpsertResponse: A response object that contains the list of IDs that were
    A response object that contains the list of IDs that were
        successfully added or updated in the vectorstore and the list of IDs that
        failed to be added or updated.
"""
@@ -571,12 +571,12 @@ class DocumentIndex(BaseRetriever):

Args:
    ids: List of ids to delete.
    kwargs: Additional keyword arguments. This is up to the implementation.
    **kwargs: Additional keyword arguments. This is up to the implementation.
        For example, can include an option to delete the entire index,
        or else issue a non-blocking delete etc.

Returns:
    DeleteResponse: A response object that contains the list of IDs that were
    A response object that contains the list of IDs that were
        successfully deleted and the list of IDs that failed to be deleted.
"""

@@ -589,11 +589,11 @@ class DocumentIndex(BaseRetriever):

Args:
    ids: List of ids to delete.
    kwargs: Additional keyword arguments. This is up to the implementation.
    **kwargs: Additional keyword arguments. This is up to the implementation.
        For example, can include an option to delete the entire index.

Returns:
    DeleteResponse: A response object that contains the list of IDs that were
    A response object that contains the list of IDs that were
        successfully deleted and the list of IDs that failed to be deleted.
"""
return await run_in_executor(
@@ -624,10 +624,10 @@ class DocumentIndex(BaseRetriever):

Args:
    ids: List of IDs to get.
    kwargs: Additional keyword arguments. These are up to the implementation.
    **kwargs: Additional keyword arguments. These are up to the implementation.

Returns:
    list[Document]: List of documents that were found.
    List of documents that were found.
"""

async def aget(
@@ -650,10 +650,10 @@ class DocumentIndex(BaseRetriever):

Args:
    ids: List of IDs to get.
    kwargs: Additional keyword arguments. These are up to the implementation.
    **kwargs: Additional keyword arguments. These are up to the implementation.

Returns:
    list[Document]: List of documents that were found.
    List of documents that were found.
"""
return await run_in_executor(
    None,
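A hedged sketch of the upsert/get/delete lifecycle; the in-memory implementation and its import path are assumptions here, and any `DocumentIndex` subclass follows the same contract:

```python
from langchain_core.documents import Document
from langchain_core.indexing.in_memory import InMemoryDocumentIndex  # path assumed

doc_index = InMemoryDocumentIndex()
response = doc_index.upsert([Document(page_content="hello", id="1")])
print(response["succeeded"])                 # ['1']
print(doc_index.get(["1"])[0].page_content)  # 'hello'
doc_index.delete(["1"])
```
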
@@ -1,45 +1,29 @@
"""Language models.

**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: chat models and
"old-fashioned" LLMs.

LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.

**Chat Models**
**Chat models**

Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
as outputs (as opposed to using plain text). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.

The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides with more
information on how to implement a custom chat model.
should inherit from this class.

To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:

https://python.langchain.com/docs/how_to/custom_chat_model/
See existing [chat model integrations](https://docs.langchain.com/oss/python/integrations/chat).

**LLMs**

Language models that takes a string as input and returns a string.
These are traditionally older models (newer models generally are Chat Models,
see below).

Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.

To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:

https://python.langchain.com/docs/how_to/custom_llm/

These are traditionally older models (newer models generally are chat models).

Although the underlying models are string in, string out, the LangChain wrappers also
allow these models to take messages as input. This gives them the same interface as
chat models. When messages are passed in as input, they will be formatted into a string
under the hood before being passed to the underlying model.
"""

from typing import TYPE_CHECKING

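A sketch of the shared interface described above, using the offline fake models from `langchain_core`: both accept the same message input, but the LLM returns a string while the chat model returns a message:

```python
from langchain_core.language_models import FakeListLLM, GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage

llm = FakeListLLM(responses=["string out"])
chat = GenericFakeChatModel(messages=iter([AIMessage(content="message out")]))

messages = [HumanMessage("same input for both")]
print(llm.invoke(messages))   # str: messages are flattened to a prompt string
print(chat.invoke(messages))  # AIMessage
```
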
@@ -35,7 +35,7 @@ def is_openai_data_block(
different type, this function will return False.

Returns:
    True if the block is a valid OpenAI data block and matches the filter_
    `True` if the block is a valid OpenAI data block and matches the filter_
    (if provided).

"""
@@ -89,21 +89,20 @@ class ParsedDataUri(TypedDict):
def _parse_data_uri(uri: str) -> ParsedDataUri | None:
    """Parse a data URI into its components.

    If parsing fails, return None. If either MIME type or data is missing, return None.
    If parsing fails, return `None`. If either MIME type or data is missing, return
    `None`.

    Example:
        .. code-block:: python

            data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
            parsed = _parse_data_uri(data_uri)

            assert parsed == {
                "source_type": "base64",
                "mime_type": "image/jpeg",
                "data": "/9j/4AAQSkZJRg...",
            }

        ```python
        data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
        parsed = _parse_data_uri(data_uri)

        assert parsed == {
            "source_type": "base64",
            "mime_type": "image/jpeg",
            "data": "/9j/4AAQSkZJRg...",
        }
        ```
    """
    regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
    match = re.match(regex, uri)
@@ -150,48 +149,48 @@ def _normalize_messages(

`URLContentBlock`:

.. codeblock::

    {
        mime_type: NotRequired[str]
        type: Literal['image', 'audio', 'file'],
        source_type: Literal['url'],
        url: str,
    }

```python
{
    mime_type: NotRequired[str]
    type: Literal['image', 'audio', 'file'],
    source_type: Literal['url'],
    url: str,
}
```

`Base64ContentBlock`:

.. codeblock::

    {
        mime_type: NotRequired[str]
        type: Literal['image', 'audio', 'file'],
        source_type: Literal['base64'],
        data: str,
    }

```python
{
    mime_type: NotRequired[str]
    type: Literal['image', 'audio', 'file'],
    source_type: Literal['base64'],
    data: str,
}
```

`IDContentBlock`:

(In practice, this was never used)

.. codeblock::

    {
        type: Literal['image', 'audio', 'file'],
        source_type: Literal['id'],
        id: str,
    }

```python
{
    type: Literal["image", "audio", "file"],
    source_type: Literal["id"],
    id: str,
}
```

`PlainTextContentBlock`:

.. codeblock::

    {
        mime_type: NotRequired[str]
        type: Literal['file'],
        source_type: Literal['text'],
        url: str,
    }

```python
{
    mime_type: NotRequired[str]
    type: Literal['file'],
    source_type: Literal['text'],
    url: str,
}
```

If a v1 message is passed in, it will be returned as-is, meaning it is safe to
always pass in v1 messages to this function for assurance.
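A standalone sketch of the base64 data-URI parsing shown above; the regex is the one from the diff:

```python
import re

# Named groups capture the MIME type and the base64 payload.
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, "data:image/jpeg;base64,/9j/4AAQSkZJRg...")
assert match is not None
print(match.group("mime_type"))  # image/jpeg
print(match.group("data"))       # /9j/4AAQSkZJRg...
```
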
@@ -96,9 +96,16 @@ def _get_token_ids_default_method(text: str) -> list[int]:
|
||||
|
||||
|
||||
LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
|
||||
"""Input to a language model."""
|
||||
|
||||
LanguageModelOutput = BaseMessage | str
|
||||
"""Output from a language model."""
|
||||
|
||||
LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
|
||||
"""Input/output interface for a language model."""
|
||||
|
||||
LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
|
||||
"""Type variable for the output of a language model."""
|
||||
|
||||
|
||||
def _get_verbosity() -> bool:
|
||||
@@ -123,7 +130,6 @@ class BaseLanguageModel(
* If instance of `BaseCache`, will use the provided cache.

Caching is not currently supported for streaming methods of models.

"""
verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
"""Whether to print out response text."""
@@ -146,7 +152,7 @@ class BaseLanguageModel(
def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
"""If verbose is `None`, set it.

This allows users to pass in None as verbose to access the global setting.
This allows users to pass in `None` as verbose to access the global setting.

Args:
verbose: The verbosity setting to use.
@@ -186,12 +192,12 @@ class BaseLanguageModel(
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
prompts: List of `PromptValue` objects. A `PromptValue` is an object that
can be converted to match the format of any language model (string for
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
@@ -200,8 +206,8 @@ class BaseLanguageModel(
to the model provider API call.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.

"""

@@ -223,12 +229,12 @@ class BaseLanguageModel(
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
prompts: List of `PromptValue` objects. A `PromptValue` is an object that
can be converted to match the format of any language model (string for
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
@@ -237,8 +243,8 @@ class BaseLanguageModel(
to the model provider API call.

Returns:
An `LLMResult`, which contains a list of candidate Generations for each
input prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.

"""


@@ -108,7 +108,7 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:


def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
"""Format messages for tracing in ``on_chat_model_start``.
"""Format messages for tracing in `on_chat_model_start`.

- Update image content blocks to OpenAI Chat Completions format (backward
compatibility).
@@ -185,7 +185,7 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
ValueError: If no generations are found in the stream.

Returns:
ChatResult: Chat result.
Chat result.

"""
generation = next(stream, None)
@@ -213,7 +213,7 @@ async def agenerate_from_stream(
stream: Iterator of `ChatGenerationChunk`.

Returns:
ChatResult: Chat result.
Chat result.

"""
chunks = [chunk async for chunk in stream]
@@ -240,79 +240,52 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di


class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
"""Base class for chat models.
r"""Base class for chat models.

Key imperative methods:
Methods that actually call the underlying model.

+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| Method | Input | Output | Description |
+===========================+==============================================================+============================================================+=====================================================================================+
| `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
| `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
+---------------------------+--------------------------------------------------------------+------------------------------------------------------------+-----------------------------------------------------------------------------------+
This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.

This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.
| Method | Input | Output | Description |
| ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
| `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
| `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
| `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
| `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
| `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
| `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
| `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
| `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
| `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |

Key declarative methods:
Methods for creating another Runnable using the ChatModel.

+----------------------------------+-----------------------------------------------------------------------------------------------+
| Method | Description |
+==================================+===============================================================================================+
| `bind_tools` | Create ChatModel that can call tools. |
+----------------------------------+-----------------------------------------------------------------------------------------------+
| `with_structured_output` | Create wrapper that structures model output using schema. |
+----------------------------------+-----------------------------------------------------------------------------------------------+
| `with_retry` | Create wrapper that retries model calls on failure. |
+----------------------------------+-----------------------------------------------------------------------------------------------+
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
+----------------------------------+-----------------------------------------------------------------------------------------------+
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
+----------------------------------+-----------------------------------------------------------------------------------------------+
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
+----------------------------------+-----------------------------------------------------------------------------------------------+
Methods for creating another `Runnable` using the chat model.

This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.

| Method | Description |
| ---------------------------- | ------------------------------------------------------------------------------------------ |
| `bind_tools` | Create chat model that can call tools. |
| `with_structured_output` | Create wrapper that structures model output using schema. |
| `with_retry` | Create wrapper that retries model calls on failure. |
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |

Creating custom chat model:
Custom chat model implementations should inherit from this class.
Please reference the table below for information about which
methods and properties are required or optional for implementations.

+----------------------------------+--------------------------------------------------------------------+-------------------+
| Method/Property | Description | Required/Optional |
+==================================+====================================================================+===================+
| Method/Property | Description | Required |
| -------------------------------- | ------------------------------------------------------------------ | ----------------- |
| `_generate` | Use to generate a chat result from a prompt | Required |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_stream` | Use to implement streaming | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_agenerate` | Use to implement a native async method | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+
| `_astream` | Use to implement async version of `_stream` | Optional |
+----------------------------------+--------------------------------------------------------------------+-------------------+

Follow the guide for more information on how to implement a custom Chat Model:
[Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

"""  # noqa: E501
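In plain usage, the imperative table boils down to a few calls. A sketch, with the provider package and model name purely illustrative; any `BaseChatModel` subclass behaves the same:

```python
from langchain_openai import ChatOpenAI  # illustrative provider

model = ChatOpenAI(model="gpt-4o-mini")

# invoke: a single chat model call returning an AIMessage.
reply = model.invoke("Say hello in French")

# stream: yields BaseMessageChunk objects as they arrive.
for chunk in model.stream("Count to three"):
    print(chunk.content, end="")

# batch: runs invoke over several inputs in concurrent threads.
replies = model.batch(["Hi", "Hello", "Hey"])
```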
@@ -327,9 +300,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

- If `True`, will always bypass streaming case.
- If `'tool_calling'`, will bypass streaming case only when the model is called
with a `tools` keyword argument. In other words, LangChain will automatically
switch to non-streaming behavior (`invoke`) only when the tools argument is
provided. This offers the best of both worlds.
with a `tools` keyword argument. In other words, LangChain will automatically
switch to non-streaming behavior (`invoke`) only when the tools argument is
provided. This offers the best of both worlds.
- If `False` (Default), will always use streaming case if available.

The main reason for this flag is that code might be written using `stream` and
@@ -342,18 +315,19 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
)
"""Version of `AIMessage` output format to store in message content.

`AIMessage.content_blocks` will lazily parse the contents of ``content`` into a
`AIMessage.content_blocks` will lazily parse the contents of `content` into a
standard format. This flag can be used to additionally store the standard format
in message content, e.g., for serialization purposes.

Supported values:

- `'v0'`: provider-specific format in content (can lazily-parse with
`.content_blocks`)
- `'v1'`: standardized format in content (consistent with `.content_blocks`)
`content_blocks`)
- `'v1'`: standardized format in content (consistent with `content_blocks`)

Partner packages (e.g., `langchain-openai`) can also use this field to roll out
new content formats in a backward-compatible way.
Partner packages (e.g.,
[`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
field to roll out new content formats in a backward-compatible way.

!!! version-added "Added in version 1.0"
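Since `output_version` is a regular model field, opting in should be a plain init argument. A hedged sketch; the provider and model name are illustrative:

```python
from langchain_openai import ChatOpenAI  # illustrative provider

# Store standardized (v1) content blocks directly in message content,
# instead of lazily parsing provider-specific content on access.
model = ChatOpenAI(model="gpt-4o-mini", output_version="v1")
```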
@@ -1533,7 +1507,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
- a `TypedDict` class,
- or a Pydantic class.

If ``schema`` is a Pydantic class then the model output will be a
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
@@ -1543,110 +1517,111 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
include_raw:
If `False` then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If `True`
then both the raw model response (a BaseMessage) and the parsed model
then both the raw model response (a `BaseMessage`) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
with keys `'raw'`, `'parsed'`, and `'parsing_error'`.

Raises:
ValueError: If there are any unsupported ``kwargs``.
ValueError: If there are any unsupported `kwargs`.
NotImplementedError: If the model does not implement
``with_structured_output()``.
`with_structured_output()`.

Returns:
A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.

If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
an instance of ``schema`` (i.e., a Pydantic object).
If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
an instance of `schema` (i.e., a Pydantic object).

Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
Otherwise, if `include_raw` is False then Runnable outputs a dict.

If ``include_raw`` is True, then Runnable outputs a dict with keys:
If `include_raw` is True, then Runnable outputs a dict with keys:

- ``'raw'``: BaseMessage
- ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
- ``'parsing_error'``: BaseException | None
- `'raw'`: BaseMessage
- `'parsed'`: None if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: BaseException | None

Example: Pydantic schema (include_raw=False):
.. code-block:: python

from pydantic import BaseModel
```python
from pydantic import BaseModel


class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

answer: str
justification: str
answer: str
justification: str


llm = ChatModel(model="model-name", temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)

structured_llm.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)

# -> AnswerWithJustification(
#     answer='They weigh the same',
#     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
# -> AnswerWithJustification(
#     answer='They weigh the same',
#     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```

Example: Pydantic schema (include_raw=True):
.. code-block:: python

from pydantic import BaseModel
```python
from pydantic import BaseModel


class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

answer: str
justification: str
answer: str
justification: str


llm = ChatModel(model="model-name", temperature=0)
structured_llm = llm.with_structured_output(
    AnswerWithJustification, include_raw=True
)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(
    AnswerWithJustification, include_raw=True
)

structured_llm.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
# -> {
#     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
#     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
#     'parsing_error': None
# }
structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
# -> {
#     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
#     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
#     'parsing_error': None
# }
```

Example: Dict schema (include_raw=False):
.. code-block:: python

from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
```python
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool


class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

answer: str
justification: str
answer: str
justification: str


dict_schema = convert_to_openai_tool(AnswerWithJustification)
llm = ChatModel(model="model-name", temperature=0)
structured_llm = llm.with_structured_output(dict_schema)
dict_schema = convert_to_openai_tool(AnswerWithJustification)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(dict_schema)

structured_llm.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
# -> {
#     'answer': 'They weigh the same',
#     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
# -> {
#     'answer': 'They weigh the same',
#     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```

!!! warning "Behavior changed in 0.2.26"
Added support for TypedDict class.
Added support for TypedDict class.

"""  # noqa: E501
_ = kwargs.pop("method", None)
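The changelog note above mentions TypedDict support without showing it. For completeness, a hedged sketch of that variant, reusing the docstring's own `ChatModel` placeholder; the `Annotated` metadata pattern is an assumption on our part:

```python
from typing_extensions import Annotated, TypedDict


class AnswerWithJustification(TypedDict):
    """An answer to the user question along with justification."""

    answer: str
    justification: Annotated[str, ..., "Why the answer is correct"]


model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {'answer': 'They weigh the same', 'justification': '...'}
```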
@@ -1693,7 +1668,7 @@ class SimpleChatModel(BaseChatModel):

!!! note
This implementation is primarily here for backwards compatibility. For new
implementations, please use ``BaseChatModel`` directly.
implementations, please use `BaseChatModel` directly.

"""


@@ -1,4 +1,4 @@
"""Fake ChatModel for testing purposes."""
"""Fake chat model for testing purposes."""

import asyncio
import re
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig


class FakeMessagesListChatModel(BaseChatModel):
"""Fake ``ChatModel`` for testing purposes."""
"""Fake chat model for testing purposes."""

responses: list[BaseMessage]
"""List of responses to **cycle** through in order."""
@@ -57,7 +57,7 @@ class FakeListChatModelError(Exception):


class FakeListChatModel(SimpleChatModel):
"""Fake ChatModel for testing purposes."""
"""Fake chat model for testing purposes."""

responses: list[str]
"""List of responses to **cycle** through in order."""
@@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel):
"""Generic fake chat model that can be used to test the chat model interface.

* Chat model should be usable in both sync and async tests
* Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
tokens.
* Invokes `on_llm_new_token` to allow for testing of callback related code for new
tokens.
* Includes logic to break messages into message chunk to facilitate testing of
streaming.
streaming.

"""

@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
to make the interface more generic if needed.

!!! note
if you want to pass a list, you can use ``iter`` to convert it to an iterator.
if you want to pass a list, you can use `iter` to convert it to an iterator.

!!! warning
Streaming is not implemented yet. We should try to implement it in the future by
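The fake models above are handy in tests. A minimal sketch of the documented `iter` pattern:

```python
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

# Pass a list through iter(), as the note above suggests.
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello")]))
assert model.invoke("hi").content == "hello"
```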
@@ -74,8 +74,8 @@ def create_base_retry_decorator(

Args:
error_types: List of error types to retry on.
max_retries: Number of retries. Default is 1.
run_manager: Callback manager for the run. Default is None.
max_retries: Number of retries.
run_manager: Callback manager for the run.

Returns:
A retry decorator.
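A hedged usage sketch for the decorator factory; the error type and the wrapped function are illustrative, not from the source:

```python
from langchain_core.language_models.llms import create_base_retry_decorator

retry = create_base_retry_decorator([TimeoutError], max_retries=3)


@retry
def flaky_provider_call() -> str:
    # Illustrative body: transient TimeoutErrors raised here would be
    # retried by the decorator up to three times.
    ...
```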
@@ -153,7 +153,7 @@ def get_prompts(
Args:
params: Dictionary of parameters.
prompts: List of prompts.
cache: Cache object. Default is None.
cache: Cache object.

Returns:
A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -189,7 +189,7 @@ async def aget_prompts(
Args:
params: Dictionary of parameters.
prompts: List of prompts.
cache: Cache object. Default is None.
cache: Cache object.

Returns:
A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -835,7 +835,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of string prompts.
@@ -857,8 +857,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):

Raises:
ValueError: If prompts is not a list.
ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
``run_name`` (if provided) does not match the length of prompts.
ValueError: If the length of `callbacks`, `tags`, `metadata`, or
`run_name` (if provided) does not match the length of prompts.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
@@ -1105,7 +1105,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
prompts: List of string prompts.
@@ -1126,8 +1126,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
to the model provider API call.

Raises:
ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
``run_name`` (if provided) does not match the length of prompts.
ValueError: If the length of `callbacks`, `tags`, `metadata`, or
`run_name` (if provided) does not match the length of prompts.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
@@ -1340,11 +1340,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
ValueError: If the file path is not a string or Path object.

Example:

.. code-block:: python

llm.save(file_path="path/llm.yaml")

```python
llm.save(file_path="path/llm.yaml")
```
"""
# Convert file to Path object.
save_path = Path(file_path)

@@ -42,10 +42,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:

Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
pretty: Whether to pretty print the json. If `True`, the json will be
indented with 2 spaces (if no indent is provided as part of `kwargs`).
**kwargs: Additional arguments to pass to `json.dumps`

Returns:
A json string representation of the object.

@@ -63,16 +63,13 @@ class Reviver:
Args:
secrets_map: A map of secrets to load. If a secret is not found in
the map, it will be loaded from the environment if `secrets_from_env`
is True. Defaults to `None`.
is True.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized. Defaults to `None`.
to allow to be deserialized.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.
Defaults to `None`.
ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.
"""
self.secrets_from_env = secrets_from_env
self.secrets_map = secrets_map or {}
@@ -107,7 +104,7 @@ class Reviver:
ValueError: If trying to deserialize something that cannot
be deserialized in the current version of langchain-core.
NotImplementedError: If the object is not implemented and
``ignore_unserializable_fields`` is False.
`ignore_unserializable_fields` is False.
"""
if (
value.get("lc") == 1
@@ -200,16 +197,13 @@ def loads(
text: The string to load.
secrets_map: A map of secrets to load. If a secret is not found in
the map, it will be loaded from the environment if `secrets_from_env`
is True. Defaults to `None`.
is True.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized. Defaults to `None`.
to allow to be deserialized.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.
Defaults to `None`.
ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.

Returns:
Revived LangChain objects.
@@ -245,16 +239,13 @@ def load(
obj: The object to load.
secrets_map: A map of secrets to load. If a secret is not found in
the map, it will be loaded from the environment if `secrets_from_env`
is True. Defaults to `None`.
is True.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized. Defaults to `None`.
to allow to be deserialized.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.
Defaults to `None`.
ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.

Returns:
Revived LangChain objects.
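Taken together, the hunks above describe the `dumps`/`loads` round trip. A minimal sketch; the secret name and value are illustrative:

```python
from langchain_core.load import dumps, loads
from langchain_core.messages import AIMessage

serialized = dumps(AIMessage(content="hello"), pretty=True)
# secrets_map entries are substituted back in during revival.
revived = loads(serialized, secrets_map={"OPENAI_API_KEY": "sk-..."})
assert isinstance(revived, AIMessage)
```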
@@ -25,16 +25,16 @@ class BaseSerialized(TypedDict):
|
||||
id: list[str]
|
||||
"""The unique identifier of the object."""
|
||||
name: NotRequired[str]
|
||||
"""The name of the object. Optional."""
|
||||
"""The name of the object."""
|
||||
graph: NotRequired[dict[str, Any]]
|
||||
"""The graph of the object. Optional."""
|
||||
"""The graph of the object."""
|
||||
|
||||
|
||||
class SerializedConstructor(BaseSerialized):
|
||||
"""Serialized constructor."""
|
||||
|
||||
type: Literal["constructor"]
|
||||
"""The type of the object. Must be ``'constructor'``."""
|
||||
"""The type of the object. Must be `'constructor'`."""
|
||||
kwargs: dict[str, Any]
|
||||
"""The constructor arguments."""
|
||||
|
||||
@@ -43,16 +43,16 @@ class SerializedSecret(BaseSerialized):
|
||||
"""Serialized secret."""
|
||||
|
||||
type: Literal["secret"]
|
||||
"""The type of the object. Must be ``'secret'``."""
|
||||
"""The type of the object. Must be `'secret'`."""
|
||||
|
||||
|
||||
class SerializedNotImplemented(BaseSerialized):
|
||||
"""Serialized not implemented."""
|
||||
|
||||
type: Literal["not_implemented"]
|
||||
"""The type of the object. Must be ``'not_implemented'``."""
|
||||
"""The type of the object. Must be `'not_implemented'`."""
|
||||
repr: str | None
|
||||
"""The representation of the object. Optional."""
|
||||
"""The representation of the object."""
|
||||
|
||||
|
||||
def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
|
||||
@@ -61,7 +61,7 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
|
||||
Args:
|
||||
value: The value.
|
||||
key: The key.
|
||||
model: The pydantic model.
|
||||
model: The Pydantic model.
|
||||
|
||||
Returns:
|
||||
Whether the value is different from the default.
|
||||
@@ -93,18 +93,18 @@ class Serializable(BaseModel, ABC):
|
||||
It relies on the following methods and properties:
|
||||
|
||||
- `is_lc_serializable`: Is this class serializable?
|
||||
By design, even if a class inherits from Serializable, it is not serializable by
|
||||
default. This is to prevent accidental serialization of objects that should not
|
||||
be serialized.
|
||||
- ``get_lc_namespace``: Get the namespace of the langchain object.
|
||||
During deserialization, this namespace is used to identify
|
||||
the correct class to instantiate.
|
||||
Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
|
||||
During deserialization an additional mapping is handle
|
||||
classes that have moved or been renamed across package versions.
|
||||
- ``lc_secrets``: A map of constructor argument names to secret ids.
|
||||
- ``lc_attributes``: List of additional attribute names that should be included
|
||||
as part of the serialized representation.
|
||||
By design, even if a class inherits from `Serializable`, it is not serializable
|
||||
by default. This is to prevent accidental serialization of objects that should
|
||||
not be serialized.
|
||||
- `get_lc_namespace`: Get the namespace of the LangChain object.
|
||||
During deserialization, this namespace is used to identify
|
||||
the correct class to instantiate.
|
||||
Please see the `Reviver` class in `langchain_core.load.load` for more details.
|
||||
During deserialization an additional mapping is handle classes that have moved
|
||||
or been renamed across package versions.
|
||||
- `lc_secrets`: A map of constructor argument names to secret ids.
|
||||
- `lc_attributes`: List of additional attribute names that should be included
|
||||
as part of the serialized representation.
|
||||
"""
|
||||
|
||||
# Remove default BaseModel init docstring.
|
||||
@@ -116,24 +116,24 @@ class Serializable(BaseModel, ABC):
|
||||
def is_lc_serializable(cls) -> bool:
|
||||
"""Is this class serializable?
|
||||
|
||||
By design, even if a class inherits from Serializable, it is not serializable by
|
||||
default. This is to prevent accidental serialization of objects that should not
|
||||
be serialized.
|
||||
By design, even if a class inherits from `Serializable`, it is not serializable
|
||||
by default. This is to prevent accidental serialization of objects that should
|
||||
not be serialized.
|
||||
|
||||
Returns:
|
||||
Whether the class is serializable. Default is False.
|
||||
Whether the class is serializable. Default is `False`.
|
||||
"""
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def get_lc_namespace(cls) -> list[str]:
|
||||
"""Get the namespace of the langchain object.
|
||||
"""Get the namespace of the LangChain object.
|
||||
|
||||
For example, if the class is `langchain.llms.openai.OpenAI`, then the
|
||||
namespace is ["langchain", "llms", "openai"]
|
||||
namespace is `["langchain", "llms", "openai"]`
|
||||
|
||||
Returns:
|
||||
The namespace as a list of strings.
|
||||
The namespace.
|
||||
"""
|
||||
return cls.__module__.split(".")
|
||||
|
||||
@@ -141,8 +141,7 @@ class Serializable(BaseModel, ABC):
|
||||
def lc_secrets(self) -> dict[str, str]:
|
||||
"""A map of constructor argument names to secret ids.
|
||||
|
||||
For example,
|
||||
{"openai_api_key": "OPENAI_API_KEY"}
|
||||
For example, `{"openai_api_key": "OPENAI_API_KEY"}`
|
||||
"""
|
||||
return {}
|
||||
|
||||
@@ -151,6 +150,7 @@ class Serializable(BaseModel, ABC):
|
||||
"""List of attribute names that should be included in the serialized kwargs.
|
||||
|
||||
These attributes must be accepted by the constructor.
|
||||
|
||||
Default is an empty dictionary.
|
||||
"""
|
||||
return {}
|
||||
@@ -194,7 +194,7 @@ class Serializable(BaseModel, ABC):
|
||||
ValueError: If the class has deprecated attributes.
|
||||
|
||||
Returns:
|
||||
A json serializable object or a SerializedNotImplemented object.
|
||||
A json serializable object or a `SerializedNotImplemented` object.
|
||||
"""
|
||||
if not self.is_lc_serializable():
|
||||
return self.to_json_not_implemented()
|
||||
@@ -269,7 +269,7 @@ class Serializable(BaseModel, ABC):
|
||||
"""Serialize a "not implemented" object.
|
||||
|
||||
Returns:
|
||||
SerializedNotImplemented.
|
||||
`SerializedNotImplemented`.
|
||||
"""
|
||||
return to_json_not_implemented(self)
|
||||
|
||||
@@ -284,8 +284,8 @@ def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:
|
||||
|
||||
Returns:
|
||||
Whether the field is useful. If the field is required, it is useful.
|
||||
If the field is not required, it is useful if the value is not None.
|
||||
If the field is not required and the value is None, it is useful if the
|
||||
If the field is not required, it is useful if the value is not `None`.
|
||||
If the field is not required and the value is `None`, it is useful if the
|
||||
default value is different from the value.
|
||||
"""
|
||||
field = type(inst).model_fields.get(key)
|
||||
@@ -344,10 +344,10 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
|
||||
"""Serialize a "not implemented" object.
|
||||
|
||||
Args:
|
||||
obj: object to serialize.
|
||||
obj: Object to serialize.
|
||||
|
||||
Returns:
|
||||
SerializedNotImplemented
|
||||
`SerializedNotImplemented`
|
||||
"""
|
||||
id_: list[str] = []
|
||||
try:
|
||||
|
||||
@@ -1,19 +1,4 @@
|
||||
"""**Messages** are objects used in prompts and chat conversations.
|
||||
|
||||
**Class hierarchy:**
|
||||
|
||||
.. code-block::
|
||||
|
||||
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
|
||||
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
|
||||
|
||||
**Main helpers:**
|
||||
|
||||
.. code-block::
|
||||
|
||||
ChatPromptTemplate
|
||||
|
||||
""" # noqa: E501
|
||||
"""**Messages** are objects used in prompts and chat conversations."""
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
|
||||
@@ -40,13 +40,13 @@ class InputTokenDetails(TypedDict, total=False):
|
||||
Does *not* need to sum to full input token count. Does *not* need to have all keys.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
{
|
||||
"audio": 10,
|
||||
"cache_creation": 200,
|
||||
"cache_read": 100,
|
||||
}
|
||||
```python
|
||||
{
|
||||
"audio": 10,
|
||||
"cache_creation": 200,
|
||||
"cache_read": 100,
|
||||
}
|
||||
```
|
||||
|
||||
!!! version-added "Added in version 0.3.9"
|
||||
|
||||
@@ -76,12 +76,12 @@ class OutputTokenDetails(TypedDict, total=False):
|
||||
Does *not* need to sum to full output token count. Does *not* need to have all keys.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
{
|
||||
"audio": 10,
|
||||
"reasoning": 200,
|
||||
}
|
||||
```python
|
||||
{
|
||||
"audio": 10,
|
||||
"reasoning": 200,
|
||||
}
|
||||
```
|
||||
|
||||
!!! version-added "Added in version 0.3.9"
|
||||
|
||||
@@ -104,22 +104,22 @@ class UsageMetadata(TypedDict):
|
||||
This is a standard representation of token usage that is consistent across models.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
{
|
||||
"input_tokens": 350,
|
||||
"output_tokens": 240,
|
||||
"total_tokens": 590,
|
||||
"input_token_details": {
|
||||
"audio": 10,
|
||||
"cache_creation": 200,
|
||||
"cache_read": 100,
|
||||
},
|
||||
"output_token_details": {
|
||||
"audio": 10,
|
||||
"reasoning": 200,
|
||||
},
|
||||
}
|
||||
```python
|
||||
{
|
||||
"input_tokens": 350,
|
||||
"output_tokens": 240,
|
||||
"total_tokens": 590,
|
||||
"input_token_details": {
|
||||
"audio": 10,
|
||||
"cache_creation": 200,
|
||||
"cache_read": 100,
|
||||
},
|
||||
"output_token_details": {
|
||||
"audio": 10,
|
||||
"reasoning": 200,
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
!!! warning "Behavior changed in 0.3.9"
|
||||
Added `input_token_details` and `output_token_details`.
|
||||
@@ -148,27 +148,26 @@ class UsageMetadata(TypedDict):
|
||||
class AIMessage(BaseMessage):
|
||||
"""Message from an AI.
|
||||
|
||||
AIMessage is returned from a chat model as a response to a prompt.
|
||||
An `AIMessage` is returned from a chat model as a response to a prompt.
|
||||
|
||||
This message represents the output of the model and consists of both
|
||||
the raw output as returned by the model together standardized fields
|
||||
the raw output as returned by the model and standardized fields
|
||||
(e.g., tool calls, usage metadata) added by the LangChain framework.
|
||||
|
||||
"""
|
||||
|
||||
tool_calls: list[ToolCall] = []
|
||||
"""If provided, tool calls associated with the message."""
|
||||
"""If present, tool calls associated with the message."""
|
||||
invalid_tool_calls: list[InvalidToolCall] = []
|
||||
"""If provided, tool calls with parsing errors associated with the message."""
|
||||
"""If present, tool calls with parsing errors associated with the message."""
|
||||
usage_metadata: UsageMetadata | None = None
|
||||
"""If provided, usage metadata for a message, such as token counts.
|
||||
"""If present, usage metadata for a message, such as token counts.
|
||||
|
||||
This is a standard representation of token usage that is consistent across models.
|
||||
|
||||
"""
|
||||
|
||||
type: Literal["ai"] = "ai"
|
||||
"""The type of the message (used for deserialization). Defaults to "ai"."""
|
||||
"""The type of the message (used for deserialization)."""
|
||||
|
||||
@overload
|
||||
def __init__(
|
||||
@@ -191,14 +190,14 @@ class AIMessage(BaseMessage):
|
||||
content_blocks: list[types.ContentBlock] | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize `AIMessage`.
|
||||
"""Initialize an `AIMessage`.
|
||||
|
||||
Specify ``content`` as positional arg or ``content_blocks`` for typing.
|
||||
Specify `content` as positional arg or `content_blocks` for typing.
|
||||
|
||||
Args:
|
||||
content: The content of the message.
|
||||
content_blocks: Typed standard content.
|
||||
kwargs: Additional arguments to pass to the parent class.
|
||||
**kwargs: Additional arguments to pass to the parent class.
|
||||
"""
|
||||
if content_blocks is not None:
|
||||
# If there are tool calls in content_blocks, but not in tool_calls, add them
|
||||
@@ -217,7 +216,11 @@ class AIMessage(BaseMessage):
|
||||
|
||||
@property
|
||||
def lc_attributes(self) -> dict:
|
||||
"""Attrs to be serialized even if they are derived from other init args."""
|
||||
"""Attributes to be serialized.
|
||||
|
||||
Includes all attributes, even if they are derived from other initialization
|
||||
arguments.
|
||||
"""
|
||||
return {
|
||||
"tool_calls": self.tool_calls,
|
||||
"invalid_tool_calls": self.invalid_tool_calls,
|
||||
@@ -225,7 +228,7 @@ class AIMessage(BaseMessage):
|
||||
|
||||
@property
|
||||
def content_blocks(self) -> list[types.ContentBlock]:
|
||||
"""Return content blocks of the message.
|
||||
"""Return standard, typed `ContentBlock` dicts from the message.
|
||||
|
||||
If the message has a known model provider, use the provider-specific translator
|
||||
first before falling back to best-effort parsing. For details, see the property
|
||||
@@ -331,11 +334,10 @@ class AIMessage(BaseMessage):
|
||||
|
||||
@override
|
||||
def pretty_repr(self, html: bool = False) -> str:
|
||||
"""Return a pretty representation of the message.
|
||||
"""Return a pretty representation of the message for display.
|
||||
|
||||
Args:
|
||||
html: Whether to return an HTML-formatted string.
|
||||
Defaults to `False`.
|
||||
|
||||
Returns:
|
||||
A pretty representation of the message.
|
||||
@@ -372,31 +374,27 @@ class AIMessage(BaseMessage):
|
||||
|
||||
|
||||
class AIMessageChunk(AIMessage, BaseMessageChunk):
|
||||
"""Message chunk from an AI."""
|
||||
"""Message chunk from an AI (yielded when streaming)."""
|
||||
|
||||
# Ignoring mypy re-assignment here since we're overriding the value
|
||||
# to make sure that the chunk variant can be discriminated from the
|
||||
# non-chunk variant.
|
||||
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment]
|
||||
"""The type of the message (used for deserialization).
|
||||
|
||||
Defaults to ``AIMessageChunk``.
|
||||
|
||||
"""
|
||||
"""The type of the message (used for deserialization)."""
|
||||
|
||||
tool_call_chunks: list[ToolCallChunk] = []
|
||||
"""If provided, tool call chunks associated with the message."""
|
||||
|
||||
chunk_position: Literal["last"] | None = None
|
||||
"""Optional span represented by an aggregated AIMessageChunk.
|
||||
"""Optional span represented by an aggregated `AIMessageChunk`.
|
||||
|
||||
If a chunk with ``chunk_position="last"`` is aggregated into a stream,
|
||||
``tool_call_chunks`` in message content will be parsed into `tool_calls`.
|
||||
If a chunk with `chunk_position="last"` is aggregated into a stream,
|
||||
`tool_call_chunks` in message content will be parsed into `tool_calls`.
|
||||
"""
|
||||
|
||||
@property
|
||||
def lc_attributes(self) -> dict:
|
||||
"""Attrs to be serialized even if they are derived from other init args."""
|
||||
"""Attributes to be serialized, even if they are derived from other initialization args.""" # noqa: E501
|
||||
return {
|
||||
"tool_calls": self.tool_calls,
|
||||
"invalid_tool_calls": self.invalid_tool_calls,
|
||||
@@ -404,7 +402,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
|
||||
|
||||
@property
|
||||
def content_blocks(self) -> list[types.ContentBlock]:
|
||||
"""Return content blocks of the message."""
|
||||
"""Return standard, typed `ContentBlock` dicts from the message."""
|
||||
if self.response_metadata.get("output_version") == "v1":
|
||||
return cast("list[types.ContentBlock]", self.content)
|
||||
|
||||
@@ -545,12 +543,15 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
|
||||
and call_id in id_to_tc
|
||||
):
|
||||
self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
|
||||
if "extras" in block:
|
||||
# mypy does not account for instance check for dict above
|
||||
self.content[idx]["extras"] = block["extras"] # type: ignore[index]
|
||||
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def init_server_tool_calls(self) -> Self:
|
||||
"""Parse server_tool_call_chunks."""
|
||||
"""Parse `server_tool_call_chunks`."""
|
||||
if (
|
||||
self.chunk_position == "last"
|
||||
and self.response_metadata.get("output_version") == "v1"
|
||||
@@ -596,14 +597,14 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
|
||||
def add_ai_message_chunks(
|
||||
left: AIMessageChunk, *others: AIMessageChunk
|
||||
) -> AIMessageChunk:
|
||||
"""Add multiple ``AIMessageChunk``s together.
|
||||
"""Add multiple `AIMessageChunk`s together.
|
||||
|
||||
Args:
|
||||
left: The first ``AIMessageChunk``.
|
||||
*others: Other ``AIMessageChunk``s to add.
|
||||
left: The first `AIMessageChunk`.
|
||||
*others: Other `AIMessageChunk`s to add.
|
||||
|
||||
Returns:
|
||||
The resulting ``AIMessageChunk``.
|
||||
The resulting `AIMessageChunk`.
|
||||
|
||||
"""
|
||||
content = merge_content(left.content, *(o.content for o in others))
|
||||
@@ -681,43 +682,42 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
|
||||
"""Recursively add two UsageMetadata objects.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
```python
|
||||
from langchain_core.messages.ai import add_usage
|
||||
|
||||
from langchain_core.messages.ai import add_usage
|
||||
left = UsageMetadata(
|
||||
input_tokens=5,
|
||||
output_tokens=0,
|
||||
total_tokens=5,
|
||||
input_token_details=InputTokenDetails(cache_read=3),
|
||||
)
|
||||
right = UsageMetadata(
|
||||
input_tokens=0,
|
||||
output_tokens=10,
|
||||
total_tokens=10,
|
||||
output_token_details=OutputTokenDetails(reasoning=4),
|
||||
)
|
||||
|
||||
left = UsageMetadata(
|
||||
input_tokens=5,
|
||||
output_tokens=0,
|
||||
total_tokens=5,
|
||||
input_token_details=InputTokenDetails(cache_read=3),
|
||||
)
|
||||
right = UsageMetadata(
|
||||
input_tokens=0,
|
||||
output_tokens=10,
|
||||
total_tokens=10,
|
||||
output_token_details=OutputTokenDetails(reasoning=4),
|
||||
)
|
||||
|
||||
add_usage(left, right)
|
||||
add_usage(left, right)
|
||||
```
|
||||
|
||||
results in
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
UsageMetadata(
|
||||
input_tokens=5,
|
||||
output_tokens=10,
|
||||
total_tokens=15,
|
||||
input_token_details=InputTokenDetails(cache_read=3),
|
||||
output_token_details=OutputTokenDetails(reasoning=4),
|
||||
)
|
||||
|
||||
```python
|
||||
UsageMetadata(
|
||||
input_tokens=5,
|
||||
output_tokens=10,
|
||||
total_tokens=15,
|
||||
input_token_details=InputTokenDetails(cache_read=3),
|
||||
output_token_details=OutputTokenDetails(reasoning=4),
|
||||
)
|
||||
```
|
||||
Args:
|
||||
left: The first ``UsageMetadata`` object.
|
||||
right: The second ``UsageMetadata`` object.
|
||||
left: The first `UsageMetadata` object.
|
||||
right: The second `UsageMetadata` object.
|
||||
|
||||
Returns:
|
||||
The sum of the two ``UsageMetadata`` objects.
|
||||
The sum of the two `UsageMetadata` objects.
|
||||
|
||||
"""
|
||||
if not (left or right):
@@ -740,48 +740,47 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM

 def subtract_usage(
     left: UsageMetadata | None, right: UsageMetadata | None
 ) -> UsageMetadata:
-    """Recursively subtract two ``UsageMetadata`` objects.
+    """Recursively subtract two `UsageMetadata` objects.

-    Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.
+    Token counts cannot be negative so the actual operation is `max(left - right, 0)`.

     Example:
-        .. code-block:: python
-
-            from langchain_core.messages.ai import subtract_usage
-
-            left = UsageMetadata(
-                input_tokens=5,
-                output_tokens=10,
-                total_tokens=15,
-                input_token_details=InputTokenDetails(cache_read=4),
-            )
-            right = UsageMetadata(
-                input_tokens=3,
-                output_tokens=8,
-                total_tokens=11,
-                output_token_details=OutputTokenDetails(reasoning=4),
-            )
-
-            subtract_usage(left, right)
+        ```python
+        from langchain_core.messages.ai import subtract_usage
+
+        left = UsageMetadata(
+            input_tokens=5,
+            output_tokens=10,
+            total_tokens=15,
+            input_token_details=InputTokenDetails(cache_read=4),
+        )
+        right = UsageMetadata(
+            input_tokens=3,
+            output_tokens=8,
+            total_tokens=11,
+            output_token_details=OutputTokenDetails(reasoning=4),
+        )
+
+        subtract_usage(left, right)
+        ```

     results in

-    .. code-block:: python
-
-        UsageMetadata(
-            input_tokens=2,
-            output_tokens=2,
-            total_tokens=4,
-            input_token_details=InputTokenDetails(cache_read=4),
-            output_token_details=OutputTokenDetails(reasoning=0),
-        )
+    ```python
+    UsageMetadata(
+        input_tokens=2,
+        output_tokens=2,
+        total_tokens=4,
+        input_token_details=InputTokenDetails(cache_read=4),
+        output_token_details=OutputTokenDetails(reasoning=0),
+    )
+    ```

     Args:
-        left: The first ``UsageMetadata`` object.
-        right: The second ``UsageMetadata`` object.
+        left: The first `UsageMetadata` object.
+        right: The second `UsageMetadata` object.

     Returns:
-        The resulting ``UsageMetadata`` after subtraction.
+        The resulting `UsageMetadata` after subtraction.

     """
     if not (left or right):
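Read together, the two hunks above fully specify the arithmetic: `add_usage` sums every field (recursing into the token-detail dicts), while `subtract_usage` computes `max(left - right, 0)` per field. A minimal sketch of that documented behavior, using only names visible in the diff:

```python
from langchain_core.messages.ai import (
    InputTokenDetails,
    UsageMetadata,
    add_usage,
    subtract_usage,
)

left = UsageMetadata(
    input_tokens=5,
    output_tokens=0,
    total_tokens=5,
    input_token_details=InputTokenDetails(cache_read=3),
)
right = UsageMetadata(input_tokens=3, output_tokens=8, total_tokens=11)

total = add_usage(left, right)
# input_tokens=8, output_tokens=8, total_tokens=16; cache_read=3 carried over

diff = subtract_usage(left, right)
# input_tokens: max(5 - 3, 0) == 2; output_tokens and total_tokens clip to 0
```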
@@ -48,13 +48,13 @@ class TextAccessor(str):

     Exists to maintain backward compatibility while transitioning from method-based to
     property-based text access in message objects. In LangChain <v1.0, message text was
-    accessed via ``.text()`` method calls. In v1.0+, the preferred pattern is property
-    access via ``.text``.
+    accessed via `.text()` method calls. In v1.0+, the preferred pattern is property
+    access via `.text`.

-    Rather than breaking existing code immediately, ``TextAccessor`` allows both
+    Rather than breaking existing code immediately, `TextAccessor` allows both
     patterns:
-    - Modern property access: ``message.text`` (returns string directly)
-    - Legacy method access: ``message.text()`` (callable, emits deprecation warning)
+    - Modern property access: `message.text` (returns string directly)
+    - Legacy method access: `message.text()` (callable, emits deprecation warning)

     """
@@ -67,12 +67,12 @@ class TextAccessor(str):
     def __call__(self) -> str:
         """Enable method-style text access for backward compatibility.

-        This method exists solely to support legacy code that calls ``.text()``
-        as a method. New code should use property access (``.text``) instead.
+        This method exists solely to support legacy code that calls `.text()`
+        as a method. New code should use property access (`.text`) instead.

         !!! deprecated
-            As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated.
-            Use ``.text`` as a property instead. This method will be removed in 2.0.0.
+            As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
+            Use `.text` as a property instead. This method will be removed in 2.0.0.

         Returns:
             The string content, identical to property access.
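Per the docstrings above, both access styles return the same string; only the call form warns. A quick sketch of the intended compatibility behavior:

```python
import warnings

from langchain_core.messages import AIMessage

msg = AIMessage("Hello")

assert msg.text == "Hello"  # preferred property access

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert msg.text() == "Hello"  # legacy call still works...
assert caught  # ...but emits a deprecation warning
```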
@@ -92,11 +92,11 @@ class TextAccessor(str):

 class BaseMessage(Serializable):
     """Base abstract message class.

-    Messages are the inputs and outputs of a ``ChatModel``.
+    Messages are the inputs and outputs of a chat model.
     """

     content: str | list[str | dict]
-    """The string contents of the message."""
+    """The contents of the message."""

     additional_kwargs: dict = Field(default_factory=dict)
     """Reserved for additional payload data associated with the message.
@@ -159,14 +159,14 @@ class BaseMessage(Serializable):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Initialize `BaseMessage`.
+        """Initialize a `BaseMessage`.

-        Specify ``content`` as positional arg or ``content_blocks`` for typing.
+        Specify `content` as positional arg or `content_blocks` for typing.

         Args:
-            content: The string contents of the message.
+            content: The contents of the message.
             content_blocks: Typed standard content.
-            kwargs: Additional arguments to pass to the parent class.
+            **kwargs: Additional arguments to pass to the parent class.
         """
         if content_blocks is not None:
             super().__init__(content=content_blocks, **kwargs)
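The `__init__` hunk documents two construction paths: a positional `content` argument, or `content_blocks` for typed standard content (which is routed into `content`). A short sketch:

```python
from langchain_core.messages import HumanMessage

# Positional string content
plain = HumanMessage("What is shown in this image?")

# Typed standard content via `content_blocks`
typed = HumanMessage(
    content_blocks=[{"type": "text", "text": "What is shown in this image?"}]
)
```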
@@ -184,10 +184,10 @@ class BaseMessage(Serializable):

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.

         Returns:
-            ``["langchain", "schema", "messages"]``
+            `["langchain", "schema", "messages"]`
         """
         return ["langchain", "schema", "messages"]
@@ -259,11 +259,11 @@ class BaseMessage(Serializable):
     def text(self) -> TextAccessor:
         """Get the text content of the message as a string.

-        Can be used as both property (``message.text``) and method (``message.text()``).
+        Can be used as both property (`message.text`) and method (`message.text()`).

         !!! deprecated
-            As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated.
-            Use ``.text`` as a property instead. This method will be removed in 2.0.0.
+            As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
+            Use `.text` as a property instead. This method will be removed in 2.0.0.

         Returns:
             The text content of the message.
@@ -307,7 +307,7 @@ class BaseMessage(Serializable):

         Args:
             html: Whether to format the message as HTML. If `True`, the message will be
-                formatted with HTML tags. Default is False.
+                formatted with HTML tags.

         Returns:
             A pretty representation of the message.
@@ -331,8 +331,8 @@ def merge_content(
     """Merge multiple message contents.

     Args:
-        first_content: The first ``content``. Can be a string or a list.
-        contents: The other ``content``s. Can be a string or a list.
+        first_content: The first `content`. Can be a string or a list.
+        contents: The other `content`s. Can be a string or a list.

     Returns:
         The merged content.
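`merge_content` is variadic: the first content plus any number of follow-ups, each a string or a list. A sketch of the documented merge, assuming string contents concatenate (as the `BaseMessageChunk.__add__` example in the next hunk implies) and list contents append:

```python
from langchain_core.messages.base import merge_content

assert merge_content("Hello", " World") == "Hello World"

# Assumption: a trailing list keeps block structure rather than flattening it
merged = merge_content("Hello", [{"type": "text", "text": " World"}])
```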
@@ -388,9 +388,9 @@ class BaseMessageChunk(BaseMessage):

         For example,

-        ``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")``
+        `AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`

-        will give ``AIMessageChunk(content="Hello World")``
+        will give `AIMessageChunk(content="Hello World")`

         """
         if isinstance(other, BaseMessageChunk):
@@ -440,7 +440,7 @@ def message_to_dict(message: BaseMessage) -> dict:

     Returns:
         Message as a dict. The dict will have a `type` key with the message type
-        and a ``data`` key with the message data as a dict.
+        and a `data` key with the message data as a dict.

     """
     return {"type": message.type, "data": message.model_dump()}
@@ -464,7 +464,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:

     Args:
         title: The title.
-        bold: Whether to bold the title. Default is False.
+        bold: Whether to bold the title.

     Returns:
         The title representation.
@@ -1,7 +1,7 @@
 """Derivations of standard content blocks from provider content.

 `AIMessage` will first attempt to use a provider-specific translator if
-``model_provider`` is set in `response_metadata` on the message. Consequently, each
+`model_provider` is set in `response_metadata` on the message. Consequently, each
 provider translator must handle all possible content response types from the provider,
 including text.
@@ -23,13 +23,13 @@ if TYPE_CHECKING:
 PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {}
 """Map model provider names to translator functions.

-The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another
+The dictionary maps provider names (e.g. `'openai'`, `'anthropic'`) to another
 dictionary with two keys:

-- ``'translate_content'``: Function to translate `AIMessage` content.
-- ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content.
+- `'translate_content'`: Function to translate `AIMessage` content.
+- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.

-When calling `.content_blocks` on an `AIMessage` or ``AIMessageChunk``, if
-``model_provider`` is set in `response_metadata`, the corresponding translator
+When calling `content_blocks` on an `AIMessage` or `AIMessageChunk`, if
+`model_provider` is set in `response_metadata`, the corresponding translator
 functions will be used to parse the content into blocks. Otherwise, best-effort parsing
 in `BaseMessage` will be used.
 """
@@ -43,9 +43,9 @@ def register_translator(
     """Register content translators for a provider in `PROVIDER_TRANSLATORS`.

     Args:
-        provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``).
+        provider: The model provider name (e.g. `'openai'`, `'anthropic'`).
         translate_content: Function to translate `AIMessage` content.
-        translate_content_chunk: Function to translate ``AIMessageChunk`` content.
+        translate_content_chunk: Function to translate `AIMessageChunk` content.
     """
     PROVIDER_TRANSLATORS[provider] = {
         "translate_content": translate_content,
@@ -62,7 +62,7 @@ def get_translator(
         provider: The model provider name.

     Returns:
-        Dictionary with ``'translate_content'`` and ``'translate_content_chunk'``
+        Dictionary with `'translate_content'` and `'translate_content_chunk'`
         functions, or None if no translator is registered for the provider. In such
         case, best-effort parsing in `BaseMessage` will be used.
     """
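`register_translator` and `get_translator` are a plain write/read pair over the `PROVIDER_TRANSLATORS` dict. A sketch of how an integration package would hook in; the provider name and translator bodies here are hypothetical, and the import path is inferred from the `block_translators` package referenced elsewhere in this diff:

```python
from langchain_core.messages.block_translators import (
    get_translator,
    register_translator,
)


def _translate(message):
    # Hypothetical translator: real ones map provider payloads to content blocks
    return [{"type": "text", "text": message.text}]


def _translate_chunk(chunk):
    return [{"type": "text", "text": chunk.text}]


register_translator("my_provider", _translate, _translate_chunk)
assert get_translator("my_provider")["translate_content"] is _translate
assert get_translator("unknown") is None  # falls back to best-effort parsing
```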
@@ -72,10 +72,10 @@ def get_translator(
 def _register_translators() -> None:
     """Register all translators in langchain-core.

-    A unit test ensures all modules in ``block_translators`` are represented here.
+    A unit test ensures all modules in `block_translators` are represented here.

     For translators implemented outside langchain-core, they can be registered by
-    calling ``register_translator`` from within the integration package.
+    calling `register_translator` from within the integration package.
     """
     from langchain_core.messages.block_translators.anthropic import (  # noqa: PLC0415
         _register_anthropic_translator,
@@ -31,12 +31,12 @@ def _convert_to_v1_from_anthropic_input(
 ) -> list[types.ContentBlock]:
     """Convert Anthropic format blocks to v1 format.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be Anthropic format to v1 ContentBlocks.

-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.

     Args:
         content: List of content blocks to process.
@@ -35,12 +35,12 @@ def _convert_to_v1_from_converse_input(
 ) -> list[types.ContentBlock]:
     """Convert Bedrock Converse format blocks to v1 format.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be Converse format to v1 ContentBlocks.

-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.

     Args:
         content: List of content blocks to process.
@@ -105,12 +105,12 @@ def _convert_to_v1_from_genai_input(
     Called when message isn't an `AIMessage` or `model_provider` isn't set on
     `response_metadata`.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be GenAI format to v1 ContentBlocks.

-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.

     Args:
         content: List of content blocks to process.
@@ -453,9 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
                     "status": status,  # type: ignore[typeddict-item]
                     "output": item.get("code_execution_result", ""),
                 }
+                server_tool_result_block["extras"] = {"block_type": item_type}
                 # Preserve original outcome in extras
                 if outcome is not None:
-                    server_tool_result_block["extras"] = {"outcome": outcome}
+                    server_tool_result_block["extras"]["outcome"] = outcome
                 converted_blocks.append(server_tool_result_block)
             else:
                 # Unknown type, preserve as non-standard
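The one-line rewrite in this hunk is a genuine bug fix: the old assignment rebuilt `extras` from scratch and silently dropped the `block_type` recorded on the line above; the new code mutates the existing dict. In isolation (values are illustrative):

```python
# Before: the second assignment clobbers the first
extras = {"block_type": "code_execution_result"}
extras = {"outcome": "OUTCOME_OK"}  # block_type is lost

# After: update the existing dict so both keys survive
extras = {"block_type": "code_execution_result"}
extras["outcome"] = "OUTCOME_OK"
assert extras == {"block_type": "code_execution_result", "outcome": "OUTCOME_OK"}
```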
@@ -1,37 +1,9 @@
 """Derivations of standard content blocks from Google (VertexAI) content."""

-import warnings
-
-from langchain_core.messages import AIMessage, AIMessageChunk
-from langchain_core.messages import content as types
-
-WARNED = False
-
-
-def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message with Google (VertexAI) content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Google "
-            "VertexAI."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
-
-
-def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a chunk with Google (VertexAI) content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Google "
-            "VertexAI."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
+from langchain_core.messages.block_translators.google_genai import (
+    translate_content,
+    translate_content_chunk,
+)


 def _register_google_vertexai_translator() -> None:
@@ -1,39 +1,135 @@
 """Derivations of standard content blocks from Groq content."""

-import warnings
+import json
+import re
+from typing import Any

 from langchain_core.messages import AIMessage, AIMessageChunk
 from langchain_core.messages import content as types
-
-WARNED = False
+from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs


-def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message with Groq content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Groq."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
+def _populate_extras(
+    standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
+) -> types.ContentBlock:
+    """Mutate a block, populating extras."""
+    if standard_block.get("type") == "non_standard":
+        return standard_block
+
+    for key, value in block.items():
+        if key not in known_fields:
+            if "extras" not in standard_block:
+                # Below type-ignores are because mypy thinks a non-standard block can
+                # get here, although we exclude them above.
+                standard_block["extras"] = {}  # type: ignore[typeddict-unknown-key]
+            standard_block["extras"][key] = value  # type: ignore[typeddict-item]
+
+    return standard_block
+
+
+def _parse_code_json(s: str) -> dict:
+    """Extract Python code from Groq built-in tool content.
+
+    Extracts the value of the 'code' field from a string of the form:
+    {"code": some_arbitrary_text_with_unescaped_quotes}
+
+    As Groq may not escape quotes in the executed tools, e.g.:
+    ```
+    '{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
+    ```
+    """  # noqa: E501
+    m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
+    if not m:
+        msg = (
+            "Could not extract Python code from Groq tool arguments. "
+            "Expected a JSON object with a 'code' field."
+        )
+        raise ValueError(msg)
+    return {"code": m.group(1)}


-def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message chunk with Groq content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Groq."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
+def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
+    """Convert groq message content to v1 format."""
+    content_blocks: list[types.ContentBlock] = []
+
+    if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
+        content_blocks.append(reasoning_block)
+
+    if executed_tools := message.additional_kwargs.get("executed_tools"):
+        for idx, executed_tool in enumerate(executed_tools):
+            args: dict[str, Any] | None = None
+            if arguments := executed_tool.get("arguments"):
+                try:
+                    args = json.loads(arguments)
+                except json.JSONDecodeError:
+                    if executed_tool.get("type") == "python":
+                        try:
+                            args = _parse_code_json(arguments)
+                        except ValueError:
+                            continue
+                    elif (
+                        executed_tool.get("type") == "function"
+                        and executed_tool.get("name") == "python"
+                    ):
+                        # GPT-OSS
+                        args = {"code": arguments}
+                    else:
+                        continue
+            if isinstance(args, dict):
+                name = ""
+                if executed_tool.get("type") == "search":
+                    name = "web_search"
+                elif executed_tool.get("type") == "python" or (
+                    executed_tool.get("type") == "function"
+                    and executed_tool.get("name") == "python"
+                ):
+                    name = "code_interpreter"
+                server_tool_call: types.ServerToolCall = {
+                    "type": "server_tool_call",
+                    "name": name,
+                    "id": str(idx),
+                    "args": args,
+                }
+                content_blocks.append(server_tool_call)
+            if tool_output := executed_tool.get("output"):
+                tool_result: types.ServerToolResult = {
+                    "type": "server_tool_result",
+                    "tool_call_id": str(idx),
+                    "output": tool_output,
+                    "status": "success",
+                }
+                known_fields = {"type", "arguments", "index", "output"}
+                _populate_extras(tool_result, executed_tool, known_fields)
+                content_blocks.append(tool_result)
+
+    if isinstance(message.content, str) and message.content:
+        content_blocks.append({"type": "text", "text": message.content})
+
+    for tool_call in message.tool_calls:
+        content_blocks.append(  # noqa: PERF401
+            {
+                "type": "tool_call",
+                "name": tool_call["name"],
+                "args": tool_call["args"],
+                "id": tool_call.get("id"),
+            }
+        )
+
+    return content_blocks
+
+
+def translate_content(message: AIMessage) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message with groq content."""
+    return _convert_to_v1_from_groq(message)
+
+
+def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message chunk with groq content."""
+    return _convert_to_v1_from_groq(message)


 def _register_groq_translator() -> None:
-    """Register the Groq translator with the central registry.
+    """Register the groq translator with the central registry.

     Run automatically when the module is imported.
     """
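`_parse_code_json` deliberately avoids `json.loads`, since Groq may emit unescaped quotes inside the `code` value; the greedy regex captures everything between the first and last quote of the field. A standalone check of the same pattern:

```python
import re

pattern = r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*'
raw = '{"code": "import math; print("hi"); print(math.sqrt(101))"}'  # invalid JSON

m = re.fullmatch(pattern, raw, flags=re.DOTALL)
assert m is not None
assert m.group(1) == 'import math; print("hi"); print(math.sqrt(101))'
```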
@@ -10,12 +10,12 @@ def _convert_v0_multimodal_input_to_v1(
 ) -> list[types.ContentBlock]:
     """Convert v0 multimodal blocks to v1 format.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any v0 format
     blocks to v1 format.

-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.

     Args:
         content: List of content blocks to process.
@@ -18,7 +18,7 @@ if TYPE_CHECKING:


 def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
-    """Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions."""
+    """Convert `ImageContentBlock` to format expected by OpenAI Chat Completions."""
     if "url" in block:
         return {
             "type": "image_url",
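For URL-based blocks the visible branch maps the v1 image block onto Chat Completions' `image_url` shape. A sketch; the module path is an assumption inferred from the surrounding hunks, and the output shape follows the branch shown above:

```python
# Assumed import path for the function shown in this hunk
from langchain_core.messages.block_translators.openai import (
    convert_to_openai_image_block,
)

block = {"type": "image", "url": "https://example.com/cat.png"}
converted = convert_to_openai_image_block(block)
# Expected shape:
# {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}
```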
@@ -155,12 +155,12 @@ def _convert_to_v1_from_chat_completions_input(
 ) -> list[types.ContentBlock]:
     """Convert OpenAI Chat Completions format blocks to v1 format.

-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be OpenAI format to v1 ContentBlocks.

-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.

     Args:
         content: List of content blocks to process.
@@ -263,7 +263,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"


 def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
-    """Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
+    """Convert v0 AIMessage into `output_version="responses/v1"` format."""
     from langchain_core.messages import AIMessageChunk  # noqa: PLC0415

     # Only update ChatOpenAI v0.3 AIMessages
@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
     """The speaker / role of the Message."""

     type: Literal["chat"] = "chat"
-    """The type of the message (used during serialization). Defaults to "chat"."""
+    """The type of the message (used during serialization)."""


 class ChatMessageChunk(ChatMessage, BaseMessageChunk):
@@ -29,11 +29,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used during serialization).
-
-    Defaults to ``'ChatMessageChunk'``.
-
-    """
+    """The type of the message (used during serialization)."""

     @override
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]
@@ -5,7 +5,7 @@
 change in future releases.

 This module provides standardized data structures for representing inputs to and
-outputs from LLMs. The core abstraction is the **Content Block**, a ``TypedDict``.
+outputs from LLMs. The core abstraction is the **Content Block**, a `TypedDict`.

 **Rationale**
@@ -20,59 +20,59 @@ blocks into the format required by its API.
 **Extensibility**

 Data **not yet mapped** to a standard block may be represented using the
-``NonStandardContentBlock``, which allows for provider-specific data to be included
+`NonStandardContentBlock`, which allows for provider-specific data to be included
 without losing the benefits of type checking and validation.

 Furthermore, provider-specific fields **within** a standard block are fully supported
-by default in the ``extras`` field of each block. This allows for additional metadata
+by default in the `extras` field of each block. This allows for additional metadata
 to be included without breaking the standard structure.

 !!! warning
-    Do not heavily rely on the ``extras`` field for provider-specific data! This field
+    Do not heavily rely on the `extras` field for provider-specific data! This field
     is subject to deprecation in future releases as we move towards PEP 728.

 !!! note
-    Following widespread adoption of `PEP 728 <https://peps.python.org/pep-0728/>`__, we
-    will add ``extra_items=Any`` as a param to Content Blocks. This will signify to type
+    Following widespread adoption of [PEP 728](https://peps.python.org/pep-0728/), we
+    will add `extra_items=Any` as a param to Content Blocks. This will signify to type
     checkers that additional provider-specific fields are allowed outside of the
-    ``extras`` field, and that will become the new standard approach to adding
+    `extras` field, and that will become the new standard approach to adding
     provider-specific metadata.

 ??? note

     **Example with PEP 728 provider-specific fields:**

-    .. code-block:: python
-
-        # Content block definition
-        # NOTE: `extra_items=Any`
-        class TextContentBlock(TypedDict, extra_items=Any):
-            type: Literal["text"]
-            id: NotRequired[str]
-            text: str
-            annotations: NotRequired[list[Annotation]]
-            index: NotRequired[int]
+    ```python
+    # Content block definition
+    # NOTE: `extra_items=Any`
+    class TextContentBlock(TypedDict, extra_items=Any):
+        type: Literal["text"]
+        id: NotRequired[str]
+        text: str
+        annotations: NotRequired[list[Annotation]]
+        index: NotRequired[int]
+    ```

-    .. code-block:: python
-
-        from langchain_core.messages.content import TextContentBlock
-
-        # Create a text content block with provider-specific fields
-        my_block: TextContentBlock = {
-            # Add required fields
-            "type": "text",
-            "text": "Hello, world!",
-            # Additional fields not specified in the TypedDict
-            # These are valid with PEP 728 and are typed as Any
-            "openai_metadata": {"model": "gpt-4", "temperature": 0.7},
-            "anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
-            "custom_field": "any value",
-        }
-
-        # Mutating an existing block to add provider-specific fields
-        openai_data = my_block["openai_metadata"]  # Type: Any
+    ```python
+    from langchain_core.messages.content import TextContentBlock
+
+    # Create a text content block with provider-specific fields
+    my_block: TextContentBlock = {
+        # Add required fields
+        "type": "text",
+        "text": "Hello, world!",
+        # Additional fields not specified in the TypedDict
+        # These are valid with PEP 728 and are typed as Any
+        "openai_metadata": {"model": "gpt-4", "temperature": 0.7},
+        "anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
+        "custom_field": "any value",
+    }
+
+    # Mutating an existing block to add provider-specific fields
+    openai_data = my_block["openai_metadata"]  # Type: Any
+    ```

-    PEP 728 is enabled with ``# type: ignore[call-arg]`` comments to suppress
+    PEP 728 is enabled with `# type: ignore[call-arg]` comments to suppress
     warnings from type checkers that don't yet support it. The functionality works
     correctly in Python 3.13+ and will be fully supported as the ecosystem catches
     up.
@@ -81,52 +81,51 @@ to be included without breaking the standard structure.

 The module defines several types of content blocks, including:

-- ``TextContentBlock``: Standard text output.
-- ``Citation``: For annotations that link text output to a source document.
-- ``ToolCall``: For function calling.
-- ``ReasoningContentBlock``: To capture a model's thought process.
+- `TextContentBlock`: Standard text output.
+- `Citation`: For annotations that link text output to a source document.
+- `ToolCall`: For function calling.
+- `ReasoningContentBlock`: To capture a model's thought process.
 - Multimodal data:
-    - ``ImageContentBlock``
-    - ``AudioContentBlock``
-    - ``VideoContentBlock``
-    - ``PlainTextContentBlock`` (e.g. .txt or .md files)
-    - ``FileContentBlock`` (e.g. PDFs, etc.)
+    - `ImageContentBlock`
+    - `AudioContentBlock`
+    - `VideoContentBlock`
+    - `PlainTextContentBlock` (e.g. .txt or .md files)
+    - `FileContentBlock` (e.g. PDFs, etc.)

 **Example Usage**

-.. code-block:: python
-
-    # Direct construction:
-    from langchain_core.messages.content import TextContentBlock, ImageContentBlock
-
-    multimodal_message: AIMessage(
-        content_blocks=[
-            TextContentBlock(type="text", text="What is shown in this image?"),
-            ImageContentBlock(
-                type="image",
-                url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
-                mime_type="image/png",
-            ),
-        ]
-    )
-
-    # Using factories:
-    from langchain_core.messages.content import create_text_block, create_image_block
-
-    multimodal_message: AIMessage(
-        content=[
-            create_text_block("What is shown in this image?"),
-            create_image_block(
-                url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
-                mime_type="image/png",
-            ),
-        ]
-    )
+```python
+# Direct construction:
+from langchain_core.messages.content import TextContentBlock, ImageContentBlock
+
+multimodal_message: AIMessage(
+    content_blocks=[
+        TextContentBlock(type="text", text="What is shown in this image?"),
+        ImageContentBlock(
+            type="image",
+            url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
+            mime_type="image/png",
+        ),
+    ]
+)
+
+# Using factories:
+from langchain_core.messages.content import create_text_block, create_image_block
+
+multimodal_message: AIMessage(
+    content=[
+        create_text_block("What is shown in this image?"),
+        create_image_block(
+            url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
+            mime_type="image/png",
+        ),
+    ]
+)
+```

 Factory functions offer benefits such as:
 - Automatic ID generation (when not provided)
 - No need to manually specify the `type` field

 """

 from typing import Any, Literal, get_args, get_type_hints
@@ -140,12 +139,12 @@ class Citation(TypedDict):
     """Annotation for citing data from a document.

     !!! note
-        ``start``/``end`` indices refer to the **response text**,
+        `start`/`end` indices refer to the **response text**,
         not the source text. This means that the indices are relative to the model's
-        response, not the original document (as specified in the ``url``).
+        response, not the original document (as specified in the `url`).

-    !!! note
-        ``create_citation`` may also be used as a factory to create a ``Citation``.
+    !!! note "Factory function"
+        `create_citation` may also be used as a factory to create a `Citation`.
         Benefits include:

         * Automatic ID generation (when not provided)
@@ -157,10 +156,12 @@ class Citation(TypedDict):
     """Type of the content block. Used for discrimination."""

     id: NotRequired[str]
-    """Content block identifier. Either:
+    """Content block identifier.
+
+    Either:

     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
|
||||
@@ -174,10 +175,10 @@ class Citation(TypedDict):
|
||||
"""
|
||||
|
||||
start_index: NotRequired[int]
|
||||
"""Start index of the **response text** (``TextContentBlock.text``)."""
|
||||
"""Start index of the **response text** (`TextContentBlock.text`)."""
|
||||
|
||||
end_index: NotRequired[int]
|
||||
"""End index of the **response text** (``TextContentBlock.text``)"""
|
||||
"""End index of the **response text** (`TextContentBlock.text`)"""
|
||||
|
||||
cited_text: NotRequired[str]
|
||||
"""Excerpt of source text being cited."""
|
||||
@@ -202,8 +203,9 @@ class NonStandardAnnotation(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -212,6 +214,7 @@ class NonStandardAnnotation(TypedDict):


 Annotation = Citation | NonStandardAnnotation
+"""A union of all defined `Annotation` types."""


 class TextContentBlock(TypedDict):
@@ -220,9 +223,9 @@ class TextContentBlock(TypedDict):
     This typically represents the main text content of a message, such as the response
     from a language model or the text of a user message.

-    !!! note
-        ``create_text_block`` may also be used as a factory to create a
-        ``TextContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_text_block` may also be used as a factory to create a
+        `TextContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -236,8 +239,9 @@ class TextContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -245,7 +249,7 @@ class TextContentBlock(TypedDict):
     """Block text."""

     annotations: NotRequired[list[Annotation]]
-    """``Citation``s and other annotations."""
+    """`Citation`s and other annotations."""

     index: NotRequired[int | str]
     """Index of block in aggregate response. Used during streaming."""
@@ -255,20 +259,19 @@ class TextContentBlock(TypedDict):


 class ToolCall(TypedDict):
-    """Represents a request to call a tool.
+    """Represents an AI's request to call a tool.

     Example:
-
-        .. code-block:: python
-
-            {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```python
+        {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```

     This represents a request to call the tool named "foo" with arguments {"a": 1}
     and an identifier of "123".

-    !!! note
-        ``create_tool_call`` may also be used as a factory to create a
-        ``ToolCall``. Benefits include:
+    !!! note "Factory function"
+        `create_tool_call` may also be used as a factory to create a
+        `ToolCall`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -301,24 +304,22 @@ class ToolCallChunk(TypedDict):


 class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
+    """A chunk of a tool call (yielded when streaming).

-    When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``),
+    When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
     all string attributes are concatenated. Chunks are only merged if their
-    values of ``index`` are equal and not `None`.
+    values of `index` are equal and not `None`.

     Example:
+        ```python
+        left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
+        right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

-        .. code-block:: python
-
-            left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
-            right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
-
-            (
-                AIMessageChunk(content="", tool_call_chunks=left_chunks)
-                + AIMessageChunk(content="", tool_call_chunks=right_chunks)
-            ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
+        (
+            AIMessageChunk(content="", tool_call_chunks=left_chunks)
+            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
+        ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
+        ```
     """

     # TODO: Consider making fields NotRequired[str] in the future.
@@ -385,7 +386,10 @@ class InvalidToolCall(TypedDict):


 class ServerToolCall(TypedDict):
-    """Tool call that is executed server-side."""
+    """Tool call that is executed server-side.
+
+    For example: code execution, web search, etc.
+    """

     type: Literal["server_tool_call"]
     """Used for discrimination."""
@@ -407,7 +411,7 @@ class ServerToolCall(TypedDict):


 class ServerToolCallChunk(TypedDict):
-    """A chunk of a tool call (as part of a stream)."""
+    """A chunk of a server-side tool call (yielded when streaming)."""

     type: Literal["server_tool_call_chunk"]
     """Used for discrimination."""
@@ -456,9 +460,9 @@ class ServerToolResult(TypedDict):
 class ReasoningContentBlock(TypedDict):
     """Reasoning output from an LLM.

-    !!! note
-        ``create_reasoning_block`` may also be used as a factory to create a
-        ``ReasoningContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_reasoning_block` may also be used as a factory to create a
+        `ReasoningContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -472,8 +476,9 @@ class ReasoningContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -481,7 +486,7 @@ class ReasoningContentBlock(TypedDict):
     """Reasoning text.

     Either the thought summary or the raw reasoning text itself. This is often parsed
-    from ``<think>`` tags in the model's response.
+    from `<think>` tags in the model's response.

     """
@@ -498,9 +503,9 @@
 class ImageContentBlock(TypedDict):
     """Image data.

-    !!! note
-        ``create_image_block`` may also be used as a factory to create a
-        ``ImageContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_image_block` may also be used as a factory to create a
+        `ImageContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -514,8 +519,9 @@ class ImageContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -525,7 +531,7 @@ class ImageContentBlock(TypedDict):
     mime_type: NotRequired[str]
     """MIME type of the image. Required for base64.

-    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#image>`__
+    [Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#image)

     """
@@ -545,9 +551,9 @@
 class VideoContentBlock(TypedDict):
     """Video data.

-    !!! note
-        ``create_video_block`` may also be used as a factory to create a
-        ``VideoContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_video_block` may also be used as a factory to create a
+        `VideoContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -561,8 +567,9 @@ class VideoContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -572,7 +579,7 @@ class VideoContentBlock(TypedDict):
     mime_type: NotRequired[str]
     """MIME type of the video. Required for base64.

-    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#video>`__
+    [Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#video)

     """
@@ -592,9 +599,9 @@
 class AudioContentBlock(TypedDict):
     """Audio data.

-    !!! note
-        ``create_audio_block`` may also be used as a factory to create an
-        ``AudioContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_audio_block` may also be used as a factory to create an
+        `AudioContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -607,8 +614,9 @@ class AudioContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -618,7 +626,7 @@ class AudioContentBlock(TypedDict):
     mime_type: NotRequired[str]
     """MIME type of the audio. Required for base64.

-    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#audio>`__
+    [Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#audio)

     """
@@ -639,18 +647,18 @@ class PlainTextContentBlock(TypedDict):
     """Plaintext data (e.g., from a document).

     !!! note
-        A ``PlainTextContentBlock`` existed in ``langchain-core<1.0.0``. Although the
+        A `PlainTextContentBlock` existed in `langchain-core<1.0.0`. Although the
         name has carried over, the structure has changed significantly. The only shared
-        keys between the old and new versions are `type` and ``text``, though the
-        `type` value has changed from ``'text'`` to ``'text-plain'``.
+        keys between the old and new versions are `type` and `text`, though the
+        `type` value has changed from `'text'` to `'text-plain'`.

     !!! note
         Title and context are optional fields that may be passed to the model. See
-        Anthropic `example <https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content>`__.
+        Anthropic [example](https://docs.claude.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).

-    !!! note
-        ``create_plaintext_block`` may also be used as a factory to create a
-        ``PlainTextContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_plaintext_block` may also be used as a factory to create a
+        `PlainTextContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -664,8 +672,9 @@ class PlainTextContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -698,18 +707,18 @@ class PlainTextContentBlock(TypedDict):


 class FileContentBlock(TypedDict):
-    """File data that doesn't fit into other multimodal blocks.
+    """File data that doesn't fit into other multimodal block types.

     This block is intended for files that are not images, audio, or plaintext. For
     example, it can be used for PDFs, Word documents, etc.

     If the file is an image, audio, or plaintext, you should use the corresponding
-    content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``,
-    ``PlainTextContentBlock``).
+    content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
+    `PlainTextContentBlock`).

-    !!! note
-        ``create_file_block`` may also be used as a factory to create a
-        ``FileContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_file_block` may also be used as a factory to create a
+        `FileContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -723,8 +732,9 @@ class FileContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -734,7 +744,7 @@ class FileContentBlock(TypedDict):
     mime_type: NotRequired[str]
     """MIME type of the file. Required for base64.

-    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__
+    [Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml)

     """
@@ -764,14 +774,14 @@ class NonStandardContentBlock(TypedDict):
     The purpose of this block should be to simply hold a provider-specific payload.
     If a provider's non-standard output includes reasoning and tool calls, it should be
     the adapter's job to parse that payload and emit the corresponding standard
-    ``ReasoningContentBlock`` and ``ToolCalls``.
+    `ReasoningContentBlock` and `ToolCalls`.

-    Has no ``extras`` field, as provider-specific data should be included in the
-    ``value`` field.
+    Has no `extras` field, as provider-specific data should be included in the
+    `value` field.

-    !!! note
-        ``create_non_standard_block`` may also be used as a factory to create a
-        ``NonStandardContentBlock``. Benefits include:
+    !!! note "Factory function"
+        `create_non_standard_block` may also be used as a factory to create a
+        `NonStandardContentBlock`. Benefits include:

         * Automatic ID generation (when not provided)
         * Required arguments strictly validated at creation time
@@ -785,8 +795,9 @@ class NonStandardContentBlock(TypedDict):
     """Content block identifier.

     Either:
+
     - Generated by the provider (e.g., OpenAI's file ID)
-    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
+    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

     """
@@ -805,6 +816,7 @@ DataContentBlock = (
     | PlainTextContentBlock
     | FileContentBlock
 )
+"""A union of all defined multimodal data `ContentBlock` types."""

 ToolContentBlock = (
     ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
@@ -818,6 +830,7 @@ ContentBlock = (
     | DataContentBlock
     | ToolContentBlock
 )
+"""A union of all defined `ContentBlock` types and aliases."""


 KNOWN_BLOCK_TYPES = {
@@ -842,7 +855,7 @@ KNOWN_BLOCK_TYPES = {
     "non_standard",
     # citation and non_standard_annotation intentionally omitted
 }
-"""These are block types known to ``langchain-core>=1.0.0``.
+"""These are block types known to `langchain-core>=1.0.0`.

 If a block has a type not in this set, it is considered to be provider-specific.
 """
@@ -881,7 +894,7 @@ def is_data_content_block(block: dict) -> bool:
         block: The content block to check.

     Returns:
-        True if the content block is a data content block, False otherwise.
+        `True` if the content block is a data content block, `False` otherwise.

     """
     if block.get("type") not in _get_data_content_block_types():
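`is_data_content_block` is a simple membership test on the block's `type` field against the multimodal data block types. Sketch:

```python
from langchain_core.messages.content import is_data_content_block

assert is_data_content_block({"type": "image", "url": "https://example.com/a.png"})
assert not is_data_content_block({"type": "text", "text": "hello"})
```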
@@ -923,20 +936,20 @@ def create_text_block(
     index: int | str | None = None,
     **kwargs: Any,
 ) -> TextContentBlock:
-    """Create a ``TextContentBlock``.
+    """Create a `TextContentBlock`.

     Args:
         text: The text content of the block.
         id: Content block identifier. Generated automatically if not provided.
-        annotations: ``Citation``s and other annotations for the text.
+        annotations: `Citation`s and other annotations for the text.
         index: Index of block in aggregate response. Used during streaming.

     Returns:
-        A properly formatted ``TextContentBlock``.
+        A properly formatted `TextContentBlock`.

     !!! note
         The `id` is generated automatically if not provided, using a UUID4 format
-        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
+        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

     """
     block = TextContentBlock(
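As the docstring promises, the factory fills in `type` and generates an `'lc_'`-prefixed ID when none is supplied. Sketch:

```python
from langchain_core.messages.content import create_text_block

block = create_text_block("Hello, world!")
assert block["type"] == "text"
assert block["id"].startswith("lc_")  # auto-generated, UUID4-based
```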
@@ -966,7 +979,7 @@ def create_image_block(
     index: int | str | None = None,
     **kwargs: Any,
 ) -> ImageContentBlock:
-    """Create an ``ImageContentBlock``.
+    """Create an `ImageContentBlock`.

     Args:
         url: URL of the image.
@@ -977,15 +990,15 @@ def create_image_block(
         index: Index of block in aggregate response. Used during streaming.

     Returns:
-        A properly formatted ``ImageContentBlock``.
+        A properly formatted `ImageContentBlock`.

     Raises:
-        ValueError: If no image source is provided or if ``base64`` is used without
-            ``mime_type``.
+        ValueError: If no image source is provided or if `base64` is used without
+            `mime_type`.

     !!! note
         The `id` is generated automatically if not provided, using a UUID4 format
-        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
+        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

     """
     if not any([url, base64, file_id]):
@@ -1022,7 +1035,7 @@ def create_video_block(
     index: int | str | None = None,
     **kwargs: Any,
 ) -> VideoContentBlock:
-    """Create a ``VideoContentBlock``.
+    """Create a `VideoContentBlock`.

     Args:
         url: URL of the video.
@@ -1033,15 +1046,15 @@ def create_video_block(
         index: Index of block in aggregate response. Used during streaming.

     Returns:
-        A properly formatted ``VideoContentBlock``.
+        A properly formatted `VideoContentBlock`.

     Raises:
-        ValueError: If no video source is provided or if ``base64`` is used without
-            ``mime_type``.
+        ValueError: If no video source is provided or if `base64` is used without
+            `mime_type`.

     !!! note
         The `id` is generated automatically if not provided, using a UUID4 format
-        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
+        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

     """
     if not any([url, base64, file_id]):
@@ -1082,7 +1095,7 @@ def create_audio_block(
     index: int | str | None = None,
     **kwargs: Any,
 ) -> AudioContentBlock:
-    """Create an ``AudioContentBlock``.
+    """Create an `AudioContentBlock`.

     Args:
         url: URL of the audio.
@@ -1093,15 +1106,15 @@ def create_audio_block(
         index: Index of block in aggregate response. Used during streaming.

     Returns:
-        A properly formatted ``AudioContentBlock``.
+        A properly formatted `AudioContentBlock`.

     Raises:
-        ValueError: If no audio source is provided or if ``base64`` is used without
-            ``mime_type``.
+        ValueError: If no audio source is provided or if `base64` is used without
+            `mime_type`.

     !!! note
         The `id` is generated automatically if not provided, using a UUID4 format
-        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
+        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

     """
     if not any([url, base64, file_id]):
@@ -1142,7 +1155,7 @@ def create_file_block(
     index: int | str | None = None,
     **kwargs: Any,
 ) -> FileContentBlock:
-    """Create a ``FileContentBlock``.
+    """Create a `FileContentBlock`.

     Args:
         url: URL of the file.
@@ -1153,15 +1166,15 @@ def create_file_block(
         index: Index of block in aggregate response. Used during streaming.

     Returns:
-        A properly formatted ``FileContentBlock``.
+        A properly formatted `FileContentBlock`.

     Raises:
-        ValueError: If no file source is provided or if ``base64`` is used without
-            ``mime_type``.
+        ValueError: If no file source is provided or if `base64` is used without
+            `mime_type`.

     !!! note
         The `id` is generated automatically if not provided, using a UUID4 format
-        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
+        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

     """
     if not any([url, base64, file_id]):
@@ -1203,7 +1216,7 @@ def create_plaintext_block(
     index: int | str | None = None,
     **kwargs: Any,
 ) -> PlainTextContentBlock:
-    """Create a ``PlainTextContentBlock``.
+    """Create a `PlainTextContentBlock`.

     Args:
         text: The plaintext content.
@@ -1216,11 +1229,11 @@ def create_plaintext_block(
         index: Index of block in aggregate response. Used during streaming.

     Returns:
-        A properly formatted ``PlainTextContentBlock``.
+        A properly formatted `PlainTextContentBlock`.

     !!! note
         The `id` is generated automatically if not provided, using a UUID4 format
-        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
+        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

     """
     block = PlainTextContentBlock(
@@ -1259,7 +1272,7 @@ def create_tool_call(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> ToolCall:
|
||||
"""Create a ``ToolCall``.
|
||||
"""Create a `ToolCall`.
|
||||
|
||||
Args:
|
||||
name: The name of the tool to be called.
|
||||
@@ -1268,11 +1281,11 @@ def create_tool_call(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``ToolCall``.
|
||||
A properly formatted `ToolCall`.
|
||||
|
||||
!!! note
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = ToolCall(
|
||||
@@ -1298,7 +1311,7 @@ def create_reasoning_block(
|
||||
index: int | str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> ReasoningContentBlock:
|
||||
"""Create a ``ReasoningContentBlock``.
|
||||
"""Create a `ReasoningContentBlock`.
|
||||
|
||||
Args:
|
||||
reasoning: The reasoning text or thought summary.
|
||||
@@ -1306,11 +1319,11 @@ def create_reasoning_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``ReasoningContentBlock``.
|
||||
A properly formatted `ReasoningContentBlock`.
|
||||
|
||||
!!! note
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = ReasoningContentBlock(
|
||||
@@ -1339,7 +1352,7 @@ def create_citation(
|
||||
id: str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> Citation:
|
||||
"""Create a ``Citation``.
|
||||
"""Create a `Citation`.
|
||||
|
||||
Args:
|
||||
url: URL of the document source.
|
||||
@@ -1350,11 +1363,11 @@ def create_citation(
|
||||
id: Content block identifier. Generated automatically if not provided.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``Citation``.
|
||||
A properly formatted `Citation`.
|
||||
|
||||
!!! note
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = Citation(type="citation", id=ensure_id(id))
|
||||
@@ -1383,7 +1396,7 @@ def create_non_standard_block(
|
||||
id: str | None = None,
|
||||
index: int | str | None = None,
|
||||
) -> NonStandardContentBlock:
|
||||
"""Create a ``NonStandardContentBlock``.
|
||||
"""Create a `NonStandardContentBlock`.
|
||||
|
||||
Args:
|
||||
value: Provider-specific data.
|
||||
@@ -1391,11 +1404,11 @@ def create_non_standard_block(
|
||||
index: Index of block in aggregate response. Used during streaming.
|
||||
|
||||
Returns:
|
||||
A properly formatted ``NonStandardContentBlock``.
|
||||
A properly formatted `NonStandardContentBlock`.
|
||||
|
||||
!!! note
|
||||
The `id` is generated automatically if not provided, using a UUID4 format
|
||||
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
|
||||
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
|
||||
|
||||
"""
|
||||
block = NonStandardContentBlock(
|
||||
|
||||
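For context, a minimal sketch of how these factories behave; the import path (`langchain_core.messages.content_blocks`) is an assumption based on the file being diffed here.

```python
# Hypothetical usage of the create_*_block factories documented above.
from langchain_core.messages.content_blocks import create_video_block

# URL-sourced block: no mime_type required.
video = create_video_block(url="https://example.com/clip.mp4")
assert video["type"] == "video"
assert video["id"].startswith("lc_")  # auto-generated LangChain ID

# base64 without mime_type should raise, per the documented contract.
try:
    create_video_block(base64="AAAA")
except ValueError as err:
    print(err)
```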
@@ -15,11 +15,11 @@ from langchain_core.utils._merge import merge_dicts
 class FunctionMessage(BaseMessage):
     """Message for passing the result of executing a tool back to a model.

-    ``FunctionMessage`` are an older version of the `ToolMessage` schema, and
+    `FunctionMessage` are an older version of the `ToolMessage` schema, and
     do not contain the `tool_call_id` field.

     The `tool_call_id` field is used to associate the tool call request with the
-    tool call response. This is useful in situations where a chat model is able
+    tool call response. Useful in situations where a chat model is able
     to request multiple tool calls in parallel.

     """
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
     """The name of the function that was executed."""

     type: Literal["function"] = "function"
-    """The type of the message (used for serialization). Defaults to ``'function'``."""
+    """The type of the message (used for serialization)."""


 class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,11 +38,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to ``'FunctionMessageChunk'``.
-
-    """
+    """The type of the message (used for serialization)."""

     @override
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]

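A quick sketch of the distinction the docstring draws (legacy `FunctionMessage` lacks the `tool_call_id` that `ToolMessage` uses to pair parallel calls with results):

```python
# Contrast the legacy FunctionMessage with ToolMessage; message contents
# here are illustrative only.
from langchain_core.messages import FunctionMessage, ToolMessage

legacy = FunctionMessage(name="multiply", content="42")  # no tool_call_id field
modern = ToolMessage(content="42", tool_call_id="call_abc123")  # id links call to result

print(legacy.type)  # "function"
print(modern.type)  # "tool"
```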
@@ -7,33 +7,27 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk


 class HumanMessage(BaseMessage):
-    """Message from a human.
+    """Message from the user.

-    `HumanMessage`s are messages that are passed in from a human to the model.
+    A `HumanMessage` is a message that is passed in from a user to the model.

     Example:
-        .. code-block:: python
+        ```python
         from langchain_core.messages import HumanMessage, SystemMessage

         messages = [
             SystemMessage(content="You are a helpful assistant! Your name is Bob."),
             HumanMessage(content="What is your name?"),
         ]

         # Instantiate a chat model and invoke it with the messages
         model = ...
         print(model.invoke(messages))
+        ```
     """

     type: Literal["human"] = "human"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'human'``.
-
-    """
+    """The type of the message (used for serialization)."""

     @overload
     def __init__(
@@ -56,7 +50,7 @@ class HumanMessage(BaseMessage):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
+        """Specify `content` as positional arg or `content_blocks` for typing."""
         if content_blocks is not None:
             super().__init__(
                 content=cast("str | list[str | dict]", content_blocks),
@@ -73,5 +67,4 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-    Defaults to "HumanMessageChunk"."""
+    """The type of the message (used for serialization)."""

@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
     """Message responsible for deleting other messages."""

     type: Literal["remove"] = "remove"
-    """The type of the message (used for serialization). Defaults to "remove"."""
+    """The type of the message (used for serialization)."""

     def __init__(
         self,
@@ -20,7 +20,7 @@ class RemoveMessage(BaseMessage):

         Args:
             id: The ID of the message to remove.
-            kwargs: Additional fields to pass to the message.
+            **kwargs: Additional fields to pass to the message.

         Raises:
             ValueError: If the 'content' field is passed in kwargs.

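A minimal sketch of the `RemoveMessage` contract described above; how the deletion is actually applied depends on the message store consuming it (e.g. a LangGraph reducer — an assumption here, not shown in this diff):

```python
from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage

history = [HumanMessage("hi", id="msg-1"), AIMessage("hello", id="msg-2")]

# Request deletion of the message with id "msg-1".
removal = RemoveMessage(id="msg-1")

# Passing content raises, per the docstring above.
try:
    RemoveMessage(id="msg-1", content="oops")
except ValueError as err:
    print(err)
```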
@@ -13,27 +13,21 @@ class SystemMessage(BaseMessage):
     of input messages.

     Example:
-        .. code-block:: python
+        ```python
         from langchain_core.messages import HumanMessage, SystemMessage

         messages = [
             SystemMessage(content="You are a helpful assistant! Your name is Bob."),
             HumanMessage(content="What is your name?"),
         ]

         # Define a chat model and invoke it with the messages
         print(model.invoke(messages))
+        ```
     """

     type: Literal["system"] = "system"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'system'``.
-
-    """
+    """The type of the message (used for serialization)."""

     @overload
     def __init__(
@@ -56,7 +50,7 @@ class SystemMessage(BaseMessage):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
+        """Specify `content` as positional arg or `content_blocks` for typing."""
         if content_blocks is not None:
             super().__init__(
                 content=cast("str | list[str | dict]", content_blocks),
@@ -73,8 +67,4 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to ``'SystemMessageChunk'``.
-
-    """
+    """The type of the message (used for serialization)."""

@@ -16,8 +16,8 @@ from langchain_core.utils._merge import merge_dicts, merge_obj
 class ToolOutputMixin:
     """Mixin for objects that tools can return directly.

-    If a custom BaseTool is invoked with a ``ToolCall`` and the output of custom code is
-    not an instance of ``ToolOutputMixin``, the output will automatically be coerced to
+    If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is
+    not an instance of `ToolOutputMixin`, the output will automatically be coerced to
     a string and wrapped in a `ToolMessage`.

     """
@@ -27,41 +27,38 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     """Message for passing the result of executing a tool back to a model.

     `ToolMessage` objects contain the result of a tool invocation. Typically, the result
-    is encoded inside the ``content`` field.
+    is encoded inside the `content` field.

-    Example: A `ToolMessage` representing a result of ``42`` from a tool call with id
+    Example: A `ToolMessage` representing a result of `42` from a tool call with id

-        .. code-block:: python
+        ```python
         from langchain_core.messages import ToolMessage

         ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
+        ```

     Example: A `ToolMessage` where only part of the tool output is sent to the model
         and the full output is passed in to artifact.

-        !!! version-added "Added in version 0.2.17"
-
-        .. code-block:: python
+        ```python
         from langchain_core.messages import ToolMessage

         tool_output = {
             "stdout": "From the graph we can see that the correlation between "
             "x and y is ...",
             "stderr": None,
             "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
         }

         ToolMessage(
             content=tool_output["stdout"],
             artifact=tool_output,
             tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
         )
+        ```

     The `tool_call_id` field is used to associate the tool call request with the
-    tool call response. This is useful in situations where a chat model is able
+    tool call response. Useful in situations where a chat model is able
     to request multiple tool calls in parallel.

     """
@@ -70,11 +67,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     """Tool call that this message is responding to."""

     type: Literal["tool"] = "tool"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'tool'``.
-
-    """
+    """The type of the message (used for serialization)."""

     artifact: Any = None
     """Artifact of the Tool execution which is not meant to be sent to the model.
@@ -83,21 +76,15 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     a subset of the full tool output is being passed as message content but the full
     output is needed in other parts of the code.

-    !!! version-added "Added in version 0.2.17"
-
     """

     status: Literal["success", "error"] = "success"
-    """Status of the tool invocation.
-
-    !!! version-added "Added in version 0.2.24"
-
-    """
+    """Status of the tool invocation."""

     additional_kwargs: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage, but not used."""
+    """Currently inherited from `BaseMessage`, but not used."""
     response_metadata: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage, but not used."""
+    """Currently inherited from `BaseMessage`, but not used."""

     @model_validator(mode="before")
     @classmethod
@@ -165,12 +152,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Initialize `ToolMessage`.
+        """Initialize a `ToolMessage`.

-        Specify ``content`` as positional arg or ``content_blocks`` for typing.
+        Specify `content` as positional arg or `content_blocks` for typing.

         Args:
-            content: The string contents of the message.
+            content: The contents of the message.
             content_blocks: Typed standard content.
             **kwargs: Additional fields.
         """
@@ -216,16 +203,15 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):


 class ToolCall(TypedDict):
-    """Represents a request to call a tool.
+    """Represents an AI's request to call a tool.

     Example:
-        .. code-block:: python
-
-            {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```python
+        {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```

-    This represents a request to call the tool named ``'foo'`` with arguments
-    ``{"a": 1}`` and an identifier of ``'123'``.
+    This represents a request to call the tool named `'foo'` with arguments
+    `{"a": 1}` and an identifier of `'123'`.

     """

@@ -263,24 +249,22 @@ def tool_call(


 class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
+    """A chunk of a tool call (yielded when streaming).

-    When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``),
+    When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
     all string attributes are concatenated. Chunks are only merged if their
-    values of ``index`` are equal and not None.
+    values of `index` are equal and not None.

     Example:
-        .. code-block:: python
+        ```python
         left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
         right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

         (
             AIMessageChunk(content="", tool_call_chunks=left_chunks)
             + AIMessageChunk(content="", tool_call_chunks=right_chunks)
         ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
+        ```
     """

     name: str | None

@@ -5,7 +5,6 @@ Some examples of what you can do with these functions include:
 * Convert messages to strings (serialization)
 * Convert messages from dicts to Message objects (deserialization)
 * Filter messages from a list of messages based on name, type or id etc.
-
 """

 from __future__ import annotations
@@ -87,6 +86,7 @@ AnyMessage = Annotated[
     | Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")],
     Field(discriminator=Discriminator(_get_type)),
 ]
+""""A type representing any defined `Message` or `MessageChunk` type."""


 def get_buffer_string(
@@ -97,9 +97,7 @@ def get_buffer_string(
     Args:
         messages: Messages to be converted to strings.
         human_prefix: The prefix to prepend to contents of `HumanMessage`s.
-            Default is ``'Human'``.
-        ai_prefix: The prefix to prepend to contents of `AIMessage`. Default is
-            ``'AI'``.
+        ai_prefix: The prefix to prepend to contents of `AIMessage`.

     Returns:
         A single string concatenation of all input messages.
@@ -108,17 +106,16 @@ def get_buffer_string(
         ValueError: If an unsupported message type is encountered.

     Example:
-        .. code-block:: python
+        ```python
         from langchain_core import AIMessage, HumanMessage

         messages = [
             HumanMessage(content="Hi, how are you?"),
             AIMessage(content="Good, how are you?"),
         ]
         get_buffer_string(messages)
         # -> "Human: Hi, how are you?\nAI: Good, how are you?"
+        ```
     """
     string_messages = []
     for m in messages:
@@ -178,7 +175,7 @@ def _message_from_dict(message: dict) -> BaseMessage:


 def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
-    """Convert a sequence of messages from dicts to ``Message`` objects.
+    """Convert a sequence of messages from dicts to `Message` objects.

     Args:
         messages: Sequence of messages (as dicts) to convert.
@@ -191,7 +188,7 @@ def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:


 def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
-    """Convert a message chunk to a ``Message``.
+    """Convert a message chunk to a `Message`.

     Args:
         chunk: Message chunk to convert.
@@ -213,6 +210,7 @@ def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
 MessageLikeRepresentation = (
     BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]
 )
+"""A type representing the various ways a message can be represented."""

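A hedged round-trip sketch of the (de)serialization helpers touched above, pairing `messages_from_dict` with its exported counterpart `messages_to_dict`:

```python
from langchain_core.messages import (
    AIMessageChunk,
    HumanMessage,
    message_chunk_to_message,
    messages_from_dict,
    messages_to_dict,
)

original = [HumanMessage(content="hi there")]
payload = messages_to_dict(original)    # list[dict], JSON-serializable
restored = messages_from_dict(payload)  # back to Message objects
assert restored[0].content == "hi there"

# Collapse a streamed chunk into its non-chunk variant.
chunk = AIMessageChunk(content="partial")
print(type(message_chunk_to_message(chunk)).__name__)  # AIMessage
```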
 def _create_message_from_message_type(
@@ -224,24 +222,24 @@ def _create_message_from_message_type(
     id: str | None = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
-    """Create a message from a ``Message`` type and content string.
+    """Create a message from a `Message` type and content string.

     Args:
-        message_type: (str) the type of the message (e.g., ``'human'``, ``'ai'``, etc.).
+        message_type: (str) the type of the message (e.g., `'human'`, `'ai'`, etc.).
         content: (str) the content string.
-        name: (str) the name of the message. Default is None.
-        tool_call_id: (str) the tool call id. Default is None.
-        tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
-        id: (str) the id of the message. Default is None.
+        name: (str) the name of the message.
+        tool_call_id: (str) the tool call id.
+        tool_calls: (list[dict[str, Any]]) the tool calls.
+        id: (str) the id of the message.
         additional_kwargs: (dict[str, Any]) additional keyword arguments.

     Returns:
         a message of the appropriate type.

     Raises:
-        ValueError: if the message type is not one of ``'human'``, ``'user'``, ``'ai'``,
-            ``'assistant'``, ``'function'``, ``'tool'``, ``'system'``, or
-            ``'developer'``.
+        ValueError: if the message type is not one of `'human'`, `'user'`, `'ai'`,
+            `'assistant'`, `'function'`, `'tool'`, `'system'`, or
+            `'developer'`.
     """
     kwargs: dict[str, Any] = {}
     if name is not None:
@@ -307,21 +305,21 @@ def _create_message_from_message_type(


 def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
-    """Instantiate a ``Message`` from a variety of message formats.
+    """Instantiate a `Message` from a variety of message formats.

     The message format can be one of the following:

-    - ``BaseMessagePromptTemplate``
+    - `BaseMessagePromptTemplate`
     - `BaseMessage`
-    - 2-tuple of (role string, template); e.g., (``'human'``, ``'{user_input}'``)
+    - 2-tuple of (role string, template); e.g., (`'human'`, `'{user_input}'`)
     - dict: a message dict with role and content keys
-    - string: shorthand for (``'human'``, template); e.g., ``'{user_input}'``
+    - string: shorthand for (`'human'`, template); e.g., `'{user_input}'`

     Args:
         message: a representation of a message in one of the supported formats.

     Returns:
-        an instance of a message or a message template.
+        An instance of a message or a message template.

     Raises:
         NotImplementedError: if the message type is not supported.
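The private `_convert_to_message` above backs the public `convert_to_messages` helper; a small sketch of the accepted shapes:

```python
from langchain_core.messages import convert_to_messages

messages = convert_to_messages(
    [
        ("system", "You are terse."),       # 2-tuple of (role, content)
        {"role": "user", "content": "hi"},  # role/content dict
        "follow-up question",               # string shorthand for a human message
    ]
)
for m in messages:
    print(m.type, m.content)
# -> system / human / human
```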
@@ -427,75 +425,74 @@ def filter_messages(

     Args:
         messages: Sequence Message-like objects to filter.
-        include_names: Message names to include. Default is None.
-        exclude_names: Messages names to exclude. Default is None.
+        include_names: Message names to include.
+        exclude_names: Messages names to exclude.
         include_types: Message types to include. Can be specified as string names
-            (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as `BaseMessage`
+            (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
             classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
-            Default is None.
         exclude_types: Message types to exclude. Can be specified as string names
-            (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as `BaseMessage`
+            (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
             classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
-            Default is None.
-        include_ids: Message IDs to include. Default is None.
-        exclude_ids: Message IDs to exclude. Default is None.
-        exclude_tool_calls: Tool call IDs to exclude. Default is None.
+        include_ids: Message IDs to include.
+        exclude_ids: Message IDs to exclude.
+        exclude_tool_calls: Tool call IDs to exclude.
            Can be one of the following:
            - `True`: all `AIMessage`s with tool calls and all
-                `ToolMessage` objects will be excluded.
+              `ToolMessage` objects will be excluded.
            - a sequence of tool call IDs to exclude:
-                - `ToolMessage` objects with the corresponding tool call ID will be
-                    excluded.
-                - The `tool_calls` in the AIMessage will be updated to exclude
-                    matching tool calls. If all `tool_calls` are filtered from an
-                    AIMessage, the whole message is excluded.
+              - `ToolMessage` objects with the corresponding tool call ID will be
+                excluded.
+              - The `tool_calls` in the AIMessage will be updated to exclude
+                matching tool calls. If all `tool_calls` are filtered from an
+                AIMessage, the whole message is excluded.

     Returns:
-        A list of Messages that meets at least one of the ``incl_*`` conditions and none
-        of the ``excl_*`` conditions. If not ``incl_*`` conditions are specified then
+        A list of Messages that meets at least one of the `incl_*` conditions and none
+        of the `excl_*` conditions. If not `incl_*` conditions are specified then
         anything that is not explicitly excluded will be included.

     Raises:
         ValueError: If two incompatible arguments are provided.

     Example:
-        .. code-block:: python
+        ```python
         from langchain_core.messages import (
             filter_messages,
             AIMessage,
             HumanMessage,
             SystemMessage,
         )

         messages = [
             SystemMessage("you're a good assistant."),
             HumanMessage("what's your name", id="foo", name="example_user"),
             AIMessage("steve-o", id="bar", name="example_assistant"),
             HumanMessage(
                 "what's your favorite color",
                 id="baz",
             ),
             AIMessage(
                 "silicon blue",
                 id="blah",
             ),
         ]

         filter_messages(
             messages,
             incl_names=("example_user", "example_assistant"),
             incl_types=("system",),
             excl_ids=("bar",),
         )
+        ```

-        .. code-block:: python
-
+        ```python
         [
             SystemMessage("you're a good assistant."),
             HumanMessage("what's your name", id="foo", name="example_user"),
         ]
+        ```
     """
     messages = convert_to_messages(messages)
     filtered: list[BaseMessage] = []
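A small sketch of the `exclude_tool_calls` behavior documented in this hunk:

```python
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage, filter_messages

messages = [
    HumanMessage("what is 2 + 2?"),
    AIMessage("", tool_calls=[{"name": "add", "args": {"x": 2, "y": 2}, "id": "call-1"}]),
    ToolMessage("4", tool_call_id="call-1"),
    AIMessage("2 + 2 = 4"),
]

# Drop every tool-calling AIMessage and every ToolMessage.
no_tools = filter_messages(messages, exclude_tool_calls=True)
print([m.type for m in no_tools])  # ['human', 'ai']
```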
@@ -571,7 +568,6 @@ def merge_message_runs(
     Args:
         messages: Sequence Message-like objects to merge.
         chunk_separator: Specify the string to be inserted between message chunks.
-            Defaults to ``'\n'``.

     Returns:
         list of BaseMessages with consecutive runs of message types merged into single
@@ -579,87 +575,86 @@ def merge_message_runs(
         the merged content is a concatenation of the two strings with a new-line
         separator.
         The separator inserted between message chunks can be controlled by specifying
-        any string with ``chunk_separator``. If at least one of the messages has a list
+        any string with `chunk_separator`. If at least one of the messages has a list
         of content blocks, the merged content is a list of content blocks.

     Example:
-        .. code-block:: python
+        ```python
         from langchain_core.messages import (
             merge_message_runs,
             AIMessage,
             HumanMessage,
             SystemMessage,
             ToolCall,
         )

         messages = [
             SystemMessage("you're a good assistant."),
             HumanMessage(
                 "what's your favorite color",
                 id="foo",
             ),
             HumanMessage(
                 "wait your favorite food",
                 id="bar",
             ),
             AIMessage(
                 "my favorite colo",
                 tool_calls=[
                     ToolCall(
                         name="blah_tool", args={"x": 2}, id="123", type="tool_call"
                     )
                 ],
                 id="baz",
             ),
             AIMessage(
                 [{"type": "text", "text": "my favorite dish is lasagna"}],
                 tool_calls=[
                     ToolCall(
                         name="blah_tool",
                         args={"x": -10},
                         id="456",
                         type="tool_call",
                     )
                 ],
                 id="blur",
             ),
         ]

         merge_message_runs(messages)
+        ```

-        .. code-block:: python
-
+        ```python
         [
             SystemMessage("you're a good assistant."),
             HumanMessage(
                 "what's your favorite color\\n"
                 "wait your favorite food", id="foo",
             ),
             AIMessage(
                 [
                     "my favorite colo",
                     {"type": "text", "text": "my favorite dish is lasagna"}
                 ],
                 tool_calls=[
                     ToolCall({
                         "name": "blah_tool",
                         "args": {"x": 2},
                         "id": "123",
                         "type": "tool_call"
                     }),
                     ToolCall({
                         "name": "blah_tool",
                         "args": {"x": -10},
                         "id": "456",
                         "type": "tool_call"
                     })
                 ]
                 id="baz"
             ),
         ]
+        ```
     """
     if not messages:
         return []
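The `chunk_separator` knob mentioned above but not exercised in the example; a minimal sketch:

```python
from langchain_core.messages import HumanMessage, merge_message_runs

messages = [HumanMessage("first line"), HumanMessage("second line")]

merged = merge_message_runs(messages, chunk_separator=" | ")
print(merged[0].content)  # "first line | second line"
```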
@@ -706,30 +701,28 @@ def trim_messages(
 ) -> list[BaseMessage]:
     r"""Trim messages to be below a token count.

-    ``trim_messages`` can be used to reduce the size of a chat history to a specified
-    token count or specified message count.
+    `trim_messages` can be used to reduce the size of a chat history to a specified
+    token or message count.

     In either case, if passing the trimmed chat history back into a chat model
     directly, the resulting chat history should usually satisfy the following
     properties:

     1. The resulting chat history should be valid. Most chat models expect that chat
-       history starts with either (1) a `HumanMessage` or (2) a `SystemMessage`
-       followed by a `HumanMessage`. To achieve this, set ``start_on='human'``.
-       In addition, generally a `ToolMessage` can only appear after an `AIMessage`
-       that involved a tool call.
-       Please see the following link for more information about messages:
-       https://python.langchain.com/docs/concepts/#messages
+       history starts with either (1) a `HumanMessage` or (2) a `SystemMessage`
+       followed by a `HumanMessage`. To achieve this, set `start_on='human'`.
+       In addition, generally a `ToolMessage` can only appear after an `AIMessage`
+       that involved a tool call.
     2. It includes recent messages and drops old messages in the chat history.
-       To achieve this set the ``strategy='last'``.
+       To achieve this set the `strategy='last'`.
     3. Usually, the new chat history should include the `SystemMessage` if it
-       was present in the original chat history since the `SystemMessage` includes
-       special instructions to the chat model. The `SystemMessage` is almost always
-       the first message in the history if present. To achieve this set the
-       ``include_system=True``.
+       was present in the original chat history since the `SystemMessage` includes
+       special instructions to the chat model. The `SystemMessage` is almost always
+       the first message in the history if present. To achieve this set the
+       `include_system=True`.

     !!! note
-        The examples below show how to configure ``trim_messages`` to achieve a behavior
+        The examples below show how to configure `trim_messages` to achieve a behavior
         consistent with the above properties.

     Args:
@@ -737,118 +730,113 @@ def trim_messages(
         max_tokens: Max token count of trimmed messages.
         token_counter: Function or llm for counting tokens in a `BaseMessage` or a
             list of `BaseMessage`. If a `BaseLanguageModel` is passed in then
-            ``BaseLanguageModel.get_num_tokens_from_messages()`` will be used.
-            Set to ``len`` to count the number of **messages** in the chat history.
+            `BaseLanguageModel.get_num_tokens_from_messages()` will be used.
+            Set to `len` to count the number of **messages** in the chat history.

             !!! note
-                Use ``count_tokens_approximately`` to get fast, approximate token
+                Use `count_tokens_approximately` to get fast, approximate token
                 counts.
-                This is recommended for using ``trim_messages`` on the hot path, where
+                This is recommended for using `trim_messages` on the hot path, where
                 exact token counting is not necessary.

         strategy: Strategy for trimming.
-            - ``'first'``: Keep the first ``<= n_count`` tokens of the messages.
-            - ``'last'``: Keep the last ``<= n_count`` tokens of the messages.
-            Default is ``'last'``.
+            - `'first'`: Keep the first `<= n_count` tokens of the messages.
+            - `'last'`: Keep the last `<= n_count` tokens of the messages.
         allow_partial: Whether to split a message if only part of the message can be
-            included. If ``strategy='last'`` then the last partial contents of a message
-            are included. If ``strategy='first'`` then the first partial contents of a
+            included. If `strategy='last'` then the last partial contents of a message
+            are included. If `strategy='first'` then the first partial contents of a
             message are included.
-            Default is False.
         end_on: The message type to end on. If specified then every message after the
-            last occurrence of this type is ignored. If ``strategy='last'`` then this
-            is done before we attempt to get the last ``max_tokens``. If
-            ``strategy='first'`` then this is done after we get the first
-            ``max_tokens``. Can be specified as string names (e.g. ``'system'``,
-            ``'human'``, ``'ai'``, ...) or as `BaseMessage` classes (e.g.
+            last occurrence of this type is ignored. If `strategy='last'` then this
+            is done before we attempt to get the last `max_tokens`. If
+            `strategy='first'` then this is done after we get the first
+            `max_tokens`. Can be specified as string names (e.g. `'system'`,
+            `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g.
             `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single
             type or a list of types.
-            Default is None.
         start_on: The message type to start on. Should only be specified if
-            ``strategy='last'``. If specified then every message before
+            `strategy='last'`. If specified then every message before
             the first occurrence of this type is ignored. This is done after we trim
-            the initial messages to the last ``max_tokens``. Does not
-            apply to a `SystemMessage` at index 0 if ``include_system=True``. Can be
-            specified as string names (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or
+            the initial messages to the last `max_tokens`. Does not
+            apply to a `SystemMessage` at index 0 if `include_system=True`. Can be
+            specified as string names (e.g. `'system'`, `'human'`, `'ai'`, ...) or
             as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`,
             `AIMessage`, ...). Can be a single type or a list of types.
-            Default is None.
-        include_system: Whether to keep the SystemMessage if there is one at index 0.
-            Should only be specified if ``strategy="last"``.
-            Default is False.
-        text_splitter: Function or ``langchain_text_splitters.TextSplitter`` for
+        include_system: Whether to keep the `SystemMessage` if there is one at index
+            `0`. Should only be specified if `strategy="last"`.
+        text_splitter: Function or `langchain_text_splitters.TextSplitter` for
             splitting the string contents of a message. Only used if
-            ``allow_partial=True``. If ``strategy='last'`` then the last split tokens
-            from a partial message will be included. if ``strategy='first'`` then the
+            `allow_partial=True`. If `strategy='last'` then the last split tokens
+            from a partial message will be included. if `strategy='first'` then the
            first split tokens from a partial message will be included. Token splitter
            assumes that separators are kept, so that split contents can be directly
            concatenated to recreate the original text. Defaults to splitting on
            newlines.

     Returns:
-        list of trimmed `BaseMessage`.
+        List of trimmed `BaseMessage`.

     Raises:
         ValueError: if two incompatible arguments are specified or an unrecognized
-            ``strategy`` is specified.
+            `strategy` is specified.

     Example:
         Trim chat history based on token count, keeping the `SystemMessage` if
         present, and ensuring that the chat history starts with a `HumanMessage` (
         or a `SystemMessage` followed by a `HumanMessage`).

-        .. code-block:: python
+        ```python
         from langchain_core.messages import (
             AIMessage,
             HumanMessage,
             BaseMessage,
             SystemMessage,
             trim_messages,
         )

         messages = [
-            SystemMessage(
-                "you're a good assistant, you always respond with a joke."
-            ),
+            SystemMessage("you're a good assistant, you always respond with a joke."),
             HumanMessage("i wonder why it's called langchain"),
             AIMessage(
                 'Well, I guess they thought "WordRope" and "SentenceString" just '
                 "didn't have the same ring to it!"
             ),
             HumanMessage("and who is harrison chasing anyways"),
             AIMessage(
                 "Hmmm let me think.\n\nWhy, he's probably chasing after the last "
                 "cup of coffee in the office!"
             ),
             HumanMessage("what do you call a speechless parrot"),
         ]

         trim_messages(
             messages,
             max_tokens=45,
             strategy="last",
             token_counter=ChatOpenAI(model="gpt-4o"),
             # Most chat models expect that chat history starts with either:
             # (1) a HumanMessage or
             # (2) a SystemMessage followed by a HumanMessage
             start_on="human",
             # Usually, we want to keep the SystemMessage
             # if it's present in the original history.
             # The SystemMessage has special instructions for the model.
             include_system=True,
             allow_partial=False,
         )
+        ```

-        .. code-block:: python
-
+        ```python
         [
             SystemMessage(
                 content="you're a good assistant, you always respond with a joke."
             ),
             HumanMessage(content="what do you call a speechless parrot"),
         ]
+        ```

         Trim chat history based on the message count, keeping the `SystemMessage` if
         present, and ensuring that the chat history starts with a `HumanMessage` (
@@ -874,100 +862,95 @@ def trim_messages(
             allow_partial=False,
         )

-        .. code-block:: python
-
+        ```python
         [
             SystemMessage(
                 content="you're a good assistant, you always respond with a joke."
             ),
             HumanMessage(content="and who is harrison chasing anyways"),
             AIMessage(
                 content="Hmmm let me think.\n\nWhy, he's probably chasing after "
                 "the last cup of coffee in the office!"
             ),
             HumanMessage(content="what do you call a speechless parrot"),
         ]
+        ```

         Trim chat history using a custom token counter function that counts the
         number of tokens in each message.

-        .. code-block:: python
-
+        ```python
         messages = [
             SystemMessage("This is a 4 token text. The full message is 10 tokens."),
             HumanMessage(
                 "This is a 4 token text. The full message is 10 tokens.", id="first"
             ),
             AIMessage(
                 [
                     {"type": "text", "text": "This is the FIRST 4 token block."},
                     {"type": "text", "text": "This is the SECOND 4 token block."},
                 ],
                 id="second",
             ),
             HumanMessage(
                 "This is a 4 token text. The full message is 10 tokens.", id="third"
             ),
             AIMessage(
                 "This is a 4 token text. The full message is 10 tokens.",
                 id="fourth",
             ),
         ]


         def dummy_token_counter(messages: list[BaseMessage]) -> int:
             # treat each message like it adds 3 default tokens at the beginning
             # of the message and at the end of the message. 3 + 4 + 3 = 10 tokens
             # per message.

             default_content_len = 4
             default_msg_prefix_len = 3
             default_msg_suffix_len = 3

             count = 0
             for msg in messages:
                 if isinstance(msg.content, str):
                     count += (
                         default_msg_prefix_len
                         + default_content_len
                         + default_msg_suffix_len
                     )
                 if isinstance(msg.content, list):
                     count += (
                         default_msg_prefix_len
                         + len(msg.content) * default_content_len
                         + default_msg_suffix_len
                     )
             return count
+        ```

         First 30 tokens, allowing partial messages:
-        .. code-block:: python
-
+        ```python
         trim_messages(
             messages,
             max_tokens=30,
             token_counter=dummy_token_counter,
             strategy="first",
             allow_partial=True,
         )
+        ```

-        .. code-block:: python
-
+        ```python
         [
-            SystemMessage(
-                "This is a 4 token text. The full message is 10 tokens."
-            ),
+            SystemMessage("This is a 4 token text. The full message is 10 tokens."),
             HumanMessage(
                 "This is a 4 token text. The full message is 10 tokens.",
                 id="first",
             ),
             AIMessage(
                 [{"type": "text", "text": "This is the FIRST 4 token block."}],
                 id="second",
             ),
         ]
+        ```
     """
     # Validate arguments
     if start_on and strategy == "first":
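The docstring recommends `count_tokens_approximately` for hot-path trimming but shows no example of it; a minimal sketch (the exact cutoff depends on the approximation, so the result here is illustrative):

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    count_tokens_approximately,
    trim_messages,
)

history = [
    HumanMessage("hello there, how are you today?"),
    AIMessage("doing great, thanks for asking!"),
    HumanMessage("what's the weather like?"),
]

trimmed = trim_messages(
    history,
    max_tokens=20,
    token_counter=count_tokens_approximately,  # fast, approximate counting
    strategy="last",
    start_on="human",
)
print([m.content for m in trimmed])
```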
@@ -1042,21 +1025,21 @@ def convert_to_openai_messages(
     messages: Message-like object or iterable of objects whose contents are
         in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats.
     text_format: How to format string or text block contents:
-        - ``'string'``:
+        - `'string'`:
             If a message has a string content, this is left as a string. If
-            a message has content blocks that are all of type ``'text'``, these
+            a message has content blocks that are all of type `'text'`, these
             are joined with a newline to make a single string. If a message has
-            content blocks and at least one isn't of type ``'text'``, then
+            content blocks and at least one isn't of type `'text'`, then
             all blocks are left as dicts.
-        - ``'block'``:
+        - `'block'`:
             If a message has a string content, this is turned into a list
-            with a single content block of type ``'text'``. If a message has
+            with a single content block of type `'text'`. If a message has
             content blocks these are left as is.
     include_id: Whether to include message ids in the openai messages, if they
         are present in the source messages.

 Raises:
-    ValueError: if an unrecognized ``text_format`` is specified, or if a message
+    ValueError: if an unrecognized `text_format` is specified, or if a message
         content block is missing expected keys.

 Returns:
@@ -1070,50 +1053,49 @@ def convert_to_openai_messages(
     message dicts is returned.

 Example:
-    .. code-block:: python
+    ```python
     from langchain_core.messages import (
         convert_to_openai_messages,
         AIMessage,
         SystemMessage,
         ToolMessage,
     )

     messages = [
         SystemMessage([{"type": "text", "text": "foo"}]),
         {
             "role": "user",
             "content": [
                 {"type": "text", "text": "whats in this"},
                 {
                     "type": "image_url",
                     "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"},
                 },
             ],
         },
         AIMessage(
             "",
             tool_calls=[
                 {
                     "name": "analyze",
                     "args": {"baz": "buz"},
                     "id": "1",
                     "type": "tool_call",
                 }
             ],
         ),
         ToolMessage("foobar", tool_call_id="1", name="bar"),
         {"role": "assistant", "content": "thats nice"},
     ]
     oai_messages = convert_to_openai_messages(messages)
     # -> [
     #   {'role': 'system', 'content': 'foo'},
     #   {'role': 'user', 'content': [{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]},
     #   {'role': 'assistant', 'tool_calls': [{'type': 'function', 'id': '1','function': {'name': 'analyze', 'arguments': '{"baz": "buz"}'}}], 'content': ''},
     #   {'role': 'tool', 'name': 'bar', 'content': 'foobar'},
     #   {'role': 'assistant', 'content': 'thats nice'}
     # ]
+    ```

 !!! version-added "Added in version 0.3.11"

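A short sketch of the `text_format` switch described in this hunk:

```python
from langchain_core.messages import AIMessage, convert_to_openai_messages

msg = AIMessage([{"type": "text", "text": "hello"}])

as_string = convert_to_openai_messages([msg], text_format="string")
as_blocks = convert_to_openai_messages([msg], text_format="block")

print(as_string[0]["content"])  # 'hello'
print(as_blocks[0]["content"])  # [{'type': 'text', 'text': 'hello'}]
```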
@@ -1695,13 +1677,13 @@ def count_tokens_approximately(
     Args:
         messages: List of messages to count tokens for.
         chars_per_token: Number of characters per token to use for the approximation.
-            Default is 4 (one token corresponds to ~4 chars for common English text).
+            One token corresponds to ~4 chars for common English text.
             You can also specify float values for more fine-grained control.
-            `See more here. <https://platform.openai.com/tokenizer>`__
-        extra_tokens_per_message: Number of extra tokens to add per message.
-            Default is 3 (special tokens, including beginning/end of message).
+            [See more here](https://platform.openai.com/tokenizer).
+        extra_tokens_per_message: Number of extra tokens to add per message, e.g.
+            special tokens, including beginning/end of message.
             You can also specify float values for more fine-grained control.
-            `See more here. <https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>`__
+            [See more here](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb).
         count_name: Whether to include message names in the count.
             Enabled by default.

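A back-of-the-envelope check of the approximation above: with the default ~4 chars/token plus 3 extra tokens per message, a 40-character message counts as roughly 10 + 3 = 13 tokens (illustrative, not exact):

```python
from langchain_core.messages import HumanMessage, count_tokens_approximately

msg = HumanMessage("x" * 40)
print(count_tokens_approximately([msg]))                       # ~13
print(count_tokens_approximately([msg], chars_per_token=2.0))  # ~23
```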
@@ -1,17 +1,4 @@
-"""**OutputParser** classes parse the output of an LLM call.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser  # ListOutputParser, PydanticOutputParser
-
-**Main helpers:**
-
-.. code-block::
-
-    Serializable, Generation, PromptValue
-"""  # noqa: E501
+"""**OutputParser** classes parse the output of an LLM call."""

 from typing import TYPE_CHECKING


@@ -31,13 +31,13 @@ class BaseLLMOutputParser(ABC, Generic[T]):

     @abstractmethod
     def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
-        """Parse a list of candidate model Generations into a specific format.
+        """Parse a list of candidate model `Generation` objects into a specific format.

         Args:
-            result: A list of Generations to be parsed. The Generations are assumed
-                to be different candidate outputs for a single model input.
+            result: A list of `Generation` to be parsed. The `Generation` objects are
+                assumed to be different candidate outputs for a single model input.
             partial: Whether to parse the output as a partial result. This is useful
-                for parsers that can parse partial results. Default is False.
+                for parsers that can parse partial results.

         Returns:
             Structured output.
@@ -46,17 +46,17 @@ class BaseLLMOutputParser(ABC, Generic[T]):
     async def aparse_result(
         self, result: list[Generation], *, partial: bool = False
     ) -> T:
-        """Async parse a list of candidate model Generations into a specific format.
+        """Async parse a list of candidate model `Generation` objects into a specific format.

         Args:
-            result: A list of Generations to be parsed. The Generations are assumed
+            result: A list of `Generation` to be parsed. The Generations are assumed
                 to be different candidate outputs for a single model input.
             partial: Whether to parse the output as a partial result. This is useful
-                for parsers that can parse partial results. Default is False.
+                for parsers that can parse partial results.

         Returns:
             Structured output.
-        """
+        """  # noqa: E501
         return await run_in_executor(None, self.parse_result, result, partial=partial)

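A sketch of the `parse_result` contract just described, using a concrete built-in parser (the choice of parser is illustrative):

```python
from langchain_core.output_parsers import CommaSeparatedListOutputParser
from langchain_core.outputs import Generation

parser = CommaSeparatedListOutputParser()

# Only candidate Generations for a single model input are passed together.
result = parser.parse_result([Generation(text="red, green, blue")])
print(result)  # ['red', 'green', 'blue']
```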
@@ -134,29 +134,28 @@ class BaseOutputParser(
|
||||
Output parsers help structure language model responses.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
```python
|
||||
class BooleanOutputParser(BaseOutputParser[bool]):
|
||||
true_val: str = "YES"
|
||||
false_val: str = "NO"
|
||||
|
||||
class BooleanOutputParser(BaseOutputParser[bool]):
|
||||
true_val: str = "YES"
|
||||
false_val: str = "NO"
|
||||
|
||||
def parse(self, text: str) -> bool:
|
||||
cleaned_text = text.strip().upper()
|
||||
if cleaned_text not in (
|
||||
self.true_val.upper(),
|
||||
self.false_val.upper(),
|
||||
):
|
||||
raise OutputParserException(
|
||||
f"BooleanOutputParser expected output value to either be "
|
||||
f"{self.true_val} or {self.false_val} (case-insensitive). "
|
||||
f"Received {cleaned_text}."
|
||||
)
|
||||
return cleaned_text == self.true_val.upper()
|
||||
|
||||
@property
|
||||
def _type(self) -> str:
|
||||
return "boolean_output_parser"
|
||||
def parse(self, text: str) -> bool:
|
||||
cleaned_text = text.strip().upper()
|
||||
if cleaned_text not in (
|
||||
self.true_val.upper(),
|
||||
self.false_val.upper(),
|
||||
):
|
||||
raise OutputParserException(
|
||||
f"BooleanOutputParser expected output value to either be "
|
||||
f"{self.true_val} or {self.false_val} (case-insensitive). "
|
||||
f"Received {cleaned_text}."
|
||||
)
|
||||
return cleaned_text == self.true_val.upper()
|
||||
|
||||
@property
|
||||
def _type(self) -> str:
|
||||
return "boolean_output_parser"
|
||||
```
|
||||
"""
|
||||
|
||||
@property
|
||||
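To make the `BooleanOutputParser` example above concrete, a short hypothetical driver (not part of the diff):

```python
parser = BooleanOutputParser()

parser.parse("YES")     # -> True
parser.parse("  no  ")  # -> False
parser.parse("maybe")   # raises OutputParserException
```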
@@ -173,7 +172,7 @@ class BaseOutputParser(
This property is inferred from the first type argument of the class.

Raises:
TypeError: If the class doesn't have an inferable OutputType.
TypeError: If the class doesn't have an inferable `OutputType`.
"""
for base in self.__class__.mro():
if hasattr(base, "__pydantic_generic_metadata__"):
@@ -235,16 +234,16 @@ class BaseOutputParser(

@override
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model Generations into a specific format.
"""Parse a list of candidate model `Generation` objects into a specific format.

The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.

Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results. Default is False.
for parsers that can parse partial results.

Returns:
Structured output.
@@ -265,20 +264,20 @@ class BaseOutputParser(
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model Generations into a specific format.
"""Async parse a list of candidate model `Generation` objects into a specific format.

The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.

Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results. Default is False.
for parsers that can parse partial results.

Returns:
Structured output.
"""
"""  # noqa: E501
return await run_in_executor(None, self.parse_result, result, partial=partial)

async def aparse(self, text: str) -> T:
@@ -300,13 +299,13 @@ class BaseOutputParser(
) -> Any:
"""Parse the output of an LLM call with the input prompt for context.

The prompt is largely provided in the event the OutputParser wants
The prompt is largely provided in the event the `OutputParser` wants
to retry or fix the output in some way, and needs information from
the prompt to do so.

Args:
completion: String output of a language model.
prompt: Input PromptValue.
prompt: Input `PromptValue`.

Returns:
Structured output.

@@ -62,7 +62,6 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Returns:
The parsed JSON object.
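For reference, the non-streaming behaviour that the `partial` flag builds on can be seen in a minimal sketch (assuming the usual `langchain_core.output_parsers` import path):

```python
from langchain_core.output_parsers import JsonOutputParser

parser = JsonOutputParser()
parser.parse('{"answer": 42}')  # -> {'answer': 42}
```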
@@ -146,10 +146,10 @@ class CommaSeparatedListOutputParser(ListOutputParser):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
``["langchain", "output_parsers", "list"]``
`["langchain", "output_parsers", "list"]`
"""
return ["langchain", "output_parsers", "list"]
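A minimal usage sketch for the parser whose namespace is documented above:

```python
from langchain_core.output_parsers import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()
parser.parse("red, green, blue")  # -> ['red', 'green', 'blue']
```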
@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.

Raises:
OutputParserException: If the output is not valid JSON.
`OutputParserException`: If the output is not valid JSON.
"""
generation = result[0]
if not isinstance(generation, ChatGeneration):
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse an output as the Json object."""
"""Parse an output as the JSON object."""

strict: bool = False
"""Whether to allow non-JSON-compliant strings.
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.

Raises:
OutputParserException: If the output is not valid JSON.
`OutputParserException`: If the output is not valid JSON.
"""
if len(result) != 1:
msg = f"Expected exactly one result, but got {len(result)}"
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):

class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
"""Parse an output as the element of the Json object."""
"""Parse an output as the element of the JSON object."""

key_name: str
"""The name of the key to return."""
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.
@@ -177,48 +177,50 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):

class PydanticOutputFunctionsParser(OutputFunctionsParser):
"""Parse an output as a pydantic object.
"""Parse an output as a Pydantic object.

This parser is used to parse the output of a ChatModel that uses
OpenAI function format to invoke functions.
This parser is used to parse the output of a chat model that uses OpenAI function
format to invoke functions.

The parser extracts the function call invocation and matches
them to the pydantic schema provided.
The parser extracts the function call invocation and matches them to the Pydantic
schema provided.

An exception will be raised if the function call does not match
the provided schema.
An exception will be raised if the function call does not match the provided schema.

Example:
... code-block:: python
```python
message = AIMessage(
content="This is a test message",
additional_kwargs={
"function_call": {
"name": "cookie",
"arguments": json.dumps({"name": "value", "age": 10}),
}
},
)
chat_generation = ChatGeneration(message=message)

message = AIMessage(
content="This is a test message",
additional_kwargs={
"function_call": {
"name": "cookie",
"arguments": json.dumps({"name": "value", "age": 10}),
}
},
)
chat_generation = ChatGeneration(message=message)

class Cookie(BaseModel):
name: str
age: int
class Cookie(BaseModel):
name: str
age: int

class Dog(BaseModel):
species: str

# Full output
parser = PydanticOutputFunctionsParser(
pydantic_schema={"cookie": Cookie, "dog": Dog}
)
result = parser.parse_result([chat_generation])
class Dog(BaseModel):
species: str

# Full output
parser = PydanticOutputFunctionsParser(
pydantic_schema={"cookie": Cookie, "dog": Dog}
)
result = parser.parse_result([chat_generation])
```

"""

pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
"""The pydantic schema to parse the output with.
"""The Pydantic schema to parse the output with.

If multiple schemas are provided, then the function name will be used to
determine which schema to use.
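A hypothetical single-schema variant of the docstring example above (reusing the `Cookie` model and `chat_generation` defined there):

```python
# Single schema: the parser validates the function-call arguments
# directly against Cookie instead of dispatching on the function name.
parser = PydanticOutputFunctionsParser(pydantic_schema=Cookie)
cookie = parser.parse_result([chat_generation])
assert cookie == Cookie(name="value", age=10)
```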
@@ -227,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
@model_validator(mode="before")
@classmethod
def validate_schema(cls, values: dict) -> Any:
"""Validate the pydantic schema.
"""Validate the Pydantic schema.

Args:
values: The values to validate.
@@ -236,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
The validated values.

Raises:
ValueError: If the schema is not a pydantic schema.
ValueError: If the schema is not a Pydantic schema.
"""
schema = values["pydantic_schema"]
if "args_only" not in values:
@@ -259,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Raises:
ValueError: If the pydantic schema is not valid.
ValueError: If the Pydantic schema is not valid.

Returns:
The parsed JSON object.
@@ -285,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
elif issubclass(pydantic_schema, BaseModelV1):
pydantic_args = pydantic_schema.parse_raw(args)
else:
msg = f"Unsupported pydantic schema: {pydantic_schema}"
msg = f"Unsupported Pydantic schema: {pydantic_schema}"
raise ValueError(msg)
return pydantic_args

class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
"""Parse an output as an attribute of a pydantic object."""
"""Parse an output as an attribute of a Pydantic object."""

attr_name: str
"""The name of the attribute to return."""
@@ -302,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.

@@ -31,10 +31,9 @@ def parse_tool_call(

Args:
raw_tool_call: The raw tool call to parse.
partial: Whether to parse partial JSON. Default is False.
partial: Whether to parse partial JSON.
strict: Whether to allow non-JSON-compliant strings.
Default is False.
return_id: Whether to return the tool call id. Default is True.
return_id: Whether to return the tool call id.

Returns:
The parsed tool call.
@@ -105,10 +104,9 @@ def parse_tool_calls(

Args:
raw_tool_calls: The raw tool calls to parse.
partial: Whether to parse partial JSON. Default is False.
partial: Whether to parse partial JSON.
strict: Whether to allow non-JSON-compliant strings.
Default is False.
return_id: Whether to return the tool call id. Default is True.
return_id: Whether to return the tool call id.

Returns:
The parsed tool calls.
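As a rough sketch of the input these helpers expect, an OpenAI-style raw tool call might be parsed like this (the exact keys of the returned dict vary across `langchain-core` versions, so treat the output comment as indicative):

```python
raw = {
    "id": "call_1",
    "function": {"name": "add", "arguments": '{"x": 2, "y": 3}'},
}
parse_tool_call(raw, return_id=True)
# -> {'name': 'add', 'args': {'x': 2, 'y': 3}, 'id': 'call_1', ...}
```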
@@ -165,7 +163,6 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Returns:
The parsed tool calls.
@@ -229,7 +226,6 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Raises:
OutputParserException: If the generation is not a chat generation.
@@ -313,7 +309,6 @@ class PydanticToolsParser(JsonOutputToolsParser):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Returns:
The parsed Pydantic objects.

@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (

class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
"""Parse an output using a Pydantic model."""

pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
"""The pydantic model to parse."""
"""The Pydantic model to parse."""

def _parse_obj(self, obj: dict) -> TBaseModel:
try:
@@ -45,21 +45,20 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> TBaseModel | None:
"""Parse the result of an LLM call to a pydantic object.
"""Parse the result of an LLM call to a Pydantic object.

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to `False`.

Raises:
OutputParserException: If the result is not valid JSON
or does not conform to the pydantic model.
`OutputParserException`: If the result is not valid JSON
or does not conform to the Pydantic model.

Returns:
The parsed pydantic object.
The parsed Pydantic object.
"""
try:
json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
raise

def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
"""Parse the output of an LLM call to a Pydantic object.

Args:
text: The output of the LLM call.

Returns:
The parsed pydantic object.
The parsed Pydantic object.
"""
return super().parse(text)

@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
"""Return the Pydantic model."""
return self.pydantic_object
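A minimal end-to-end sketch of `PydanticOutputParser` (assuming Pydantic v2 models):

```python
from langchain_core.output_parsers import PydanticOutputParser
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputParser(pydantic_object=Joke)
joke = parser.parse('{"setup": "Why?", "punchline": "Because."}')
assert joke.punchline == "Because."
```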
@@ -19,10 +19,10 @@ class StrOutputParser(BaseTransformOutputParser[str]):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
``["langchain", "schema", "output_parser"]``
`["langchain", "schema", "output_parser"]`
"""
return ["langchain", "schema", "output_parser"]

@@ -64,7 +64,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
Args:
input: The input to transform.
config: The configuration to use for the transformation.
kwargs: Additional keyword arguments.
**kwargs: Additional keyword arguments.

Yields:
The transformed output.
@@ -85,7 +85,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
Args:
input: The input to transform.
config: The configuration to use for the transformation.
kwargs: Additional keyword arguments.
**kwargs: Additional keyword arguments.

Yields:
The transformed output.

@@ -82,7 +82,7 @@ class _StreamingParser:
chunk: A chunk of text to parse. This can be a string or a BaseMessage.

Yields:
AddableDict: A dictionary representing the parsed XML element.
A dictionary representing the parsed XML element.

Raises:
xml.etree.ElementTree.ParseError: If the XML is not well-formed.

@@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc.)
- LLMs will return regular text strings.

In addition, users can access the raw output of either LLMs or chat models via
callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.

@@ -15,10 +15,10 @@ from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
"""A single chat generation output.

A subclass of ``Generation`` that represents the response from a chat model
A subclass of `Generation` that represents the response from a chat model
that generates chat messages.

The ``message`` attribute is a structured representation of the chat message.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.

Users working with chat models will usually access information via either
@@ -70,9 +70,9 @@ class ChatGeneration(Generation):

class ChatGenerationChunk(ChatGeneration):
"""``ChatGeneration`` chunk.
"""`ChatGeneration` chunk.

``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
`ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
"""

message: BaseMessageChunk

@@ -44,10 +44,10 @@ class Generation(Serializable):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
``["langchain", "schema", "output"]``
`["langchain", "schema", "output"]`
"""
return ["langchain", "schema", "output"]

@@ -56,16 +56,16 @@ class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""

def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two ``GenerationChunk``s.
"""Concatenate two `GenerationChunk`s.

Args:
other: Another ``GenerationChunk`` to concatenate with.
other: Another `GenerationChunk` to concatenate with.

Raises:
TypeError: If other is not a ``GenerationChunk``.
TypeError: If other is not a `GenerationChunk`.

Returns:
A new ``GenerationChunk`` concatenated from self and other.
A new `GenerationChunk` concatenated from self and other.
"""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
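The concatenation semantics documented above can be sketched as:

```python
from langchain_core.outputs import GenerationChunk

chunk = GenerationChunk(text="Hello, ") + GenerationChunk(text="world!")
chunk.text  # -> 'Hello, world!'
```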
@@ -30,8 +30,8 @@ class LLMResult(BaseModel):
The second dimension of the list represents different candidate generations for a
given prompt.

- When returned from **an LLM**, the type is ``list[list[Generation]]``.
- When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
- When returned from **an LLM**, the type is `list[list[Generation]]`.
- When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.

ChatGeneration is a subclass of Generation that has a field for a structured chat
message.
@@ -97,7 +97,7 @@ class LLMResult(BaseModel):
other: Another `LLMResult` object to compare against.

Returns:
True if the generations and ``llm_output`` are equal, False otherwise.
`True` if the generations and `llm_output` are equal, `False` otherwise.
"""
if not isinstance(other, LLMResult):
return NotImplemented

@@ -24,8 +24,8 @@ from langchain_core.messages import (
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.

PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
`PromptValues` can be converted to both LLM (pure text-generation) inputs and
chat model inputs.
"""

@classmethod
@@ -35,12 +35,12 @@ class PromptValue(Serializable, ABC):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

This is used to determine the namespace of the object when serializing.

Returns:
``["langchain", "schema", "prompt"]``
`["langchain", "schema", "prompt"]`
"""
return ["langchain", "schema", "prompt"]

@@ -62,12 +62,12 @@ class StringPromptValue(PromptValue):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

This is used to determine the namespace of the object when serializing.

Returns:
``["langchain", "prompts", "base"]``
`["langchain", "prompts", "base"]`
"""
return ["langchain", "prompts", "base"]
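A small sketch of the two conversions a `PromptValue` supports:

```python
from langchain_core.prompt_values import StringPromptValue

pv = StringPromptValue(text="Tell me a joke")
pv.to_string()    # -> 'Tell me a joke'
pv.to_messages()  # -> [HumanMessage(content='Tell me a joke')]
```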
@@ -99,12 +99,12 @@ class ChatPromptValue(PromptValue):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

This is used to determine the namespace of the object when serializing.

Returns:
``["langchain", "prompts", "chat"]``
`["langchain", "prompts", "chat"]`
"""
return ["langchain", "prompts", "chat"]

@@ -113,11 +113,11 @@ class ImageURL(TypedDict, total=False):
"""Image URL."""

detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image. Defaults to `'auto'`.
"""Specifies the detail level of the image.

Can be `'auto'`, `'low'`, or `'high'`.

This follows OpenAI's Chat Completion API's image URL format.

"""

url: str
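Since `ImageURL` is a `TypedDict` with `total=False`, a conforming value looks like this (hypothetical URL):

```python
image: ImageURL = {"url": "https://example.com/cat.png", "detail": "low"}
```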
@@ -1,28 +1,8 @@
"""**Prompt** is the input to the model.

Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.

**Class hierarchy:**

.. code-block::

BasePromptTemplate --> StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate

BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate

"""  # noqa: E501
Prompt is often constructed from multiple components and prompt values. Prompt classes
and functions make constructing and working with prompts easy.
"""

from typing import TYPE_CHECKING

@@ -96,10 +96,10 @@ class BasePromptTemplate(

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
``["langchain", "schema", "prompt_template"]``
`["langchain", "schema", "prompt_template"]`
"""
return ["langchain", "schema", "prompt_template"]

@@ -127,10 +127,10 @@ class BasePromptTemplate(
"""Get the input schema for the prompt.

Args:
config: RunnableConfig, configuration for the prompt.
config: configuration for the prompt.

Returns:
Type[BaseModel]: The input schema for the prompt.
The input schema for the prompt.
"""
# This is correct, but pydantic typings/mypy don't think so.
required_input_variables = {
@@ -199,7 +199,7 @@ class BasePromptTemplate(
config: RunnableConfig, configuration for the prompt.

Returns:
PromptValue: The output of the prompt.
The output of the prompt.
"""
config = ensure_config(config)
if self.metadata:
@@ -225,7 +225,7 @@ class BasePromptTemplate(
config: RunnableConfig, configuration for the prompt.

Returns:
PromptValue: The output of the prompt.
The output of the prompt.
"""
config = ensure_config(config)
if self.metadata:
@@ -245,20 +245,20 @@ class BasePromptTemplate(
"""Create Prompt Value.

Args:
kwargs: Any arguments to be passed to the prompt template.
**kwargs: Any arguments to be passed to the prompt template.

Returns:
PromptValue: The output of the prompt.
The output of the prompt.
"""

async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async create Prompt Value.

Args:
kwargs: Any arguments to be passed to the prompt template.
**kwargs: Any arguments to be passed to the prompt template.

Returns:
PromptValue: The output of the prompt.
The output of the prompt.
"""
return self.format_prompt(**kwargs)

@@ -266,10 +266,10 @@ class BasePromptTemplate(
"""Return a partial of the prompt template.

Args:
kwargs: Union[str, Callable[[], str]], partial variables to set.
**kwargs: partial variables to set.

Returns:
BasePromptTemplate: A partial of the prompt template.
A partial of the prompt template.
"""
prompt_dict = self.__dict__.copy()
prompt_dict["input_variables"] = list(
@@ -290,34 +290,30 @@ class BasePromptTemplate(
"""Format the prompt with the inputs.

Args:
kwargs: Any arguments to be passed to the prompt template.
**kwargs: Any arguments to be passed to the prompt template.

Returns:
A formatted string.

Example:

.. code-block:: python

prompt.format(variable1="foo")

```python
prompt.format(variable1="foo")
```
"""

async def aformat(self, **kwargs: Any) -> FormatOutputType:
"""Async format the prompt with the inputs.

Args:
kwargs: Any arguments to be passed to the prompt template.
**kwargs: Any arguments to be passed to the prompt template.

Returns:
A formatted string.

Example:

.. code-block:: python

await prompt.aformat(variable1="foo")

```python
await prompt.aformat(variable1="foo")
```
"""
return self.format(**kwargs)

@@ -330,10 +326,10 @@ class BasePromptTemplate(
"""Return dictionary representation of prompt.

Args:
kwargs: Any additional arguments to pass to the dictionary.
**kwargs: Any additional arguments to pass to the dictionary.

Returns:
Dict: Dictionary representation of the prompt.
Dictionary representation of the prompt.
"""
prompt_dict = super().model_dump(**kwargs)
with contextlib.suppress(NotImplementedError):
@@ -352,10 +348,9 @@ class BasePromptTemplate(
NotImplementedError: If the prompt type is not implemented.

Example:
.. code-block:: python

prompt.save(file_path="path/prompt.yaml")

```python
prompt.save(file_path="path/prompt.yaml")
```
"""
if self.partial_variables:
msg = "Cannot save prompt with partial variables."
@@ -426,16 +421,16 @@ def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str:
string of the document formatted.

Example:
.. code-block:: python
```python
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate

from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate

doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
format_document(doc, prompt)
>>> "Page 1: This is a joke"
doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
format_document(doc, prompt)
>>> "Page 1: This is a joke"

```
"""
return prompt.format(**_get_document_info(doc, prompt))
@@ -59,71 +59,70 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

Direct usage:

.. code-block:: python
```python
from langchain_core.prompts import MessagesPlaceholder

from langchain_core.prompts import MessagesPlaceholder
prompt = MessagesPlaceholder("history")
prompt.format_messages()  # raises KeyError

prompt = MessagesPlaceholder("history")
prompt.format_messages()  # raises KeyError
prompt = MessagesPlaceholder("history", optional=True)
prompt.format_messages()  # returns empty list []

prompt = MessagesPlaceholder("history", optional=True)
prompt.format_messages()  # returns empty list []

prompt.format_messages(
history=[
("system", "You are an AI assistant."),
("human", "Hello!"),
]
)
# -> [
#     SystemMessage(content="You are an AI assistant."),
#     HumanMessage(content="Hello!"),
# ]
prompt.format_messages(
history=[
("system", "You are an AI assistant."),
("human", "Hello!"),
]
)
# -> [
#     SystemMessage(content="You are an AI assistant."),
#     HumanMessage(content="Hello!"),
# ]
```

Building a prompt with chat history:

.. code-block:: python
```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
MessagesPlaceholder("history"),
("human", "{question}"),
]
)
prompt.invoke(
{
"history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
"question": "now multiply that by 4",
}
)
# -> ChatPromptValue(messages=[
#     SystemMessage(content="You are a helpful assistant."),
#     HumanMessage(content="what's 5 + 2"),
#     AIMessage(content="5 + 2 is 7"),
#     HumanMessage(content="now multiply that by 4"),
# ])
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
MessagesPlaceholder("history"),
("human", "{question}"),
]
)
prompt.invoke(
{
"history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
"question": "now multiply that by 4",
}
)
# -> ChatPromptValue(messages=[
#     SystemMessage(content="You are a helpful assistant."),
#     HumanMessage(content="what's 5 + 2"),
#     AIMessage(content="5 + 2 is 7"),
#     HumanMessage(content="now multiply that by 4"),
# ])
```

Limiting the number of messages:

.. code-block:: python
```python
from langchain_core.prompts import MessagesPlaceholder

from langchain_core.prompts import MessagesPlaceholder

prompt = MessagesPlaceholder("history", n_messages=1)

prompt.format_messages(
history=[
("system", "You are an AI assistant."),
("human", "Hello!"),
]
)
# -> [
#     HumanMessage(content="Hello!"),
# ]
prompt = MessagesPlaceholder("history", n_messages=1)

prompt.format_messages(
history=[
("system", "You are an AI assistant."),
("human", "Hello!"),
]
)
# -> [
#     HumanMessage(content="Hello!"),
# ]
```
"""

variable_name: str
@@ -136,7 +135,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

n_messages: PositiveInt | None = None
"""Maximum number of messages to include. If `None`, then will include all.
Defaults to `None`."""
"""

def __init__(
self, variable_name: str, *, optional: bool = False, **kwargs: Any
@@ -148,7 +147,6 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
optional: If `True` format_messages can be called with no arguments and will
return an empty list. If `False` then a named argument with name
`variable_name` must be passed in, even if the value is an empty list.
Defaults to `False`.]
"""
# mypy can't detect the init which is defined in the parent class
# b/c these are BaseModel classes.
@@ -196,7 +194,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -236,13 +234,13 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):

Args:
template: a template.
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.
partial_variables: A dictionary of variables that can be used to partially
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
Defaults to `None`.
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.

**kwargs: keyword arguments to pass to the constructor.

Returns:
@@ -331,7 +329,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -413,9 +411,9 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
Args:
template: a template.
template_format: format of the template.
Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
Options are: 'f-string', 'mustache', 'jinja2'.
partial_variables: A dictionary of variables that can be used too partially.
Defaults to `None`.

**kwargs: keyword arguments to pass to the constructor.

Returns:
@@ -638,7 +636,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -685,7 +683,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):

Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
in all the template messages in this chat template.

Returns:
formatted string.
@@ -697,7 +695,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):

Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
in all the template messages in this chat template.

Returns:
formatted string.
@@ -751,7 +749,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -781,78 +779,78 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
Examples:
!!! warning "Behavior changed in 0.2.24"
You can pass any Message-like formats supported by
``ChatPromptTemplate.from_messages()`` directly to ``ChatPromptTemplate()``
`ChatPromptTemplate.from_messages()` directly to `ChatPromptTemplate()`
init.

.. code-block:: python
```python
from langchain_core.prompts import ChatPromptTemplate

from langchain_core.prompts import ChatPromptTemplate
template = ChatPromptTemplate(
[
("system", "You are a helpful AI bot. Your name is {name}."),
("human", "Hello, how are you doing?"),
("ai", "I'm doing well, thanks!"),
("human", "{user_input}"),
]
)

template = ChatPromptTemplate(
[
("system", "You are a helpful AI bot. Your name is {name}."),
("human", "Hello, how are you doing?"),
("ai", "I'm doing well, thanks!"),
("human", "{user_input}"),
]
)

prompt_value = template.invoke(
{
"name": "Bob",
"user_input": "What is your name?",
}
)
# Output:
# ChatPromptValue(
#     messages=[
#         SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
#         HumanMessage(content='Hello, how are you doing?'),
#         AIMessage(content="I'm doing well, thanks!"),
#         HumanMessage(content='What is your name?')
#     ]
# )
prompt_value = template.invoke(
{
"name": "Bob",
"user_input": "What is your name?",
}
)
# Output:
# ChatPromptValue(
#     messages=[
#         SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
#         HumanMessage(content='Hello, how are you doing?'),
#         AIMessage(content="I'm doing well, thanks!"),
#         HumanMessage(content='What is your name?')
#     ]
# )
```

Messages Placeholder:

.. code-block:: python
```python
# In addition to Human/AI/Tool/Function messages,
# you can initialize the template with a MessagesPlaceholder
# either using the class directly or with the shorthand tuple syntax:

# In addition to Human/AI/Tool/Function messages,
# you can initialize the template with a MessagesPlaceholder
# either using the class directly or with the shorthand tuple syntax:
template = ChatPromptTemplate(
[
("system", "You are a helpful AI bot."),
# Means the template will receive an optional list of messages under
# the "conversation" key
("placeholder", "{conversation}"),
# Equivalently:
# MessagesPlaceholder(variable_name="conversation", optional=True)
]
)

template = ChatPromptTemplate(
[
("system", "You are a helpful AI bot."),
# Means the template will receive an optional list of messages under
# the "conversation" key
("placeholder", "{conversation}"),
# Equivalently:
# MessagesPlaceholder(variable_name="conversation", optional=True)
prompt_value = template.invoke(
{
"conversation": [
("human", "Hi!"),
("ai", "How can I assist you today?"),
("human", "Can you make me an ice cream sundae?"),
("ai", "No."),
]
)
}
)

prompt_value = template.invoke(
{
"conversation": [
("human", "Hi!"),
("ai", "How can I assist you today?"),
("human", "Can you make me an ice cream sundae?"),
("ai", "No."),
]
}
)

# Output:
# ChatPromptValue(
#     messages=[
#         SystemMessage(content='You are a helpful AI bot.'),
#         HumanMessage(content='Hi!'),
#         AIMessage(content='How can I assist you today?'),
#         HumanMessage(content='Can you make me an ice cream sundae?'),
#         AIMessage(content='No.'),
#     ]
# )
# Output:
# ChatPromptValue(
#     messages=[
#         SystemMessage(content='You are a helpful AI bot.'),
#         HumanMessage(content='Hi!'),
#         AIMessage(content='How can I assist you today?'),
#         HumanMessage(content='Can you make me an ice cream sundae?'),
#         AIMessage(content='No.'),
#     ]
# )
```

Single-variable template:

@@ -861,29 +859,28 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
inject the provided argument into that variable location.

.. code-block:: python
```python
from langchain_core.prompts import ChatPromptTemplate

from langchain_core.prompts import ChatPromptTemplate
template = ChatPromptTemplate(
[
("system", "You are a helpful AI bot. Your name is Carl."),
("human", "{user_input}"),
]
)

template = ChatPromptTemplate(
[
("system", "You are a helpful AI bot. Your name is Carl."),
("human", "{user_input}"),
]
)

prompt_value = template.invoke("Hello, there!")
# Equivalent to
# prompt_value = template.invoke({"user_input": "Hello, there!"})

# Output:
# ChatPromptValue(
#    messages=[
#        SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
#        HumanMessage(content='Hello, there!'),
#    ]
# )
prompt_value = template.invoke("Hello, there!")
# Equivalent to
# prompt_value = template.invoke({"user_input": "Hello, there!"})

# Output:
# ChatPromptValue(
#    messages=[
#        SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
#        HumanMessage(content='Hello, there!'),
#    ]
# )
```
"""  # noqa: E501

messages: Annotated[list[MessageLike], SkipValidation()]
@@ -902,12 +899,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

Args:
messages: sequence of message representations.
A message can be represented using the following formats:
(1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
(message type, template); e.g., ("human", "{user_input}"),
(4) 2-tuple of (message class, template), (5) a string which is
shorthand for ("human", template); e.g., "{user_input}".
template_format: format of the template. Defaults to "f-string".
A message can be represented using the following formats:
(1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
(message type, template); e.g., ("human", "{user_input}"),
(4) 2-tuple of (message class, template), (5) a string which is
shorthand for ("human", template); e.g., "{user_input}".
template_format: format of the template.
input_variables: A list of the names of the variables whose values are
required as inputs to the prompt.
optional_variables: A list of the names of the variables for placeholder
@@ -924,27 +921,26 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
Examples:
Instantiation from a list of message templates:

.. code-block:: python

template = ChatPromptTemplate(
[
("human", "Hello, how are you?"),
("ai", "I'm doing well, thanks!"),
("human", "That's good to hear."),
]
)
```python
template = ChatPromptTemplate(
[
("human", "Hello, how are you?"),
("ai", "I'm doing well, thanks!"),
("human", "That's good to hear."),
]
)
```

Instantiation from mixed message formats:

.. code-block:: python

template = ChatPromptTemplate(
[
SystemMessage(content="hello"),
("human", "Hello, how are you?"),
]
)

```python
template = ChatPromptTemplate(
[
SystemMessage(content="hello"),
("human", "Hello, how are you?"),
]
)
```
"""
messages_ = [
_convert_to_message_template(message, template_format)
@@ -974,10 +970,10 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
``["langchain", "prompts", "chat"]``
`["langchain", "prompts", "chat"]`
"""
return ["langchain", "prompts", "chat"]

@@ -1104,35 +1100,34 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
Examples:
Instantiation from a list of message templates:

.. code-block:: python

template = ChatPromptTemplate.from_messages(
[
("human", "Hello, how are you?"),
("ai", "I'm doing well, thanks!"),
("human", "That's good to hear."),
]
)
```python
template = ChatPromptTemplate.from_messages(
[
("human", "Hello, how are you?"),
("ai", "I'm doing well, thanks!"),
("human", "That's good to hear."),
]
)
```

Instantiation from mixed message formats:

.. code-block:: python

template = ChatPromptTemplate.from_messages(
[
SystemMessage(content="hello"),
("human", "Hello, how are you?"),
]
)

```python
template = ChatPromptTemplate.from_messages(
[
SystemMessage(content="hello"),
("human", "Hello, how are you?"),
]
)
```
Args:
messages: sequence of message representations.
A message can be represented using the following formats:
(1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
(message type, template); e.g., ("human", "{user_input}"),
(4) 2-tuple of (message class, template), (5) a string which is
shorthand for ("human", template); e.g., "{user_input}".
template_format: format of the template. Defaults to "f-string".
A message can be represented using the following formats:
(1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
(message type, template); e.g., ("human", "{user_input}"),
(4) 2-tuple of (message class, template), (5) a string which is
shorthand for ("human", template); e.g., "{user_input}".
template_format: format of the template.

Returns:
a chat prompt template.
@@ -1145,7 +1140,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
in all the template messages in this chat template.

Raises:
ValueError: if messages are of unexpected types.
@@ -1173,7 +1168,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
in all the template messages in this chat template.

Returns:
list of formatted messages.
@@ -1208,23 +1203,21 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

Example:
```python
from langchain_core.prompts import ChatPromptTemplate

.. code-block:: python

from langchain_core.prompts import ChatPromptTemplate

template = ChatPromptTemplate.from_messages(
[
("system", "You are an AI assistant named {name}."),
("human", "Hi I'm {user}"),
("ai", "Hi there, {user}, I'm {name}."),
("human", "{input}"),
]
)
template2 = template.partial(user="Lucy", name="R2D2")

template2.format_messages(input="hello")
template = ChatPromptTemplate.from_messages(
[
("system", "You are an AI assistant named {name}."),
("human", "Hi I'm {user}"),
("ai", "Hi there, {user}, I'm {name}."),
("human", "{input}"),
]
)
template2 = template.partial(user="Lucy", name="R2D2")

template2.format_messages(input="hello")
```
"""
prompt_dict = self.__dict__.copy()
prompt_dict["input_variables"] = list(
@@ -1262,7 +1255,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

Returns:
If index is an int, returns the message at that index.
If index is a slice, returns a new ``ChatPromptTemplate``
If index is a slice, returns a new `ChatPromptTemplate`
containing the messages in that slice.
"""
if isinstance(index, slice):
@@ -1293,7 +1286,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -1312,7 +1305,7 @@ def _create_template_from_message_type(
Args:
message_type: str the type of the message template (e.g., "human", "ai", etc.)
template: str the template string.
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.

Returns:
a message prompt template of the appropriate type.
@@ -1389,7 +1382,7 @@ def _convert_to_message_template(

Args:
message: a representation of a message in one of the supported formats.
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.

Returns:
an instance of a message or a message template.
@@ -74,10 +74,10 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
|
||||
|
||||
@classmethod
|
||||
def get_lc_namespace(cls) -> list[str]:
|
||||
"""Get the namespace of the langchain object.
|
||||
"""Get the namespace of the LangChain object.
|
||||
|
||||
Returns:
|
||||
``["langchain_core", "prompts", "dict"]``
|
||||
`["langchain_core", "prompts", "dict"]`
|
||||
"""
|
||||
return ["langchain_core", "prompts", "dict"]
|
||||
|
||||
@@ -85,7 +85,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
|
||||
"""Human-readable representation.
|
||||
|
||||
Args:
|
||||
html: Whether to format as HTML. Defaults to `False`.
|
||||
html: Whether to format as HTML.
|
||||
|
||||
Returns:
|
||||
Human-readable representation.
|
||||
|
||||
@@ -268,97 +268,90 @@ class FewShotChatMessagePromptTemplate(
    Prompt template with a fixed list of examples (matching the sample
    conversation above):

-   .. code-block:: python
-
-       from langchain_core.prompts import (
-           FewShotChatMessagePromptTemplate,
-           ChatPromptTemplate,
-       )
-
-       examples = [
-           {"input": "2+2", "output": "4"},
-           {"input": "2+3", "output": "5"},
-       ]
-
-       example_prompt = ChatPromptTemplate.from_messages(
-           [
-               ("human", "What is {input}?"),
-               ("ai", "{output}"),
-           ]
-       )
-
-       few_shot_prompt = FewShotChatMessagePromptTemplate(
-           examples=examples,
-           # This is a prompt template used to format each individual example.
-           example_prompt=example_prompt,
-       )
-
-       final_prompt = ChatPromptTemplate.from_messages(
-           [
-               ("system", "You are a helpful AI Assistant"),
-               few_shot_prompt,
-               ("human", "{input}"),
-           ]
-       )
-       final_prompt.format(input="What is 4+4?")
+   ```python
+   from langchain_core.prompts import (
+       FewShotChatMessagePromptTemplate,
+       ChatPromptTemplate,
+   )
+
+   examples = [
+       {"input": "2+2", "output": "4"},
+       {"input": "2+3", "output": "5"},
+   ]
+
+   example_prompt = ChatPromptTemplate.from_messages(
+       [
+           ("human", "What is {input}?"),
+           ("ai", "{output}"),
+       ]
+   )
+
+   few_shot_prompt = FewShotChatMessagePromptTemplate(
+       examples=examples,
+       # This is a prompt template used to format each individual example.
+       example_prompt=example_prompt,
+   )
+
+   final_prompt = ChatPromptTemplate.from_messages(
+       [
+           ("system", "You are a helpful AI Assistant"),
+           few_shot_prompt,
+           ("human", "{input}"),
+       ]
+   )
+   final_prompt.format(input="What is 4+4?")
+   ```

    Prompt template with dynamically selected examples:

-   .. code-block:: python
-
-       from langchain_core.prompts import SemanticSimilarityExampleSelector
-       from langchain_core.embeddings import OpenAIEmbeddings
-       from langchain_core.vectorstores import Chroma
-
-       examples = [
-           {"input": "2+2", "output": "4"},
-           {"input": "2+3", "output": "5"},
-           {"input": "2+4", "output": "6"},
-           # ...
-       ]
-
-       to_vectorize = [" ".join(example.values()) for example in examples]
-       embeddings = OpenAIEmbeddings()
-       vectorstore = Chroma.from_texts(to_vectorize, embeddings, metadatas=examples)
-       example_selector = SemanticSimilarityExampleSelector(vectorstore=vectorstore)
-
-       from langchain_core import SystemMessage
-       from langchain_core.prompts import HumanMessagePromptTemplate
-       from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
-
-       few_shot_prompt = FewShotChatMessagePromptTemplate(
-           # Which variable(s) will be passed to the example selector.
-           input_variables=["input"],
-           example_selector=example_selector,
-           # Define how each example will be formatted.
-           # In this case, each example will become 2 messages:
-           # 1 human, and 1 AI
-           example_prompt=(
-               HumanMessagePromptTemplate.from_template("{input}")
-               + AIMessagePromptTemplate.from_template("{output}")
-           ),
-       )
-       # Define the overall prompt.
-       final_prompt = (
-           SystemMessagePromptTemplate.from_template("You are a helpful AI Assistant")
-           + few_shot_prompt
-           + HumanMessagePromptTemplate.from_template("{input}")
-       )
-       # Show the prompt
-       print(final_prompt.format_messages(input="What's 3+3?"))  # noqa: T201
-
-       # Use within an LLM
-       from langchain_core.chat_models import ChatAnthropic
-
-       chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307")
-       chain.invoke({"input": "What's 3+3?"})
+   ```python
+   from langchain_core.prompts import SemanticSimilarityExampleSelector
+   from langchain_core.embeddings import OpenAIEmbeddings
+   from langchain_core.vectorstores import Chroma
+
+   examples = [
+       {"input": "2+2", "output": "4"},
+       {"input": "2+3", "output": "5"},
+       {"input": "2+4", "output": "6"},
+       # ...
+   ]
+
+   to_vectorize = [" ".join(example.values()) for example in examples]
+   embeddings = OpenAIEmbeddings()
+   vectorstore = Chroma.from_texts(
+       to_vectorize, embeddings, metadatas=examples
+   )
+   example_selector = SemanticSimilarityExampleSelector(
+       vectorstore=vectorstore
+   )
+
+   from langchain_core import SystemMessage
+   from langchain_core.prompts import HumanMessagePromptTemplate
+   from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
+
+   few_shot_prompt = FewShotChatMessagePromptTemplate(
+       # Which variable(s) will be passed to the example selector.
+       input_variables=["input"],
+       example_selector=example_selector,
+       # Define how each example will be formatted.
+       # In this case, each example will become 2 messages:
+       # 1 human, and 1 AI
+       example_prompt=(
+           HumanMessagePromptTemplate.from_template("{input}")
+           + AIMessagePromptTemplate.from_template("{output}")
+       ),
+   )
+   # Define the overall prompt.
+   final_prompt = (
+       SystemMessagePromptTemplate.from_template(
+           "You are a helpful AI Assistant"
+       )
+       + few_shot_prompt
+       + HumanMessagePromptTemplate.from_template("{input}")
+   )
+   # Show the prompt
+   print(final_prompt.format_messages(input="What's 3+3?"))  # noqa: T201
+
+   # Use within an LLM
+   from langchain_core.chat_models import ChatAnthropic
+
+   chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307")
+   chain.invoke({"input": "What's 3+3?"})
+   ```
    """

    input_variables: list[str] = Field(default_factory=list)

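Editor's note: the dynamically-selected example in this docstring, as diffed, references `AIMessagePromptTemplate` and `SystemMessagePromptTemplate` without importing them, and pulls `SystemMessage`, `OpenAIEmbeddings`, `Chroma`, and `ChatAnthropic` from `langchain_core` locations where those classes do not live (`ChatAnthropic`, for instance, ships in `langchain_anthropic`). A self-contained sketch of the selector-driven prompt, assuming the standard `langchain_core.prompts` exports and taking the `example_selector` setup from the docstring as given:

```python
from langchain_core.prompts import (
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate

few_shot_prompt = FewShotChatMessagePromptTemplate(
    # Which variable(s) will be passed to the example selector.
    input_variables=["input"],
    example_selector=example_selector,  # built as in the docstring above
    # Each selected example is rendered as a human/AI message pair.
    example_prompt=(
        HumanMessagePromptTemplate.from_template("{input}")
        + AIMessagePromptTemplate.from_template("{output}")
    ),
)
final_prompt = (
    SystemMessagePromptTemplate.from_template("You are a helpful AI Assistant")
    + few_shot_prompt
    + HumanMessagePromptTemplate.from_template("{input}")
)
```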
@@ -46,10 +46,10 @@ class FewShotPromptWithTemplates(StringPromptTemplate):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
-           ``["langchain", "prompts", "few_shot_with_templates"]``
+           `["langchain", "prompts", "few_shot_with_templates"]`
        """
        return ["langchain", "prompts", "few_shot_with_templates"]

@@ -116,17 +116,15 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
        """Format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

-           .. code-block:: python
-
-               prompt.format(variable1="foo")
-
+           ```python
+           prompt.format(variable1="foo")
+           ```
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
@@ -165,7 +163,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
        """Async format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

@@ -26,8 +26,8 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
        """Create an image prompt template.

        Raises:
-           ValueError: If the input variables contain ``'url'``, ``'path'``, or
-               ``'detail'``.
+           ValueError: If the input variables contain `'url'`, `'path'`, or
+               `'detail'`.
        """
        if "input_variables" not in kwargs:
            kwargs["input_variables"] = []
@@ -49,10 +49,10 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
-           ``["langchain", "prompts", "image"]``
+           `["langchain", "prompts", "image"]`
        """
        return ["langchain", "prompts", "image"]

@@ -60,7 +60,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
        """Format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
@@ -71,7 +71,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
        """Async format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
@@ -85,7 +85,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
        """Format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
@@ -93,14 +93,12 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
        Raises:
            ValueError: If the url is not provided.
            ValueError: If the url is not a string.
-           ValueError: If ``'path'`` is provided in the template or kwargs.
+           ValueError: If `'path'` is provided in the template or kwargs.

        Example:

-           .. code-block:: python
-
-               prompt.format(variable1="foo")
-
+           ```python
+           prompt.format(variable1="foo")
+           ```
        """
        formatted = {}
        for k, v in self.template.items():
@@ -134,7 +132,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
        """Async format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

@@ -139,7 +139,7 @@ def load_prompt(path: str | Path, encoding: str | None = None) -> BasePromptTemp

    Args:
        path: Path to the prompt file.
-       encoding: Encoding of the file. Defaults to `None`.
+       encoding: Encoding of the file.

    Returns:
        A PromptTemplate object.

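For reference, a minimal `load_prompt` call might look as follows; the file name is a placeholder, and the loader reads JSON or YAML prompt files, passing `encoding` through when opening the file:

```python
from langchain_core.prompts import load_prompt

# "my_prompt.yaml" is a hypothetical path to a saved prompt file.
prompt = load_prompt("my_prompt.yaml", encoding="utf-8")
print(prompt.format(topic="chickens"))  # variables depend on the saved template
```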
@@ -23,10 +23,10 @@ class BaseMessagePromptTemplate(Serializable, ABC):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
-           ``["langchain", "prompts", "chat"]``
+           `["langchain", "prompts", "chat"]`
        """
        return ["langchain", "prompts", "chat"]

@@ -68,7 +68,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):
        """Human-readable representation.

        Args:
-           html: Whether to format as HTML. Defaults to `False`.
+           html: Whether to format as HTML.

        Returns:
            Human-readable representation.

@@ -44,18 +44,16 @@ class PromptTemplate(StringPromptTemplate):
    from untrusted sources.

    Example:
-
-       .. code-block:: python
-
-           from langchain_core.prompts import PromptTemplate
-
-           # Instantiation using from_template (recommended)
-           prompt = PromptTemplate.from_template("Say {foo}")
-           prompt.format(foo="bar")
-
-           # Instantiation using initializer
-           prompt = PromptTemplate(template="Say {foo}")
+       ```python
+       from langchain_core.prompts import PromptTemplate
+
+       # Instantiation using from_template (recommended)
+       prompt = PromptTemplate.from_template("Say {foo}")
+       prompt.format(foo="bar")
+
+       # Instantiation using initializer
+       prompt = PromptTemplate(template="Say {foo}")
+       ```
    """

    @property
@@ -68,10 +66,10 @@ class PromptTemplate(StringPromptTemplate):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
-           ``["langchain", "prompts", "prompt"]``
+           `["langchain", "prompts", "prompt"]`
        """
        return ["langchain", "prompts", "prompt"]

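The namespace returned by `get_lc_namespace` is what surfaces as the `id` of a serialized object; a quick sketch, assuming `langchain_core.load.dumpd`:

```python
from langchain_core.load import dumpd
from langchain_core.prompts import PromptTemplate

serialized = dumpd(PromptTemplate.from_template("Say {foo}"))
# The namespace plus the class name identify the object on round-trip.
print(serialized["id"])  # ["langchain", "prompts", "prompt", "PromptTemplate"]
```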
@@ -144,10 +142,10 @@ class PromptTemplate(StringPromptTemplate):
        Raises:
            ValueError: If the template formats are not f-string or if there are
                conflicting partial variables.
-           NotImplementedError: If the other object is not a ``PromptTemplate`` or str.
+           NotImplementedError: If the other object is not a `PromptTemplate` or str.

        Returns:
-           A new ``PromptTemplate`` that is the combination of the two.
+           A new `PromptTemplate` that is the combination of the two.
        """
        # Allow for easy combining
        if isinstance(other, PromptTemplate):
@@ -191,7 +189,7 @@ class PromptTemplate(StringPromptTemplate):
        """Format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
@@ -222,7 +220,7 @@ class PromptTemplate(StringPromptTemplate):
            example_separator: The separator to use in between examples. Defaults
                to two new line characters.
            prefix: String that should go before any examples. Generally includes
-               examples. Default to an empty string.
+               examples.

        Returns:
            The final prompt generated.
@@ -277,14 +275,13 @@ class PromptTemplate(StringPromptTemplate):
        Args:
            template: The template to load.
            template_format: The format of the template. Use `jinja2` for jinja2,
-               `mustache` for mustache, and `f-string` for f-strings.
-               Defaults to `f-string`.
+               `mustache` for mustache, and `f-string` for f-strings.
            partial_variables: A dictionary of variables that can be used to partially
-               fill in the template. For example, if the template is
-               `"{variable1} {variable2}"`, and `partial_variables` is
-               `{"variable1": "foo"}`, then the final prompt will be
-               `"foo {variable2}"`. Defaults to `None`.
-           kwargs: Any other arguments to pass to the prompt template.
+               fill in the template. For example, if the template is
+               `"{variable1} {variable2}"`, and `partial_variables` is
+               `{"variable1": "foo"}`, then the final prompt will be
+               `"foo {variable2}"`.
+           **kwargs: Any other arguments to pass to the prompt template.

        Returns:
            The prompt template loaded from the template.

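The `partial_variables` behavior described in that docstring can be exercised directly; a minimal sketch:

```python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "{variable1} {variable2}",
    partial_variables={"variable1": "foo"},
)
# Only the remaining variable needs to be supplied at format time.
print(prompt.format(variable2="bar"))  # -> "foo bar"
```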
@@ -4,7 +4,7 @@ from __future__ import annotations

import warnings
from abc import ABC
-from collections.abc import Callable
+from collections.abc import Callable, Sequence
from string import Formatter
from typing import Any, Literal

@@ -149,9 +149,7 @@ def mustache_template_vars(
Defs = dict[str, "Defs"]


-def mustache_schema(
-    template: str,
-) -> type[BaseModel]:
+def mustache_schema(template: str) -> type[BaseModel]:
    """Get the variables from a mustache template.

    Args:
@@ -175,6 +173,11 @@ def mustache_schema(
            fields[prefix] = False
        elif type_ in {"variable", "no escape"}:
            fields[prefix + tuple(key.split("."))] = True
+
+   for fkey, fval in fields.items():
+       fields[fkey] = fval and not any(
+           is_subsequence(fkey, k) for k in fields if k != fkey
+       )
    defs: Defs = {}  # None means leaf node
    while fields:
        field, is_leaf = fields.popitem()
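The added loop demotes any field whose key path is a prefix of a deeper field, so a key used both as a bare variable and as a parent of dotted children is modeled as a nested object rather than a string leaf. A small sketch of the intended effect; the template here is illustrative, not from the source:

```python
from langchain_core.prompts.string import mustache_schema

# ("user",) is a prefix of ("user", "name"), so the filter marks "user"
# as a branch; the generated schema nests "name" under a "user" object.
schema = mustache_schema("{{user}} {{user.name}}")
print(schema.model_json_schema())
```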
@@ -273,10 +276,10 @@ class StringPromptTemplate(BasePromptTemplate, ABC):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
-           ``["langchain", "prompts", "base"]``
+           `["langchain", "prompts", "base"]`
        """
        return ["langchain", "prompts", "base"]

@@ -284,7 +287,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
        """Format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
@@ -295,7 +298,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
        """Async format the prompt with the inputs.

        Args:
-           kwargs: Any arguments to be passed to the prompt template.
+           **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
@@ -327,3 +330,12 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
    def pretty_print(self) -> None:
        """Print a pretty representation of the prompt."""
        print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201
+
+
+def is_subsequence(child: Sequence, parent: Sequence) -> bool:
+   """Return True if child is subsequence of parent."""
+   if len(child) == 0 or len(parent) == 0:
+       return False
+   if len(parent) < len(child):
+       return False
+   return all(child[i] == parent[i] for i in range(len(child)))
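Despite its name, the new helper is a prefix test over sequences (here, tuples of mustache key path segments). A quick check of its semantics, assuming the hunk above lands in `langchain_core/prompts/string.py`:

```python
from langchain_core.prompts.string import is_subsequence

assert is_subsequence(("user",), ("user", "name"))      # prefix -> True
assert not is_subsequence(("name",), ("user", "name"))  # not a prefix
assert not is_subsequence((), ("user",))                # empty child -> False
```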
Some files were not shown because too many files have changed in this diff.