mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-06 01:00:22 +00:00

Compare commits: wrap_tool_...async_impl (45 commits)
Commits in this range (SHA1):

fed37ddf96, 9f6b660f00, 00c2380019, c53ffe4b82, 23f5b0cedf, 89e3a10cbd,
fad0e2dbd8, 6c2f0eb67a, 760fc3bc12, e3fc7d8aa6, 2b3b209e40, 78903ac285,
f361acc11c, ed185c0026, 6dc34beb71, c2205f88e6, abdbe185c5, c1b816cb7e,
0559558715, 75965474fc, 5dc014fdf4, 291a9fcea1, dd994b9d7f, 83901b30e3,
bcfa21a6e7, af1da28459, ed2ee4e8cc, f293c8ffd6, 714c370191, a29d4e9c3a,
74983f8a96, 11c5b86981, 383f4c0ee9, 045e7ad4a1, 0e80291804, c99773b652,
5f9e3e33cd, 6fc21afbc9, 50445d4a27, 11a2efe49b, d8a680ee57, f405a2c57d,
3576e690fa, 057ac361ef, d9675a4a20
@@ -1,18 +0,0 @@
-{
-  "permissions": {
-    "allow": [
-      "Bash(uv run:*)",
-      "Bash(make:*)",
-      "WebSearch",
-      "WebFetch(domain:ai.pydantic.dev)",
-      "WebFetch(domain:openai.github.io)",
-      "Bash(uv run:*)",
-      "Bash(python3:*)",
-      "WebFetch(domain:github.com)",
-      "Bash(gh pr view:*)",
-      "Bash(gh pr diff:*)"
-    ],
-    "deny": [],
-    "ask": []
-  }
-}
.github/workflows/integration_tests.yml (vendored, 23 changes)

@@ -23,10 +23,8 @@ permissions:
   contents: read
 
 env:
-  POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
   DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
-  POETRY_LIBS: ("libs/partners/aws")
 
 jobs:
   # Generate dynamic test matrix based on input parameters or defaults
@@ -60,7 +58,6 @@ jobs:
           echo $matrix
           echo "matrix=$matrix" >> $GITHUB_OUTPUT
   # Run integration tests against partner libraries with live API credentials
-  # Tests are run with Poetry or UV depending on the library's setup
   build:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"
@@ -95,17 +92,7 @@ jobs:
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
           mv langchain-aws/libs/aws langchain/libs/partners/aws
 
-      - name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        uses: "./langchain/.github/actions/poetry_setup"
-        with:
-          python-version: ${{ matrix.python-version }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: langchain/${{ matrix.working-directory }}
-          cache-key: scheduled
-
       - name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
         uses: "./langchain/.github/actions/uv_setup"
         with:
          python-version: ${{ matrix.python-version }}
@@ -123,15 +110,7 @@ jobs:
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}
 
-      - name: "📦 Install Dependencies (Poetry)"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        run: |
-          echo "Running scheduled tests, installing dependencies with poetry..."
-          cd langchain/${{ matrix.working-directory }}
-          poetry install --with=test_integration,test
-
-      - name: "📦 Install Dependencies (UV)"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
+      - name: "📦 Install Dependencies"
        run: |
          echo "Running scheduled tests, installing dependencies with uv..."
          cd langchain/${{ matrix.working-directory }}
@@ -149,14 +149,14 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
     Args:
         to: The email address of the recipient.
         msg: The message body to send.
-        priority: Email priority level (`'low'`, ``'normal'``, `'high'`).
+        priority: Email priority level (`'low'`, `'normal'`, `'high'`).
 
     Returns:
-        True if email was sent successfully, False otherwise.
+        `True` if email was sent successfully, `False` otherwise.
 
     Raises:
-        InvalidEmailError: If the email address format is invalid.
-        SMTPConnectionError: If unable to connect to email server.
+        `InvalidEmailError`: If the email address format is invalid.
+        `SMTPConnectionError`: If unable to connect to email server.
     """
 ```
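For reference, a minimal runnable sketch of the function under the revised docstring style; the body and the error class here are hypothetical stand-ins, only the signature and the docstring text come from the diff above:

```python
class InvalidEmailError(ValueError):
    """Hypothetical error for malformed addresses."""


def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
    """Send an email to a recipient.

    Args:
        to: The email address of the recipient.
        msg: The message body to send.
        priority: Email priority level (`'low'`, `'normal'`, `'high'`).

    Returns:
        `True` if email was sent successfully, `False` otherwise.

    Raises:
        `InvalidEmailError`: If the email address format is invalid.
    """
    if "@" not in to:
        raise InvalidEmailError(f"Invalid address: {to!r}")
    # A real implementation would hand off to an SMTP client here.
    print(f"[{priority}] -> {to}: {msg}")
    return True
```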
README.md (21 changes)

@@ -34,14 +34,14 @@
 LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.
 
 ```bash
-pip install -U langchain
+pip install langchain
 ```
 
 ---
 
-**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
+**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/oss/python/langchain/overview).
 
-If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
+If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.
 
 > [!NOTE]
 > Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -62,16 +62,13 @@ While the LangChain framework can be used standalone, it also integrates seamles
 To improve your LLM application development, pair LangChain with:
 
 - [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
-- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
-- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
+- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
+- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio).
 
 ## Additional resources
 
-- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key
-  concepts behind the LangChain framework.
-- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with
-  guided examples on getting started with LangChain.
-- [API Reference](https://reference.langchain.com/python/): Detailed reference on
+- [Learn](https://docs.langchain.com/oss/python/learn): Use cases, conceptual overviews, and more.
+- [API Reference](https://reference.langchain.com/python): Detailed reference on
   navigating base packages and integrations for LangChain.
-- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
-- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
+- [LangChain Forum](https://forum.langchain.com): Connect with the community and share all of your technical questions, ideas, and feedback.
+- [Chat LangChain](https://chat.langchain.com): Ask questions & chat with our documentation.
@@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab
 ```python
 from __module_name__ import Chat__ModuleName__
 
-llm = Chat__ModuleName__()
-llm.invoke("Sing a ballad of LangChain.")
+model = Chat__ModuleName__()
+model.invoke("Sing a ballad of LangChain.")
 ```
 
 ## Embeddings
@@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from __module_name__ import __ModuleName__LLM
 
-llm = __ModuleName__LLM()
-llm.invoke("The meaning of life is")
+model = __ModuleName__LLM()
+model.invoke("The meaning of life is")
 ```
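To see the renamed convention with a concrete provider, here is a minimal sketch; `langchain_openai` and `gpt-4o` are assumptions for illustration, not part of the template itself:

```python
# Assumes `pip install langchain-openai` and OPENAI_API_KEY set in the environment.
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")  # `model`, not `llm`, per the updated convention
print(model.invoke("Sing a ballad of LangChain.").content)
```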
@@ -72,7 +72,9 @@
    "cell_type": "markdown",
    "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
@@ -126,7 +128,7 @@
    "source": [
     "from __module_name__ import Chat__ModuleName__\n",
     "\n",
-    "llm = Chat__ModuleName__(\n",
+    "model = Chat__ModuleName__(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
@@ -162,7 +164,7 @@
     "    ),\n",
     "    (\"human\", \"I love programming.\"),\n",
     "]\n",
-    "ai_msg = llm.invoke(messages)\n",
+    "ai_msg = model.invoke(messages)\n",
     "ai_msg"
    ]
   },
@@ -207,7 +209,7 @@
     "    ]\n",
     ")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"input_language\": \"English\",\n",
@@ -65,7 +65,9 @@
    "cell_type": "markdown",
    "id": "4b6e1ca6",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
@@ -119,7 +121,7 @@
    "source": [
     "from __module_name__ import __ModuleName__LLM\n",
     "\n",
-    "llm = __ModuleName__LLM(\n",
+    "model = __ModuleName__LLM(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
@@ -141,7 +143,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "035dea0f",
    "metadata": {
     "tags": []
@@ -150,7 +152,7 @@
    "source": [
     "input_text = \"__ModuleName__ is an AI company that \"\n",
     "\n",
-    "completion = llm.invoke(input_text)\n",
+    "completion = model.invoke(input_text)\n",
     "completion"
    ]
   },
@@ -177,7 +179,7 @@
     "\n",
     "prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"output_language\": \"German\",\n",
@@ -155,7 +155,7 @@
     "\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
    ]
   },
   {
@@ -185,7 +185,7 @@
     "chain = (\n",
     "    {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
     "    | prompt\n",
-    "    | llm\n",
+    "    | model\n",
     "    | StrOutputParser()\n",
     ")"
    ]
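Assembled end to end, the renamed cells produce a chain like the following sketch; the retriever and prompt here are simplified stand-ins for the notebook's own, so the snippet runs on its own (given an OpenAI API key):

```python
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)

# Stand-in for the notebook's real retriever, so the sketch is self-contained.
retriever = RunnableLambda(
    lambda query: [Document(page_content="LangChain is a framework for LLM apps.")]
)

prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)


def format_docs(docs):
    # Join retrieved documents into one context string.
    return "\n\n".join(doc.page_content for doc in docs)


chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
print(chain.invoke("What is LangChain?"))
```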
@@ -192,7 +192,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
    "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
    "metadata": {},
    "outputs": [],
@@ -203,7 +203,7 @@
     "# !pip install -qU langchain langchain-openai\n",
     "from langchain.chat_models import init_chat_model\n",
     "\n",
-    "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
+    "model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
    ]
   },
   {
@@ -216,7 +216,7 @@
     "from langgraph.prebuilt import create_react_agent\n",
     "\n",
     "tools = [tool]\n",
-    "agent = create_react_agent(llm, tools)"
+    "agent = create_react_agent(model, tools)"
    ]
   },
   {
@@ -26,13 +26,13 @@ class Chat__ModuleName__(BaseChatModel):
 
     # TODO: Replace with relevant packages, env vars.
     Setup:
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     # TODO: Populate with relevant params.
     Key init args — completion params:
@@ -57,216 +57,214 @@ class Chat__ModuleName__(BaseChatModel):
 
     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __module_name__ import Chat__ModuleName__
-
-            llm = Chat__ModuleName__(
-                model="...",
-                temperature=0,
-                max_tokens=None,
-                timeout=None,
-                max_retries=2,
-                # api_key="...",
-                # other params...
-            )
+        ```python
+        from __module_name__ import Chat__ModuleName__
+
+        model = Chat__ModuleName__(
+            model="...",
+            temperature=0,
+            max_tokens=None,
+            timeout=None,
+            max_retries=2,
+            # api_key="...",
+            # other params...
+        )
+        ```
 
     Invoke:
-        .. code-block:: python
-
-            messages = [
-                ("system", "You are a helpful translator. Translate the user sentence to French."),
-                ("human", "I love programming."),
-            ]
-            llm.invoke(messages)
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        messages = [
+            ("system", "You are a helpful translator. Translate the user sentence to French."),
+            ("human", "I love programming."),
+        ]
+        model.invoke(messages)
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if token-level streaming isn't supported.
     Stream:
-        .. code-block:: python
-
-            for chunk in llm.stream(messages):
-                print(chunk.text, end="")
-
-        .. code-block:: python
-
-            # TODO: Example output.
-
-        .. code-block:: python
-
-            stream = llm.stream(messages)
-            full = next(stream)
-            for chunk in stream:
-                full += chunk
-            full
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        for chunk in model.stream(messages):
+            print(chunk.text, end="")
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
+
+        ```python
+        stream = model.stream(messages)
+        full = next(stream)
+        for chunk in stream:
+            full += chunk
+        full
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if native async isn't supported.
     Async:
-        .. code-block:: python
-
-            await llm.ainvoke(messages)
-
-            # stream:
-            # async for chunk in (await llm.astream(messages))
-
-            # batch:
-            # await llm.abatch([messages])
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        await model.ainvoke(messages)
+
+        # stream:
+        # async for chunk in (await model.astream(messages))
+
+        # batch:
+        # await model.abatch([messages])
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if .bind_tools() isn't supported.
     Tool calling:
-        .. code-block:: python
-
-            from pydantic import BaseModel, Field
-
-            class GetWeather(BaseModel):
-                '''Get the current weather in a given location'''
-
-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
-
-            class GetPopulation(BaseModel):
-                '''Get the current population in a given location'''
-
-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
-
-            llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
-            ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
-            ai_msg.tool_calls
-
-        .. code-block:: python
-
-            # TODO: Example output.
-
-        See ``Chat__ModuleName__.bind_tools()`` method for more.
+        ```python
+        from pydantic import BaseModel, Field
+
+        class GetWeather(BaseModel):
+            '''Get the current weather in a given location'''
+
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+        class GetPopulation(BaseModel):
+            '''Get the current population in a given location'''
+
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+        model_with_tools = model.bind_tools([GetWeather, GetPopulation])
+        ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
+        ai_msg.tool_calls
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
+
+        See `Chat__ModuleName__.bind_tools()` method for more.
 
     # TODO: Delete if .with_structured_output() isn't supported.
     Structured output:
-        .. code-block:: python
-
-            from typing import Optional
-
-            from pydantic import BaseModel, Field
-
-            class Joke(BaseModel):
-                '''Joke to tell user.'''
-
-                setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-                rating: int | None = Field(description="How funny the joke is, from 1 to 10")
-
-            structured_llm = llm.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats")
-
-        .. code-block:: python
-
-            # TODO: Example output.
-
-        See ``Chat__ModuleName__.with_structured_output()`` for more.
+        ```python
+        from typing import Optional
+
+        from pydantic import BaseModel, Field
+
+        class Joke(BaseModel):
+            '''Joke to tell user.'''
+
+            setup: str = Field(description="The setup of the joke")
+            punchline: str = Field(description="The punchline to the joke")
+            rating: int | None = Field(description="How funny the joke is, from 1 to 10")
+
+        structured_model = model.with_structured_output(Joke)
+        structured_model.invoke("Tell me a joke about cats")
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
+
+        See `Chat__ModuleName__.with_structured_output()` for more.
 
     # TODO: Delete if JSON mode response format isn't supported.
     JSON mode:
-        .. code-block:: python
-
-            # TODO: Replace with appropriate bind arg.
-            json_llm = llm.bind(response_format={"type": "json_object"})
-            ai_msg = json_llm.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
-            ai_msg.content
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        # TODO: Replace with appropriate bind arg.
+        json_model = model.bind(response_format={"type": "json_object"})
+        ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
+        ai_msg.content
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if image inputs aren't supported.
     Image input:
-        .. code-block:: python
-
-            import base64
-            import httpx
-            from langchain_core.messages import HumanMessage
-
-            image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
-            image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
-            # TODO: Replace with appropriate message content format.
-            message = HumanMessage(
-                content=[
-                    {"type": "text", "text": "describe the weather in this image"},
-                    {
-                        "type": "image_url",
-                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
-                    },
-                ],
-            )
-            ai_msg = llm.invoke([message])
-            ai_msg.content
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        import base64
+        import httpx
+        from langchain_core.messages import HumanMessage
+
+        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
+        # TODO: Replace with appropriate message content format.
+        message = HumanMessage(
+            content=[
+                {"type": "text", "text": "describe the weather in this image"},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
+                },
+            ],
+        )
+        ai_msg = model.invoke([message])
+        ai_msg.content
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if audio inputs aren't supported.
     Audio input:
-        .. code-block:: python
-
-            # TODO: Example input
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        # TODO: Example input
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
 
     # TODO: Delete if video inputs aren't supported.
     Video input:
-        .. code-block:: python
-
-            # TODO: Example input
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        # TODO: Example input
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
 
     # TODO: Delete if token usage metadata isn't supported.
     Token usage:
-        .. code-block:: python
-
-            ai_msg = llm.invoke(messages)
-            ai_msg.usage_metadata
-
-        .. code-block:: python
-
-            {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
+        ```python
+        ai_msg = model.invoke(messages)
+        ai_msg.usage_metadata
+        ```
+
+        ```python
+        {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
+        ```
 
     # TODO: Delete if logprobs aren't supported.
     Logprobs:
-        .. code-block:: python
-
-            # TODO: Replace with appropriate bind arg.
-            logprobs_llm = llm.bind(logprobs=True)
-            ai_msg = logprobs_llm.invoke(messages)
-            ai_msg.response_metadata["logprobs"]
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        # TODO: Replace with appropriate bind arg.
+        logprobs_model = model.bind(logprobs=True)
+        ai_msg = logprobs_model.invoke(messages)
+        ai_msg.response_metadata["logprobs"]
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     Response metadata
-        .. code-block:: python
-
-            ai_msg = llm.invoke(messages)
-            ai_msg.response_metadata
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        ai_msg = model.invoke(messages)
+        ai_msg.response_metadata
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
     """  # noqa: E501
 
     model_name: str = Field(alias="model")
@@ -314,11 +312,11 @@ class Chat__ModuleName__(BaseChatModel):
         Args:
             messages: the prompt composed of a list of messages.
             stop: a list of strings on which the model should stop generating.
-                  If generation stops due to a stop token, the stop token itself
-                  SHOULD BE INCLUDED as part of the output. This is not enforced
-                  across models right now, but it's a good practice to follow since
-                  it makes it much easier to parse the output of the model
-                  downstream and understand why generation stopped.
+                If generation stops due to a stop token, the stop token itself
+                SHOULD BE INCLUDED as part of the output. This is not enforced
+                across models right now, but it's a good practice to follow since
+                it makes it much easier to parse the output of the model
+                downstream and understand why generation stopped.
             run_manager: A run manager with callbacks for the LLM.
         """
         # Replace this with actual logic to generate a response from a list
@@ -362,11 +360,11 @@ class Chat__ModuleName__(BaseChatModel):
         Args:
             messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
-                  If generation stops due to a stop token, the stop token itself
-                  SHOULD BE INCLUDED as part of the output. This is not enforced
-                  across models right now, but it's a good practice to follow since
-                  it makes it much easier to parse the output of the model
-                  downstream and understand why generation stopped.
+                If generation stops due to a stop token, the stop token itself
+                SHOULD BE INCLUDED as part of the output. This is not enforced
+                across models right now, but it's a good practice to follow since
+                it makes it much easier to parse the output of the model
+                downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.
         """
         last_message = messages[-1]
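For orientation, `ai_msg.tool_calls` in the tool-calling section holds a list of tool-call dicts; the sketch below is a hypothetical illustration of the shape only, as actual values vary by provider:

```python
# Hypothetical output shape for `ai_msg.tool_calls` — values are illustrative.
[
    {"name": "GetWeather", "args": {"location": "Los Angeles, CA"}, "id": "call_1", "type": "tool_call"},
    {"name": "GetPopulation", "args": {"location": "New York, NY"}, "id": "call_2", "type": "tool_call"},
]
```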
@@ -14,55 +14,55 @@ class __ModuleName__Loader(BaseLoader):
 
     # TODO: Replace with relevant packages, env vars.
     Setup:
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from langchain_community.document_loaders import __ModuleName__Loader
-
-            loader = __ModuleName__Loader(
-                # required params = ...
-                # other params = ...
-            )
+        ```python
+        from langchain_community.document_loaders import __ModuleName__Loader
+
+        loader = __ModuleName__Loader(
+            # required params = ...
+            # other params = ...
+        )
+        ```
 
     Lazy load:
-        .. code-block:: python
-
-            docs = []
-            docs_lazy = loader.lazy_load()
-
-            # async variant:
-            # docs_lazy = await loader.alazy_load()
-
-            for doc in docs_lazy:
-                docs.append(doc)
-            print(docs[0].page_content[:100])
-            print(docs[0].metadata)
-
-        .. code-block:: python
-
-            TODO: Example output
+        ```python
+        docs = []
+        docs_lazy = loader.lazy_load()
+
+        # async variant:
+        # docs_lazy = await loader.alazy_load()
+
+        for doc in docs_lazy:
+            docs.append(doc)
+        print(docs[0].page_content[:100])
+        print(docs[0].metadata)
+        ```
+
+        ```python
+        TODO: Example output
+        ```
 
     # TODO: Delete if async load is not implemented
     Async load:
-        .. code-block:: python
-
-            docs = await loader.aload()
-            print(docs[0].page_content[:100])
-            print(docs[0].metadata)
-
-        .. code-block:: python
-
-            TODO: Example output
+        ```python
+        docs = await loader.aload()
+        print(docs[0].page_content[:100])
+        print(docs[0].metadata)
+        ```
+
+        ```python
+        TODO: Example output
+        ```
     """
 
     # TODO: This method must be implemented to load documents.
@@ -8,13 +8,13 @@ class __ModuleName__Embeddings(Embeddings):
 
     # TODO: Replace with relevant packages, env vars.
     Setup:
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     # TODO: Populate with relevant params.
     Key init args — completion params:
@@ -25,50 +25,50 @@ class __ModuleName__Embeddings(Embeddings):
 
     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __module_name__ import __ModuleName__Embeddings
-
-            embed = __ModuleName__Embeddings(
-                model="...",
-                # api_key="...",
-                # other params...
-            )
+        ```python
+        from __module_name__ import __ModuleName__Embeddings
+
+        embed = __ModuleName__Embeddings(
+            model="...",
+            # api_key="...",
+            # other params...
+        )
+        ```
 
     Embed single text:
-        .. code-block:: python
-
-            input_text = "The meaning of life is 42"
-            embed.embed_query(input_text)
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        input_text = "The meaning of life is 42"
+        embed.embed_query(input_text)
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if token-level streaming isn't supported.
     Embed multiple text:
-        .. code-block:: python
-
-            input_texts = ["Document 1...", "Document 2..."]
-            embed.embed_documents(input_texts)
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        input_texts = ["Document 1...", "Document 2..."]
+        embed.embed_documents(input_texts)
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
 
     # TODO: Delete if native async isn't supported.
     Async:
-        .. code-block:: python
-
-            await embed.aembed_query(input_text)
-
-            # multiple:
-            # await embed.aembed_documents(input_texts)
-
-        .. code-block:: python
-
-            # TODO: Example output.
+        ```python
+        await embed.aembed_query(input_text)
+
+        # multiple:
+        # await embed.aembed_documents(input_texts)
+        ```
+
+        ```python
+        # TODO: Example output.
+        ```
     """
 
     def __init__(self, model: str):
@@ -14,13 +14,13 @@ class __ModuleName__Retriever(BaseRetriever):
 
     # TODO: Replace with relevant packages, env vars, etc.
     Setup:
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     # TODO: Populate with relevant params.
     Key init args:
@@ -31,58 +31,58 @@ class __ModuleName__Retriever(BaseRetriever):
 
     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __package_name__ import __ModuleName__Retriever
-
-            retriever = __ModuleName__Retriever(
-                # ...
-            )
+        ```python
+        from __package_name__ import __ModuleName__Retriever
+
+        retriever = __ModuleName__Retriever(
+            # ...
+        )
+        ```
 
     Usage:
-        .. code-block:: python
-
-            query = "..."
-
-            retriever.invoke(query)
-
-        .. code-block::
-
-            # TODO: Example output.
+        ```python
+        query = "..."
+
+        retriever.invoke(query)
+        ```
+
+        ```txt
+        # TODO: Example output.
+        ```
 
     Use within a chain:
-        .. code-block:: python
-
-            from langchain_core.output_parsers import StrOutputParser
-            from langchain_core.prompts import ChatPromptTemplate
-            from langchain_core.runnables import RunnablePassthrough
-            from langchain_openai import ChatOpenAI
-
-            prompt = ChatPromptTemplate.from_template(
-                \"\"\"Answer the question based only on the context provided.
-
-            Context: {context}
-
-            Question: {question}\"\"\"
-            )
-
-            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
-
-            def format_docs(docs):
-                return "\\n\\n".join(doc.page_content for doc in docs)
-
-            chain = (
-                {"context": retriever | format_docs, "question": RunnablePassthrough()}
-                | prompt
-                | llm
-                | StrOutputParser()
-            )
-
-            chain.invoke("...")
-
-        .. code-block::
-
-            # TODO: Example output.
+        ```python
+        from langchain_core.output_parsers import StrOutputParser
+        from langchain_core.prompts import ChatPromptTemplate
+        from langchain_core.runnables import RunnablePassthrough
+        from langchain_openai import ChatOpenAI
+
+        prompt = ChatPromptTemplate.from_template(
+            \"\"\"Answer the question based only on the context provided.
+
+        Context: {context}
+
+        Question: {question}\"\"\"
+        )
+
+        model = ChatOpenAI(model="gpt-3.5-turbo-0125")
+
+        def format_docs(docs):
+            return "\\n\\n".join(doc.page_content for doc in docs)
+
+        chain = (
+            {"context": retriever | format_docs, "question": RunnablePassthrough()}
+            | prompt
+            | model
+            | StrOutputParser()
+        )
+
+        chain.invoke("...")
+        ```
+
+        ```txt
+        # TODO: Example output.
+        ```
 
     """
@@ -12,13 +12,13 @@ class __ModuleName__Toolkit(BaseToolkit):
 
     # TODO: Replace with relevant packages, env vars, etc.
     Setup:
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     # TODO: Populate with relevant params.
     Key init args:
@@ -29,42 +29,42 @@ class __ModuleName__Toolkit(BaseToolkit):
 
     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __package_name__ import __ModuleName__Toolkit
-
-            toolkit = __ModuleName__Toolkit(
-                # ...
-            )
+        ```python
+        from __package_name__ import __ModuleName__Toolkit
+
+        toolkit = __ModuleName__Toolkit(
+            # ...
+        )
+        ```
 
     Tools:
-        .. code-block:: python
-
-            toolkit.get_tools()
-
-        .. code-block::
-
-            # TODO: Example output.
+        ```python
+        toolkit.get_tools()
+        ```
+
+        ```txt
+        # TODO: Example output.
+        ```
 
     Use within an agent:
-        .. code-block:: python
-
-            from langgraph.prebuilt import create_react_agent
-
-            agent_executor = create_react_agent(llm, tools)
-
-            example_query = "..."
-
-            events = agent_executor.stream(
-                {"messages": [("user", example_query)]},
-                stream_mode="values",
-            )
-            for event in events:
-                event["messages"][-1].pretty_print()
-
-        .. code-block::
-
-            # TODO: Example output.
+        ```python
+        from langgraph.prebuilt import create_react_agent
+
+        agent_executor = create_react_agent(llm, tools)
+
+        example_query = "..."
+
+        events = agent_executor.stream(
+            {"messages": [("user", example_query)]},
+            stream_mode="values",
+        )
+        for event in events:
+            event["messages"][-1].pretty_print()
+        ```
+
+        ```txt
+        # TODO: Example output.
+        ```
 
     """
@@ -27,42 +27,42 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]
 
     Setup:
         # TODO: Replace with relevant packages, env vars.
-        Install ``__package_name__`` and set environment variable
-        ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable
+        `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     Instantiation:
-        .. code-block:: python
-
-            tool = __ModuleName__Tool(
-                # TODO: init params
-            )
+        ```python
+        tool = __ModuleName__Tool(
+            # TODO: init params
+        )
+        ```
 
     Invocation with args:
-        .. code-block:: python
-
-            # TODO: invoke args
-            tool.invoke({...})
-
-        .. code-block:: python
-
-            # TODO: output of invocation
+        ```python
+        # TODO: invoke args
+        tool.invoke({...})
+        ```
+
+        ```python
+        # TODO: output of invocation
+        ```
 
     Invocation with ToolCall:
 
-        .. code-block:: python
-
-            # TODO: invoke args
-            tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
-
-        .. code-block:: python
-
-            # TODO: output of invocation
+        ```python
+        # TODO: invoke args
+        tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
+        ```
+
+        ```python
+        # TODO: output of invocation
+        ```
     """  # noqa: E501
 
     # TODO: Set tool name and description
@@ -28,12 +28,12 @@ class __ModuleName__VectorStore(VectorStore):
 
     # TODO: Replace with relevant packages, env vars.
     Setup:
-        Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.
+        Install `__package_name__` and set environment variable `__MODULE_NAME___API_KEY`.
 
-        .. code-block:: bash
-
-            pip install -U __package_name__
-            export __MODULE_NAME___API_KEY="your-api-key"
+        ```bash
+        pip install -U __package_name__
+        export __MODULE_NAME___API_KEY="your-api-key"
+        ```
 
     # TODO: Populate with relevant params.
     Key init args — indexing params:
@@ -51,110 +51,110 @@ class __ModuleName__VectorStore(VectorStore):
 
     # TODO: Replace with relevant init params.
     Instantiate:
-        .. code-block:: python
-
-            from __module_name__.vectorstores import __ModuleName__VectorStore
-            from langchain_openai import OpenAIEmbeddings
-
-            vector_store = __ModuleName__VectorStore(
-                collection_name="foo",
-                embedding_function=OpenAIEmbeddings(),
-                connection_args={"uri": "./foo.db"},
-                # other params...
-            )
+        ```python
+        from __module_name__.vectorstores import __ModuleName__VectorStore
+        from langchain_openai import OpenAIEmbeddings
+
+        vector_store = __ModuleName__VectorStore(
+            collection_name="foo",
+            embedding_function=OpenAIEmbeddings(),
+            connection_args={"uri": "./foo.db"},
+            # other params...
+        )
+        ```
 
     # TODO: Populate with relevant variables.
     Add Documents:
-        .. code-block:: python
-
-            from langchain_core.documents import Document
-
-            document_1 = Document(page_content="foo", metadata={"baz": "bar"})
-            document_2 = Document(page_content="thud", metadata={"bar": "baz"})
-            document_3 = Document(page_content="i will be deleted :(")
-
-            documents = [document_1, document_2, document_3]
-            ids = ["1", "2", "3"]
-            vector_store.add_documents(documents=documents, ids=ids)
+        ```python
+        from langchain_core.documents import Document
+
+        document_1 = Document(page_content="foo", metadata={"baz": "bar"})
+        document_2 = Document(page_content="thud", metadata={"bar": "baz"})
+        document_3 = Document(page_content="i will be deleted :(")
+
+        documents = [document_1, document_2, document_3]
+        ids = ["1", "2", "3"]
+        vector_store.add_documents(documents=documents, ids=ids)
+        ```
 
     # TODO: Populate with relevant variables.
     Delete Documents:
-        .. code-block:: python
-
-            vector_store.delete(ids=["3"])
+        ```python
+        vector_store.delete(ids=["3"])
+        ```
 
     # TODO: Fill out with relevant variables and example output.
     Search:
-        .. code-block:: python
-
-            results = vector_store.similarity_search(query="thud",k=1)
-            for doc in results:
-                print(f"* {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        results = vector_store.similarity_search(query="thud",k=1)
+        for doc in results:
+            print(f"* {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
 
     # TODO: Fill out with relevant variables and example output.
     Search with filter:
-        .. code-block:: python
-
-            results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
-            for doc in results:
-                print(f"* {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
+        for doc in results:
+            print(f"* {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
 
     # TODO: Fill out with relevant variables and example output.
     Search with score:
-        .. code-block:: python
-
-            results = vector_store.similarity_search_with_score(query="qux",k=1)
-            for doc, score in results:
-                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        results = vector_store.similarity_search_with_score(query="qux",k=1)
+        for doc, score in results:
+            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
 
     # TODO: Fill out with relevant variables and example output.
     Async:
-        .. code-block:: python
-
-            # add documents
-            # await vector_store.aadd_documents(documents=documents, ids=ids)
-
-            # delete documents
-            # await vector_store.adelete(ids=["3"])
-
-            # search
-            # results = vector_store.asimilarity_search(query="thud",k=1)
-
-            # search with score
-            results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
-            for doc,score in results:
-                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        # add documents
+        # await vector_store.aadd_documents(documents=documents, ids=ids)
+
+        # delete documents
+        # await vector_store.adelete(ids=["3"])
+
+        # search
+        # results = vector_store.asimilarity_search(query="thud",k=1)
+
+        # search with score
+        results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
+        for doc,score in results:
+            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
 
     # TODO: Fill out with relevant variables and example output.
     Use as Retriever:
-        .. code-block:: python
-
-            retriever = vector_store.as_retriever(
-                search_type="mmr",
-                search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
-            )
-            retriever.invoke("thud")
-
-        .. code-block:: python
-
-            # TODO: Example output
+        ```python
+        retriever = vector_store.as_retriever(
+            search_type="mmr",
+            search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
+        )
+        retriever.invoke("thud")
+        ```
+
+        ```python
+        # TODO: Example output
+        ```
     """  # noqa: E501
 
     def __init__(self, embedding: Embeddings) -> None:
@@ -24,7 +24,7 @@ def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
     This code works
 
     Args:
-        pkg_name (str): The name of the partner package.
+        pkg_name: The name of the partner package.
 
     Returns:
         List of 2-tuples containing old and new import paths.
@@ -65,7 +65,7 @@ def is_subclass(class_obj: type, classes_: list[type]) -> bool:
         classes_: A list of classes to check against.
 
     Returns:
-        True if `class_obj` is a subclass of any class in `classes_`, False otherwise.
+        True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
     """
     return any(
         issubclass(class_obj, kls)
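The returned 2-tuples pair an old import path with its replacement; the concrete values below are hypothetical illustrations of the shape, not output of this function:

```python
# Hypothetical illustration of the return shape of
# get_migrations_for_partner_package("openai") — the paths are examples only.
migrations: list[tuple[str, str]] = [
    ("langchain.chat_models.ChatOpenAI", "langchain_openai.ChatOpenAI"),
    ("langchain.embeddings.OpenAIEmbeddings", "langchain_openai.OpenAIEmbeddings"),
]
for old_path, new_path in migrations:
    print(f"{old_path} -> {new_path}")
```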
@@ -6,7 +6,6 @@ This module is only relevant for LangChain developers, not for users.
 
 This module and its submodules are for internal use only. Do not use them in your
 own code. We may change the API at any time with no warning.
-
 """
 
 from typing import TYPE_CHECKING
@@ -51,29 +51,26 @@ def beta(
         own (annotation-emitting) `C.__init__`).
 
     Args:
-        message : str, optional
+        message:
            Override the default beta message. The %(since)s,
            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
            and %(removal)s format specifiers will be replaced by the
            values of the respective arguments passed to this function.
-        name : str, optional
+        name:
            The name of the beta object.
-        obj_type : str, optional
+        obj_type:
            The object type being beta.
-        addendum : str, optional
+        addendum:
            Additional text appended directly to the final message.
 
     Returns:
        A decorator which can be used to mark functions or classes as beta.
 
     Examples:
 
-        .. code-block:: python
-
-            @beta
-            def the_function_to_annotate():
-                pass
+        ```python
+        @beta
+        def the_function_to_annotate():
+            pass
+        ```
     """
 
     def beta(
@@ -97,47 +97,44 @@ def deprecated(
         property, and 'function' otherwise.
 
     Args:
-        since : str
+        since:
            The release at which this API became deprecated.
-        message : str, optional
+        message:
            Override the default deprecation message. The %(since)s,
            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
            and %(removal)s format specifiers will be replaced by the
            values of the respective arguments passed to this function.
-        name : str, optional
+        name:
            The name of the deprecated object.
-        alternative : str, optional
+        alternative:
            An alternative API that the user may use in place of the
            deprecated API. The deprecation warning will tell the user
            about this alternative if provided.
-        alternative_import: str, optional
+        alternative_import:
            An alternative import that the user may use instead.
-        pending : bool, optional
+        pending:
            If `True`, uses a `PendingDeprecationWarning` instead of a
            DeprecationWarning. Cannot be used together with removal.
-        obj_type : str, optional
+        obj_type:
            The object type being deprecated.
-        addendum : str, optional
+        addendum:
            Additional text appended directly to the final message.
-        removal : str, optional
+        removal:
            The expected removal version. With the default (an empty
            string), a removal version is automatically computed from
            since. Set to other Falsy values to not schedule a removal
            date. Cannot be used together with pending.
-        package: str, optional
+        package:
            The package of the deprecated object.
 
     Returns:
        A decorator to mark a function or class as deprecated.
 
     Examples:
 
-        .. code-block:: python
-
-            @deprecated("1.4.0")
-            def the_function_to_deprecate():
-                pass
+        ```python
+        @deprecated("1.4.0")
+        def the_function_to_deprecate():
+            pass
+        ```
     """
     _validate_deprecation_params(
         removal, alternative, alternative_import, pending=pending
@@ -550,12 +547,10 @@ def rename_parameter(
         A decorator indicating that a parameter was renamed.
 
     Example:
-
-        .. code-block:: python
-
-            @_api.rename_parameter("3.1", "bad_name", "good_name")
-            def func(good_name): ...
-
+        ```python
+        @_api.rename_parameter("3.1", "bad_name", "good_name")
+        def func(good_name): ...
+        ```
     """
 
     def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:
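As a usage sketch, assuming `deprecated` is importable from `langchain_core._api` (the internal module this diff documents), the decorator and its warning can be exercised like so:

```python
import warnings

from langchain_core._api import deprecated


@deprecated("1.4.0", alternative="new_function")
def the_function_to_deprecate():
    pass


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    the_function_to_deprecate()

# Calling the wrapped function should emit a deprecation warning that
# mentions the version and the suggested alternative.
print(caught[0].message)
```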
@@ -1,6 +1,7 @@
 """Schema definitions for representing agent actions, observations, and return values.
 
-**ATTENTION** The schema definitions are provided for backwards compatibility.
+!!! warning
+    The schema definitions are provided for backwards compatibility.
 
 !!! warning
     New agents should be built using the
@@ -16,10 +17,10 @@ Agents use language models to choose a sequence of actions to take.
 A basic agent works in the following manner:
 
 1. Given a prompt an agent uses an LLM to request an action to take
-    (e.g., a tool to run).
+   (e.g., a tool to run).
 2. The agent executes the action (e.g., runs the tool), and receives an observation.
 3. The agent returns the observation to the LLM, which can then be used to generate
-    the next action.
+   the next action.
 4. When the agent reaches a stopping condition, it returns a final return value.
 
 The schemas for the agents themselves are defined in langchain.agents.agent.
@@ -86,7 +87,7 @@ class AgentAction(Serializable):
         """Get the namespace of the langchain object.
 
         Returns:
-            ``["langchain", "schema", "agent"]``
+            `["langchain", "schema", "agent"]`
         """
         return ["langchain", "schema", "agent"]
@@ -163,7 +164,7 @@ class AgentFinish(Serializable):
         """Get the namespace of the langchain object.
 
         Returns:
-            ``["langchain", "schema", "agent"]``
+            `["langchain", "schema", "agent"]`
         """
         return ["langchain", "schema", "agent"]
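To make the four-step loop concrete, here is a minimal sketch constructing the two schema objects this module defines; the field values are invented for illustration:

```python
from langchain_core.agents import AgentAction, AgentFinish

# Steps 1-2 of the loop: the LLM picks a tool and input (values are illustrative).
action = AgentAction(
    tool="search",
    tool_input={"query": "weather in SF"},
    log="Thought: I should look up the weather.\n",
)

# Step 4: a stopping condition produces the final return value.
finish = AgentFinish(
    return_values={"output": "It is sunny in SF."},
    log="Final Answer: It is sunny in SF.",
)
```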
@@ -1,24 +1,18 @@
 """Cache classes.
 
 !!! warning
-  Beta Feature!
+    Beta Feature!
 
 **Cache** provides an optional caching layer for LLMs.
 
 Cache is useful for two reasons:
 
 - It can save you money by reducing the number of API calls you make to the LLM
-provider if you're often requesting the same completion multiple times.
+  provider if you're often requesting the same completion multiple times.
 - It can speed up your application by reducing the number of API calls you make
-to the LLM provider.
+  to the LLM provider.
 
 Cache directly competes with Memory. See documentation for Pros and Cons.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
 """
 
 from __future__ import annotations
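A short usage sketch of the caching layer described above, assuming the in-memory implementation and the global cache hook:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Enable in-process caching: repeated identical prompts to the same
# model can be served from the cache instead of a new API call.
set_llm_cache(InMemoryCache())
```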
@@ -1,11 +1,4 @@
-"""**Callback handlers** allow listening to events in LangChain.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
-"""
+"""**Callback handlers** allow listening to events in LangChain."""
 
 from typing import TYPE_CHECKING
@@ -35,10 +35,10 @@ class RetrieverManagerMixin:
     """Run when Retriever errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_retriever_end(

@@ -52,10 +52,10 @@ class RetrieverManagerMixin:
     """Run when Retriever ends running.

     Args:
-        documents (Sequence[Document]): The documents retrieved.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        documents: The documents retrieved.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

@@ -76,12 +76,11 @@ class LLMManagerMixin:
     For both chat models and non-chat models (legacy LLMs).

     Args:
-        token (str): The new token.
-        chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
-            containing content and other information.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        token: The new token.
+        chunk: The new generated chunk, containing content and other information.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_llm_end(

@@ -95,10 +94,10 @@ class LLMManagerMixin:
     """Run when LLM ends running.

     Args:
-        response (LLMResult): The response which was generated.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        response: The response which was generated.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_llm_error(

@@ -112,10 +111,10 @@ class LLMManagerMixin:
     """Run when LLM errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

@@ -133,10 +132,10 @@ class ChainManagerMixin:
     """Run when chain ends running.

     Args:
-        outputs (dict[str, Any]): The outputs of the chain.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        outputs: The outputs of the chain.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_chain_error(

@@ -150,10 +149,10 @@ class ChainManagerMixin:
     """Run when chain errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_agent_action(

@@ -167,10 +166,10 @@ class ChainManagerMixin:
     """Run on agent action.

     Args:
-        action (AgentAction): The agent action.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        action: The agent action.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_agent_finish(

@@ -184,10 +183,10 @@ class ChainManagerMixin:
     """Run on the agent end.

     Args:
-        finish (AgentFinish): The agent finish.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        finish: The agent finish.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

@@ -205,10 +204,10 @@ class ToolManagerMixin:
     """Run when the tool ends running.

     Args:
-        output (Any): The output of the tool.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        output: The output of the tool.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_tool_error(

@@ -222,10 +221,10 @@ class ToolManagerMixin:
     """Run when tool errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """
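Taken together, these mixins make up the synchronous handler surface. Below is a minimal, self-contained sketch built on the hooks documented above; the handler name and printed output are illustrative, not part of the library:

```python
from typing import Any
from uuid import UUID, uuid4

from langchain_core.callbacks import BaseCallbackHandler


class LoggingHandler(BaseCallbackHandler):
    """Toy handler: log tool outputs and LLM errors."""

    def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None:
        print(f"[tool {run_id}] -> {output!r}")

    def on_llm_error(self, error: BaseException, *, run_id: UUID, **kwargs: Any) -> None:
        print(f"[llm {run_id}] failed: {error}")


# Handlers are normally invoked by the framework; calling them directly
# here just demonstrates the signatures.
handler = LoggingHandler()
handler.on_tool_end("42", run_id=uuid4())
handler.on_llm_error(ValueError("boom"), run_id=uuid4())
```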
@@ -248,16 +247,16 @@ class CallbackManagerMixin:
     !!! warning
         This method is called for non-chat models (regular LLMs). If you're
         implementing a handler for a chat model, you should use
-        ``on_chat_model_start`` instead.
+        `on_chat_model_start` instead.

     Args:
-        serialized (dict[str, Any]): The serialized LLM.
-        prompts (list[str]): The prompts.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized LLM.
+        prompts: The prompts.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

 def on_chat_model_start(

@@ -275,16 +274,16 @@ class CallbackManagerMixin:

     !!! warning
         This method is called for chat models. If you're implementing a handler for
-        a non-chat model, you should use ``on_llm_start`` instead.
+        a non-chat model, you should use `on_llm_start` instead.

     Args:
-        serialized (dict[str, Any]): The serialized chat model.
-        messages (list[list[BaseMessage]]): The messages.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chat model.
+        messages: The messages.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """
     # NotImplementedError is thrown intentionally
     # Callback handler will fall back to on_llm_start if this is exception is thrown

@@ -305,13 +304,13 @@ class CallbackManagerMixin:
     """Run when the Retriever starts running.

     Args:
-        serialized (dict[str, Any]): The serialized Retriever.
-        query (str): The query.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized Retriever.
+        query: The query.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

 def on_chain_start(

@@ -328,13 +327,13 @@ class CallbackManagerMixin:
     """Run when a chain starts running.

     Args:
-        serialized (dict[str, Any]): The serialized chain.
-        inputs (dict[str, Any]): The inputs.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        inputs: The inputs.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

 def on_tool_start(

@@ -352,14 +351,14 @@ class CallbackManagerMixin:
     """Run when the tool starts running.

     Args:
-        serialized (dict[str, Any]): The serialized tool.
-        input_str (str): The input string.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        inputs (dict[str, Any] | None): The inputs.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        input_str: The input string.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        inputs: The inputs.
+        **kwargs: Additional keyword arguments.
     """
@@ -377,10 +376,10 @@ class RunManagerMixin:
     """Run on an arbitrary text.

     Args:
-        text (str): The text.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        text: The text.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_retry(

@@ -394,10 +393,10 @@ class RunManagerMixin:
     """Run on a retry event.

     Args:
-        retry_state (RetryCallState): The retry state.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        retry_state: The retry state.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 def on_custom_event(

@@ -415,7 +414,7 @@ class RunManagerMixin:
     Args:
         name: The name of the custom event.
         data: The data for the custom event. Format will match
             the format specified by the user.
         run_id: The ID of the run.
         tags: The tags associated with the custom event
             (includes inherited tags).
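`on_custom_event` pairs with the custom-event dispatch helper. A sketch, assuming `dispatch_custom_event` is exported from `langchain_core.callbacks` (true in recent `langchain-core` releases); dispatch must happen inside a runnable context, which is why the event is fired from within a `RunnableLambda`:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler, dispatch_custom_event
from langchain_core.runnables import RunnableLambda


class CustomEventHandler(BaseCallbackHandler):
    def on_custom_event(self, name: str, data: Any, **kwargs: Any) -> None:
        print(f"custom event {name!r}: {data}")


def step(x: str) -> str:
    # The event name and payload format are chosen by the caller.
    dispatch_custom_event("progress", {"stage": "start", "input": x})
    return x.upper()


RunnableLambda(step).invoke("hello", config={"callbacks": [CustomEventHandler()]})
```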
@@ -497,16 +496,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     !!! warning
         This method is called for non-chat models (regular LLMs). If you're
         implementing a handler for a chat model, you should use
-        ``on_chat_model_start`` instead.
+        `on_chat_model_start` instead.

     Args:
-        serialized (dict[str, Any]): The serialized LLM.
-        prompts (list[str]): The prompts.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized LLM.
+        prompts: The prompts.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

 async def on_chat_model_start(

@@ -524,16 +523,16 @@ class AsyncCallbackHandler(BaseCallbackHandler):

     !!! warning
         This method is called for chat models. If you're implementing a handler for
-        a non-chat model, you should use ``on_llm_start`` instead.
+        a non-chat model, you should use `on_llm_start` instead.

     Args:
-        serialized (dict[str, Any]): The serialized chat model.
-        messages (list[list[BaseMessage]]): The messages.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chat model.
+        messages: The messages.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """
     # NotImplementedError is thrown intentionally
     # Callback handler will fall back to on_llm_start if this is exception is thrown

@@ -555,13 +554,12 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     For both chat models and non-chat models (legacy LLMs).

     Args:
-        token (str): The new token.
-        chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
-            containing content and other information.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        token: The new token.
+        chunk: The new generated chunk, containing content and other information.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_llm_end(

@@ -576,11 +574,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when the model ends running.

     Args:
-        response (LLMResult): The response which was generated.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        response: The response which was generated.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_llm_error(

@@ -599,7 +597,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         run_id: The run ID. This is the ID of the current run.
         parent_run_id: The parent run ID. This is the ID of the parent run.
         tags: The tags.
-        kwargs (Any): Additional keyword arguments.
+        **kwargs: Additional keyword arguments.
             - response (LLMResult): The response which was generated before
                 the error occurred.
     """

@@ -618,13 +616,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when a chain starts running.

     Args:
-        serialized (dict[str, Any]): The serialized chain.
-        inputs (dict[str, Any]): The inputs.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        inputs: The inputs.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

 async def on_chain_end(

@@ -639,11 +637,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when a chain ends running.

     Args:
-        outputs (dict[str, Any]): The outputs of the chain.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        outputs: The outputs of the chain.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_chain_error(

@@ -658,11 +656,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when chain errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_tool_start(

@@ -680,14 +678,14 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when the tool starts running.

     Args:
-        serialized (dict[str, Any]): The serialized tool.
-        input_str (str): The input string.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        inputs (dict[str, Any] | None): The inputs.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized tool.
+        input_str: The input string.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        inputs: The inputs.
+        **kwargs: Additional keyword arguments.
     """

 async def on_tool_end(

@@ -702,11 +700,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when the tool ends running.

     Args:
-        output (Any): The output of the tool.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        output: The output of the tool.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_tool_error(

@@ -721,11 +719,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run when tool errors.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_text(

@@ -740,11 +738,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on an arbitrary text.

     Args:
-        text (str): The text.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        text: The text.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_retry(

@@ -758,10 +756,10 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on a retry event.

     Args:
-        retry_state (RetryCallState): The retry state.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        kwargs (Any): Additional keyword arguments.
+        retry_state: The retry state.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        **kwargs: Additional keyword arguments.
     """

 async def on_agent_action(

@@ -776,11 +774,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on agent action.

     Args:
-        action (AgentAction): The agent action.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        action: The agent action.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_agent_finish(

@@ -795,11 +793,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on the agent end.

     Args:
-        finish (AgentFinish): The agent finish.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        finish: The agent finish.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_retriever_start(

@@ -816,13 +814,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on the retriever start.

     Args:
-        serialized (dict[str, Any]): The serialized retriever.
-        query (str): The query.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        metadata (dict[str, Any] | None): The metadata.
-        kwargs (Any): Additional keyword arguments.
+        serialized: The serialized retriever.
+        query: The query.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        metadata: The metadata.
+        **kwargs: Additional keyword arguments.
     """

 async def on_retriever_end(

@@ -837,11 +835,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on the retriever end.

     Args:
-        documents (Sequence[Document]): The documents retrieved.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        documents: The documents retrieved.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_retriever_error(

@@ -856,11 +854,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     """Run on retriever error.

     Args:
-        error (BaseException): The error that occurred.
-        run_id (UUID): The run ID. This is the ID of the current run.
-        parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
-        tags (list[str] | None): The tags.
-        kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        run_id: The run ID. This is the ID of the current run.
+        parent_run_id: The parent run ID. This is the ID of the parent run.
+        tags: The tags.
+        **kwargs: Additional keyword arguments.
     """

 async def on_custom_event(

@@ -878,7 +876,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     Args:
         name: The name of the custom event.
        data: The data for the custom event. Format will match
             the format specified by the user.
         run_id: The ID of the run.
         tags: The tags associated with the custom event
             (includes inherited tags).
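The async interface mirrors the sync hooks as `async def` coroutines. A minimal sketch; the token list stands in for a streaming model:

```python
import asyncio
from typing import Any
from uuid import uuid4

from langchain_core.callbacks import AsyncCallbackHandler


class TokenPrinter(AsyncCallbackHandler):
    """Toy async handler: print streamed tokens as they arrive."""

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        print(token, end="", flush=True)


async def main() -> None:
    handler = TokenPrinter()
    for token in ["Hello", ", ", "world", "!\n"]:
        await handler.on_llm_new_token(token, run_id=uuid4())


asyncio.run(main())
```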
@@ -943,35 +941,29 @@ class BaseCallbackManager(CallbackManagerMixin):
     within merge_configs.

     Returns:
-        BaseCallbackManager: The merged callback manager of the same type
-            as the current object.
+        The merged callback manager of the same type as the current object.

     Example: Merging two callback managers.

-    .. code-block:: python
+    ```python
     from langchain_core.callbacks.manager import (
         CallbackManager,
         trace_as_chain_group,
     )
     from langchain_core.callbacks.stdout import StdOutCallbackHandler

-    manager = CallbackManager(
-        handlers=[StdOutCallbackHandler()], tags=["tag2"]
-    )
-    with trace_as_chain_group(
-        "My Group Name", tags=["tag1"]
-    ) as group_manager:
+    manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
+    with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
         merged_manager = group_manager.merge(manager)
         print(merged_manager.handlers)
         # [
         #    <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
         #    <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
         # ]

         print(merged_manager.tags)
         # ['tag2', 'tag1']
+    ```
     """  # noqa: E501
     manager = self.__class__(
         parent_run_id=self.parent_run_id or other.parent_run_id,
@@ -1008,8 +1000,8 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Add a handler to the callback manager.

     Args:
-        handler (BaseCallbackHandler): The handler to add.
-        inherit (bool): Whether to inherit the handler. Default is True.
+        handler: The handler to add.
+        inherit: Whether to inherit the handler. Default is True.
     """
     if handler not in self.handlers:
         self.handlers.append(handler)

@@ -1020,7 +1012,7 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Remove a handler from the callback manager.

     Args:
-        handler (BaseCallbackHandler): The handler to remove.
+        handler: The handler to remove.
     """
     if handler in self.handlers:
         self.handlers.remove(handler)

@@ -1035,8 +1027,8 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Set handlers as the only handlers on the callback manager.

     Args:
-        handlers (list[BaseCallbackHandler]): The handlers to set.
-        inherit (bool): Whether to inherit the handlers. Default is True.
+        handlers: The handlers to set.
+        inherit: Whether to inherit the handlers. Default is True.
     """
     self.handlers = []
     self.inheritable_handlers = []

@@ -1051,8 +1043,8 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Set handler as the only handler on the callback manager.

     Args:
-        handler (BaseCallbackHandler): The handler to set.
-        inherit (bool): Whether to inherit the handler. Default is True.
+        handler: The handler to set.
+        inherit: Whether to inherit the handler. Default is True.
     """
     self.set_handlers([handler], inherit=inherit)

@@ -1064,8 +1056,8 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Add tags to the callback manager.

     Args:
-        tags (list[str]): The tags to add.
-        inherit (bool): Whether to inherit the tags. Default is True.
+        tags: The tags to add.
+        inherit: Whether to inherit the tags. Default is True.
     """
     for tag in tags:
         if tag in self.tags:

@@ -1078,7 +1070,7 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Remove tags from the callback manager.

     Args:
-        tags (list[str]): The tags to remove.
+        tags: The tags to remove.
     """
     for tag in tags:
         if tag in self.tags:

@@ -1094,8 +1086,8 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Add metadata to the callback manager.

     Args:
-        metadata (dict[str, Any]): The metadata to add.
-        inherit (bool): Whether to inherit the metadata. Default is True.
+        metadata: The metadata to add.
+        inherit: Whether to inherit the metadata. Default is True.
     """
     self.metadata.update(metadata)
     if inherit:

@@ -1105,7 +1097,7 @@ class BaseCallbackManager(CallbackManagerMixin):
     """Remove metadata from the callback manager.

     Args:
-        keys (list[str]): The keys to remove.
+        keys: The keys to remove.
     """
     for key in keys:
         self.metadata.pop(key, None)
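A short sketch of the mutation helpers documented above on a concrete `CallbackManager`; the tag and metadata values are illustrative:

```python
from langchain_core.callbacks import CallbackManager, StdOutCallbackHandler

manager = CallbackManager(handlers=[])
manager.add_handler(StdOutCallbackHandler(), inherit=True)
manager.add_tags(["experiment-1"])
manager.add_metadata({"run": "demo"})
print(manager.handlers, manager.tags, manager.metadata)

# The remove_* counterparts undo the additions.
manager.remove_tags(["experiment-1"])
manager.remove_metadata(["run"])
```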
@@ -27,27 +27,27 @@ class FileCallbackHandler(BaseCallbackHandler):
     Examples:
         Using as a context manager (recommended):

-        .. code-block:: python
-
-            with FileCallbackHandler("output.txt") as handler:
-                # Use handler with your chain/agent
-                chain.invoke(inputs, config={"callbacks": [handler]})
+        ```python
+        with FileCallbackHandler("output.txt") as handler:
+            # Use handler with your chain/agent
+            chain.invoke(inputs, config={"callbacks": [handler]})
+        ```

         Direct instantiation (deprecated):

-        .. code-block:: python
-
-            handler = FileCallbackHandler("output.txt")
-            # File remains open until handler is garbage collected
-            try:
-                chain.invoke(inputs, config={"callbacks": [handler]})
-            finally:
-                handler.close()  # Explicit cleanup recommended
+        ```python
+        handler = FileCallbackHandler("output.txt")
+        # File remains open until handler is garbage collected
+        try:
+            chain.invoke(inputs, config={"callbacks": [handler]})
+        finally:
+            handler.close()  # Explicit cleanup recommended
+        ```

     Args:
         filename: The file path to write to.
         mode: The file open mode. Defaults to `'a'` (append).
-        color: Default color for text output. Defaults to `None`.
+        color: Default color for text output.

     !!! note
         When not used as a context manager, a deprecation warning will be issued

@@ -64,7 +64,7 @@ class FileCallbackHandler(BaseCallbackHandler):
     Args:
         filename: Path to the output file.
         mode: File open mode (e.g., `'w'`, `'a'`, `'x'`). Defaults to `'a'`.
-        color: Default text color for output. Defaults to `None`.
+        color: Default text color for output.
     """
     self.filename = filename
File diff suppressed because it is too large
@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """Initialize callback handler.

     Args:
-        color: The color to use for the text. Defaults to `None`.
+        color: The color to use for the text.
     """
     self.color = color

@@ -31,9 +31,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """Print out that we are entering a chain.

     Args:
-        serialized (dict[str, Any]): The serialized chain.
-        inputs (dict[str, Any]): The inputs to the chain.
-        **kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        inputs: The inputs to the chain.
+        **kwargs: Additional keyword arguments.
     """
     if "name" in kwargs:
         name = kwargs["name"]

@@ -48,8 +48,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """Print out that we finished a chain.

     Args:
-        outputs (dict[str, Any]): The outputs of the chain.
-        **kwargs (Any): Additional keyword arguments.
+        outputs: The outputs of the chain.
+        **kwargs: Additional keyword arguments.
     """
     print("\n\033[1m> Finished chain.\033[0m")  # noqa: T201

@@ -60,9 +60,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """Run on agent action.

     Args:
-        action (AgentAction): The agent action.
-        color (str | None): The color to use for the text. Defaults to `None`.
-        **kwargs (Any): Additional keyword arguments.
+        action: The agent action.
+        color: The color to use for the text.
+        **kwargs: Additional keyword arguments.
     """
     print_text(action.log, color=color or self.color)

@@ -78,12 +78,11 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """If not the final action, print out observation.

     Args:
-        output (Any): The output to print.
-        color (str | None): The color to use for the text. Defaults to `None`.
-        observation_prefix (str | None): The observation prefix.
-            Defaults to `None`.
-        llm_prefix (str | None): The LLM prefix. Defaults to `None`.
-        **kwargs (Any): Additional keyword arguments.
+        output: The output to print.
+        color: The color to use for the text.
+        observation_prefix: The observation prefix.
+        llm_prefix: The LLM prefix.
+        **kwargs: Additional keyword arguments.
     """
     output = str(output)
     if observation_prefix is not None:

@@ -103,10 +102,10 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """Run when the agent ends.

     Args:
-        text (str): The text to print.
-        color (str | None): The color to use for the text. Defaults to `None`.
-        end (str): The end character to use. Defaults to "".
-        **kwargs (Any): Additional keyword arguments.
+        text: The text to print.
+        color: The color to use for the text.
+        end: The end character to use. Defaults to "".
+        **kwargs: Additional keyword arguments.
     """
     print_text(text, color=color or self.color, end=end)

@@ -117,8 +116,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     """Run on the agent end.

     Args:
-        finish (AgentFinish): The agent finish.
-        color (str | None): The color to use for the text. Defaults to `None`.
-        **kwargs (Any): Additional keyword arguments.
+        finish: The agent finish.
+        color: The color to use for the text.
+        **kwargs: Additional keyword arguments.
     """
     print_text(finish.log, color=color or self.color, end="\n")
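For reference, a sketch of `StdOutCallbackHandler` attached to a simple runnable; the chain is a stand-in for a real chain or agent:

```python
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.runnables import RunnableLambda

handler = StdOutCallbackHandler(color="green")
chain = RunnableLambda(lambda x: x * 2)

# Prints the "Entering/Finished chain" banners around the call.
print(chain.invoke(3, config={"callbacks": [handler]}))
```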
@@ -24,9 +24,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Run when LLM starts running.

     Args:
-        serialized (dict[str, Any]): The serialized LLM.
-        prompts (list[str]): The prompts to run.
-        **kwargs (Any): Additional keyword arguments.
+        serialized: The serialized LLM.
+        prompts: The prompts to run.
+        **kwargs: Additional keyword arguments.
     """

 def on_chat_model_start(

@@ -38,9 +38,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Run when LLM starts running.

     Args:
-        serialized (dict[str, Any]): The serialized LLM.
-        messages (list[list[BaseMessage]]): The messages to run.
-        **kwargs (Any): Additional keyword arguments.
+        serialized: The serialized LLM.
+        messages: The messages to run.
+        **kwargs: Additional keyword arguments.
     """

 @override

@@ -48,8 +48,8 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Run on new LLM token. Only available when streaming is enabled.

     Args:
-        token (str): The new token.
-        **kwargs (Any): Additional keyword arguments.
+        token: The new token.
+        **kwargs: Additional keyword arguments.
     """
     sys.stdout.write(token)
     sys.stdout.flush()

@@ -58,16 +58,16 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Run when LLM ends running.

     Args:
-        response (LLMResult): The response from the LLM.
-        **kwargs (Any): Additional keyword arguments.
+        response: The response from the LLM.
+        **kwargs: Additional keyword arguments.
     """

 def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
     """Run when LLM errors.

     Args:
-        error (BaseException): The error that occurred.
-        **kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        **kwargs: Additional keyword arguments.
     """

 def on_chain_start(

@@ -76,25 +76,25 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Run when a chain starts running.

     Args:
-        serialized (dict[str, Any]): The serialized chain.
-        inputs (dict[str, Any]): The inputs to the chain.
-        **kwargs (Any): Additional keyword arguments.
+        serialized: The serialized chain.
+        inputs: The inputs to the chain.
+        **kwargs: Additional keyword arguments.
     """

 def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
     """Run when a chain ends running.

     Args:
-        outputs (dict[str, Any]): The outputs of the chain.
-        **kwargs (Any): Additional keyword arguments.
+        outputs: The outputs of the chain.
+        **kwargs: Additional keyword arguments.
     """

 def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
     """Run when chain errors.

     Args:
-        error (BaseException): The error that occurred.
-        **kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        **kwargs: Additional keyword arguments.
     """

 def on_tool_start(

@@ -103,47 +103,47 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Run when the tool starts running.

     Args:
-        serialized (dict[str, Any]): The serialized tool.
-        input_str (str): The input string.
-        **kwargs (Any): Additional keyword arguments.
+        serialized: The serialized tool.
+        input_str: The input string.
+        **kwargs: Additional keyword arguments.
     """

 def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
     """Run on agent action.

     Args:
-        action (AgentAction): The agent action.
-        **kwargs (Any): Additional keyword arguments.
+        action: The agent action.
+        **kwargs: Additional keyword arguments.
     """

 def on_tool_end(self, output: Any, **kwargs: Any) -> None:
     """Run when tool ends running.

     Args:
-        output (Any): The output of the tool.
-        **kwargs (Any): Additional keyword arguments.
+        output: The output of the tool.
+        **kwargs: Additional keyword arguments.
     """

 def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
     """Run when tool errors.

     Args:
-        error (BaseException): The error that occurred.
-        **kwargs (Any): Additional keyword arguments.
+        error: The error that occurred.
+        **kwargs: Additional keyword arguments.
     """

 def on_text(self, text: str, **kwargs: Any) -> None:
     """Run on an arbitrary text.

     Args:
-        text (str): The text to print.
-        **kwargs (Any): Additional keyword arguments.
+        text: The text to print.
+        **kwargs: Additional keyword arguments.
     """

 def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
     """Run on the agent end.

     Args:
-        finish (AgentFinish): The agent finish.
-        **kwargs (Any): Additional keyword arguments.
+        finish: The agent finish.
+        **kwargs: Additional keyword arguments.
     """
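Since only `on_llm_new_token` does any work in this handler, its behavior can be sketched by feeding it tokens directly; a real streaming chat model would do this for you:

```python
from uuid import uuid4

from langchain_core.callbacks import StreamingStdOutCallbackHandler

handler = StreamingStdOutCallbackHandler()
for token in ["Streaming", " ", "output", "\n"]:
    handler.on_llm_new_token(token, run_id=uuid4())
```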
@@ -19,30 +19,29 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
     """Callback Handler that tracks AIMessage.usage_metadata.

     Example:
-        .. code-block:: python
-
-            from langchain.chat_models import init_chat_model
-            from langchain_core.callbacks import UsageMetadataCallbackHandler
-
-            llm_1 = init_chat_model(model="openai:gpt-4o-mini")
-            llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
-
-            callback = UsageMetadataCallbackHandler()
-            result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
-            result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
-            callback.usage_metadata
-
-        .. code-block::
-
-            {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
-            'output_tokens': 10,
-            'total_tokens': 18,
-            'input_token_details': {'audio': 0, 'cache_read': 0},
-            'output_token_details': {'audio': 0, 'reasoning': 0}},
-            'claude-3-5-haiku-20241022': {'input_tokens': 8,
-            'output_tokens': 21,
-            'total_tokens': 29,
-            'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
+        ```python
+        from langchain.chat_models import init_chat_model
+        from langchain_core.callbacks import UsageMetadataCallbackHandler
+
+        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
+        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
+
+        callback = UsageMetadataCallbackHandler()
+        result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
+        result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
+        callback.usage_metadata
+        ```
+        ```txt
+        {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
+         'output_tokens': 10,
+         'total_tokens': 18,
+         'input_token_details': {'audio': 0, 'cache_read': 0},
+         'output_token_details': {'audio': 0, 'reasoning': 0}},
+         'claude-3-5-haiku-20241022': {'input_tokens': 8,
+         'output_tokens': 21,
+         'total_tokens': 29,
+         'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
+        ```

     !!! version-added "Added in version 0.3.49"

@@ -96,40 +95,44 @@ def get_usage_metadata_callback(
     """Get usage metadata callback.

     Get context manager for tracking usage metadata across chat model calls using
-    ``AIMessage.usage_metadata``.
+    `AIMessage.usage_metadata`.

     Args:
-        name (str): The name of the context variable. Defaults to
-            ``'usage_metadata_callback'``.
+        name: The name of the context variable.

     Yields:
         The usage metadata callback.

     Example:
-        .. code-block:: python
-
-            from langchain.chat_models import init_chat_model
-            from langchain_core.callbacks import get_usage_metadata_callback
-
-            llm_1 = init_chat_model(model="openai:gpt-4o-mini")
-            llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
-
-            with get_usage_metadata_callback() as cb:
-                llm_1.invoke("Hello")
-                llm_2.invoke("Hello")
-                print(cb.usage_metadata)
-
-        .. code-block::
-
-            {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
-            'output_tokens': 10,
-            'total_tokens': 18,
-            'input_token_details': {'audio': 0, 'cache_read': 0},
-            'output_token_details': {'audio': 0, 'reasoning': 0}},
-            'claude-3-5-haiku-20241022': {'input_tokens': 8,
-            'output_tokens': 21,
-            'total_tokens': 29,
-            'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
+        ```python
+        from langchain.chat_models import init_chat_model
+        from langchain_core.callbacks import get_usage_metadata_callback
+
+        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
+        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
+
+        with get_usage_metadata_callback() as cb:
+            llm_1.invoke("Hello")
+            llm_2.invoke("Hello")
+            print(cb.usage_metadata)
+        ```
+        ```txt
+        {
+            "gpt-4o-mini-2024-07-18": {
+                "input_tokens": 8,
+                "output_tokens": 10,
+                "total_tokens": 18,
+                "input_token_details": {"audio": 0, "cache_read": 0},
+                "output_token_details": {"audio": 0, "reasoning": 0},
+            },
+            "claude-3-5-haiku-20241022": {
+                "input_tokens": 8,
+                "output_tokens": 21,
+                "total_tokens": 29,
+                "input_token_details": {"cache_read": 0, "cache_creation": 0},
+            },
+        }
+        ```

     !!! version-added "Added in version 0.3.49"
@@ -1,18 +1,4 @@
-"""**Chat message history** stores a history of the message interactions in a chat.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseChatMessageHistory --> <name>ChatMessageHistory  # Examples: FileChatMessageHistory, PostgresChatMessageHistory
-
-**Main helpers:**
-
-.. code-block::
-
-    AIMessage, HumanMessage, BaseMessage
-
-"""  # noqa: E501
+"""**Chat message history** stores a history of the message interactions in a chat."""

 from __future__ import annotations
@@ -63,46 +49,45 @@ class BaseChatMessageHistory(ABC):

 Example: Shows a default implementation.

-    .. code-block:: python
-
-        import json
-        import os
-        from langchain_core.messages import messages_from_dict, message_to_dict
-
-
-        class FileChatMessageHistory(BaseChatMessageHistory):
-            storage_path: str
-            session_id: str
-
-            @property
-            def messages(self) -> list[BaseMessage]:
-                try:
-                    with open(
-                        os.path.join(self.storage_path, self.session_id),
-                        "r",
-                        encoding="utf-8",
-                    ) as f:
-                        messages_data = json.load(f)
-                    return messages_from_dict(messages_data)
-                except FileNotFoundError:
-                    return []
-
-            def add_messages(self, messages: Sequence[BaseMessage]) -> None:
-                all_messages = list(self.messages)  # Existing messages
-                all_messages.extend(messages)  # Add new messages
-
-                serialized = [message_to_dict(message) for message in all_messages]
-                file_path = os.path.join(self.storage_path, self.session_id)
-                os.makedirs(os.path.dirname(file_path), exist_ok=True)
-                with open(file_path, "w", encoding="utf-8") as f:
-                    json.dump(serialized, f)
-
-            def clear(self) -> None:
-                file_path = os.path.join(self.storage_path, self.session_id)
-                os.makedirs(os.path.dirname(file_path), exist_ok=True)
-                with open(file_path, "w", encoding="utf-8") as f:
-                    json.dump([], f)
+    ```python
+    import json
+    import os
+    from langchain_core.messages import messages_from_dict, message_to_dict
+
+
+    class FileChatMessageHistory(BaseChatMessageHistory):
+        storage_path: str
+        session_id: str
+
+        @property
+        def messages(self) -> list[BaseMessage]:
+            try:
+                with open(
+                    os.path.join(self.storage_path, self.session_id),
+                    "r",
+                    encoding="utf-8",
+                ) as f:
+                    messages_data = json.load(f)
+                return messages_from_dict(messages_data)
+            except FileNotFoundError:
+                return []
+
+        def add_messages(self, messages: Sequence[BaseMessage]) -> None:
+            all_messages = list(self.messages)  # Existing messages
+            all_messages.extend(messages)  # Add new messages
+
+            serialized = [message_to_dict(message) for message in all_messages]
+            file_path = os.path.join(self.storage_path, self.session_id)
+            os.makedirs(os.path.dirname(file_path), exist_ok=True)
+            with open(file_path, "w", encoding="utf-8") as f:
+                json.dump(serialized, f)
+
+        def clear(self) -> None:
+            file_path = os.path.join(self.storage_path, self.session_id)
+            os.makedirs(os.path.dirname(file_path), exist_ok=True)
+            with open(file_path, "w", encoding="utf-8") as f:
+                json.dump([], f)
+    ```
     """

     messages: list[BaseMessage]

@@ -130,7 +115,7 @@ class BaseChatMessageHistory(ABC):
     """Convenience method for adding a human message string to the store.

     !!! note
-        This is a convenience method. Code should favor the bulk ``add_messages``
+        This is a convenience method. Code should favor the bulk `add_messages`
         interface instead to save on round-trips to the persistence layer.

     This method may be deprecated in a future release.

@@ -147,7 +132,7 @@ class BaseChatMessageHistory(ABC):
     """Convenience method for adding an AI message string to the store.

     !!! note
-        This is a convenience method. Code should favor the bulk ``add_messages``
+        This is a convenience method. Code should favor the bulk `add_messages`
         interface instead to save on round-trips to the persistence layer.

     This method may be deprecated in a future release.
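A sketch of the bulk interface the notes above recommend, using the built-in `InMemoryChatMessageHistory`:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()
# One round-trip instead of separate add_user_message/add_ai_message calls.
history.add_messages([HumanMessage("Hi"), AIMessage("Hello! How can I help?")])
print(history.messages)
```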
@@ -22,22 +22,22 @@ class LangSmithLoader(BaseLoader):

 ??? note "Lazy load"

-    .. code-block:: python
-
-        from langchain_core.document_loaders import LangSmithLoader
-
-        loader = LangSmithLoader(dataset_id="...", limit=100)
-        docs = []
-        for doc in loader.lazy_load():
-            docs.append(doc)
-
-    .. code-block:: python
-
-        # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
+    ```python
+    from langchain_core.document_loaders import LangSmithLoader
+
+    loader = LangSmithLoader(dataset_id="...", limit=100)
+    docs = []
+    for doc in loader.lazy_load():
+        docs.append(doc)
+    ```
+
+    ```python
+    # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
+    ```

 !!! version-added "Added in version 0.2.34"

-"""  # noqa: E501
+"""

 def __init__(
     self,

@@ -60,15 +60,15 @@ class LangSmithLoader(BaseLoader):
     """Create a LangSmith loader.

     Args:
-        dataset_id: The ID of the dataset to filter by. Defaults to `None`.
-        dataset_name: The name of the dataset to filter by. Defaults to `None`.
+        dataset_id: The ID of the dataset to filter by.
+        dataset_name: The name of the dataset to filter by.
         content_key: The inputs key to set as Document page content. `'.'` characters
             are interpreted as nested keys. E.g. `content_key="first.second"` will
             result in
            `Document(page_content=format_content(example.inputs["first"]["second"]))`
         format_content: Function for converting the content extracted from the example
             inputs into a string. Defaults to JSON-encoding the contents.
-        example_ids: The IDs of the examples to filter by. Defaults to `None`.
+        example_ids: The IDs of the examples to filter by.
         as_of: The dataset version tag OR
             timestamp to retrieve the examples as of.
             Response examples will only be those that were present at the time

@@ -79,7 +79,7 @@ class LangSmithLoader(BaseLoader):
         inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
         offset: The offset to start from. Defaults to 0.
         limit: The maximum number of examples to return.
-        metadata: Metadata to filter by. Defaults to `None`.
+        metadata: Metadata to filter by.
         filter: A structured filter string to apply to the examples.
         client: LangSmith Client. If not provided will be initialized from below args.
         client_kwargs: Keyword args to pass to LangSmith client init. Should only be
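A sketch of the nested `content_key` behavior described above. This assumes a `LANGSMITH_API_KEY` in the environment and an existing dataset; the dataset name and key path are hypothetical:

```python
from langchain_core.document_loaders import LangSmithLoader

loader = LangSmithLoader(
    dataset_name="my-qa-dataset",  # hypothetical dataset
    content_key="question.text",   # reads example.inputs["question"]["text"]
    limit=10,
)
docs = list(loader.lazy_load())
```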
@@ -2,7 +2,6 @@

 **Document** module is a collection of classes that handle documents
 and their transformations.
-
 """

 from typing import TYPE_CHECKING
@@ -57,52 +57,51 @@ class Blob(BaseMedia):

    Example: Initialize a blob from in-memory data

-        .. code-block:: python
-
+        ```python
         from langchain_core.documents import Blob

         blob = Blob.from_data("Hello, world!")

         # Read the blob as a string
         print(blob.as_string())

         # Read the blob as bytes
         print(blob.as_bytes())

         # Read the blob as a byte stream
         with blob.as_bytes_io() as f:
             print(f.read())
+        ```

    Example: Load from memory and specify mime-type and metadata

-        .. code-block:: python
-
+        ```python
         from langchain_core.documents import Blob

         blob = Blob.from_data(
             data="Hello, world!",
             mime_type="text/plain",
             metadata={"source": "https://example.com"},
         )
+        ```

    Example: Load the blob from a file

-        .. code-block:: python
-
+        ```python
         from langchain_core.documents import Blob

         blob = Blob.from_path("path/to/file.txt")

         # Read the blob as a string
         print(blob.as_string())

         # Read the blob as bytes
         print(blob.as_bytes())

         # Read the blob as a byte stream
         with blob.as_bytes_io() as f:
             print(f.read())
+        ```
    """

    data: bytes | str | None = None
@@ -278,15 +277,13 @@ class Document(BaseMedia):
    """Class for storing a piece of text and associated metadata.

    Example:
-        .. code-block:: python
-
+        ```python
         from langchain_core.documents import Document

         document = Document(
             page_content="Hello, world!", metadata={"source": "https://example.com"}
         )
+        ```
    """

    page_content: str
@@ -20,35 +20,34 @@ class BaseDocumentTransformer(ABC):
    sequence of transformed Documents.

    Example:
-        .. code-block:: python
-
+        ```python
         class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
             embeddings: Embeddings
             similarity_fn: Callable = cosine_similarity
             similarity_threshold: float = 0.95

             class Config:
                 arbitrary_types_allowed = True

             def transform_documents(
                 self, documents: Sequence[Document], **kwargs: Any
             ) -> Sequence[Document]:
                 stateful_documents = get_stateful_documents(documents)
                 embedded_documents = _get_embeddings_from_stateful_docs(
                     self.embeddings, stateful_documents
                 )
                 included_idxs = _filter_similar_embeddings(
                     embedded_documents,
                     self.similarity_fn,
                     self.similarity_threshold,
                 )
                 return [stateful_documents[i] for i in sorted(included_idxs)]

             async def atransform_documents(
                 self, documents: Sequence[Document], **kwargs: Any
             ) -> Sequence[Document]:
                 raise NotImplementedError
+        ```
    """

    @abstractmethod
@@ -21,37 +21,34 @@ class FakeEmbeddings(Embeddings, BaseModel):
    Do not use this outside of testing, as it is not a real embedding model.

    Instantiate:
-        .. code-block:: python
-
+        ```python
         from langchain_core.embeddings import FakeEmbeddings

         embed = FakeEmbeddings(size=100)
+        ```

    Embed single text:
-        .. code-block:: python
-
+        ```python
         input_text = "The meaning of life is 42"
         vector = embed.embed_query(input_text)
         print(vector[:3])
-
-        .. code-block:: python
-
+        ```
+        ```python
         [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
+        ```

    Embed multiple texts:
-        .. code-block:: python
-
+        ```python
         input_texts = ["Document 1...", "Document 2..."]
         vectors = embed.embed_documents(input_texts)
         print(len(vectors))
         # The first 3 coordinates for the first vector
         print(vectors[0][:3])
-
-        .. code-block:: python
-
+        ```
+        ```python
         2
         [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
+        ```
    """

    size: int
@@ -78,37 +75,34 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
    Do not use this outside of testing, as it is not a real embedding model.

    Instantiate:
-        .. code-block:: python
-
+        ```python
         from langchain_core.embeddings import DeterministicFakeEmbedding

         embed = DeterministicFakeEmbedding(size=100)
+        ```

    Embed single text:
-        .. code-block:: python
-
+        ```python
         input_text = "The meaning of life is 42"
         vector = embed.embed_query(input_text)
         print(vector[:3])
-
-        .. code-block:: python
-
+        ```
+        ```python
         [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
+        ```

    Embed multiple texts:
-        .. code-block:: python
-
+        ```python
         input_texts = ["Document 1...", "Document 2..."]
         vectors = embed.embed_documents(input_texts)
         print(len(vectors))
         # The first 3 coordinates for the first vector
         print(vectors[0][:3])
-
-        .. code-block:: python
-
+        ```
+        ```python
         2
         [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
+        ```
    """

    size: int
@@ -33,9 +33,9 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
    Args:
        error: The error that's being re-raised or an error message.
        observation: String explanation of error which can be passed to a
-            model to try and remediate the issue. Defaults to `None`.
+            model to try and remediate the issue.
        llm_output: String model output which is error-ing.
-            Defaults to `None`.
        send_to_llm: Whether to send the observation and llm_output back to an Agent
            after an OutputParserException has been raised.
            This gives the underlying model driving the agent the context that the

@@ -44,8 +44,8 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
            Defaults to `False`.

    Raises:
-        ValueError: If ``send_to_llm`` is True but either observation or
-            ``llm_output`` are not provided.
+        ValueError: If `send_to_llm` is True but either observation or
+            `llm_output` are not provided.
    """
    if isinstance(error, str):
        error = create_message(
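To make the `send_to_llm` contract above concrete, a small sketch (the observation and output strings are invented):

```python
from langchain_core.exceptions import OutputParserException

# send_to_llm=True requires both observation and llm_output; omitting
# either raises ValueError, per the docstring above.
try:
    raise OutputParserException(
        "Could not parse model output",
        observation="Output was not valid JSON; please emit a JSON object.",
        llm_output='{"answer": ...',
        send_to_llm=True,
    )
except OutputParserException as exc:
    print(exc)
```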
@@ -61,7 +61,7 @@ class RecordManager(ABC):
    """Initialize the record manager.

    Args:
-        namespace (str): The namespace for the record manager.
+        namespace: The namespace for the record manager.
    """
    self.namespace = namespace

@@ -244,7 +244,7 @@ class InMemoryRecordManager(RecordManager):
    """Initialize the in-memory record manager.

    Args:
-        namespace (str): The namespace for the record manager.
+        namespace: The namespace for the record manager.
    """
    super().__init__(namespace)
    # Each key points to a dictionary
@@ -278,10 +278,10 @@ class InMemoryRecordManager(RecordManager):
    Args:
        keys: A list of record keys to upsert.
        group_ids: A list of group IDs corresponding to the keys.
-            Defaults to `None`.
        time_at_least: Optional timestamp. Implementation can use this
            to optionally verify that the timestamp IS at least this time
-            in the system that stores. Defaults to `None`.
+            in the system that stores.
            E.g., use to validate that the time in the postgres database
            is equal to or larger than the given timestamp, if not
            raise an error.

@@ -315,10 +315,10 @@ class InMemoryRecordManager(RecordManager):
    Args:
        keys: A list of record keys to upsert.
        group_ids: A list of group IDs corresponding to the keys.
-            Defaults to `None`.
        time_at_least: Optional timestamp. Implementation can use this
            to optionally verify that the timestamp IS at least this time
-            in the system that stores. Defaults to `None`.
+            in the system that stores.
            E.g., use to validate that the time in the postgres database
            is equal to or larger than the given timestamp, if not
            raise an error.

@@ -361,13 +361,13 @@ class InMemoryRecordManager(RecordManager):

    Args:
        before: Filter to list records updated before this time.
-            Defaults to `None`.
        after: Filter to list records updated after this time.
-            Defaults to `None`.
        group_ids: Filter to list records with specific group IDs.
-            Defaults to `None`.
        limit: optional limit on the number of records to return.
-            Defaults to `None`.

    Returns:
        A list of keys for the matching records.

@@ -397,13 +397,13 @@ class InMemoryRecordManager(RecordManager):

    Args:
        before: Filter to list records updated before this time.
-            Defaults to `None`.
        after: Filter to list records updated after this time.
-            Defaults to `None`.
        group_ids: Filter to list records with specific group IDs.
-            Defaults to `None`.
        limit: optional limit on the number of records to return.
-            Defaults to `None`.

    Returns:
        A list of keys for the matching records.
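A short sketch of the record-manager calls these docstrings describe, using the in-memory implementation (keys and group IDs are invented):

```python
from langchain_core.indexing import InMemoryRecordManager

manager = InMemoryRecordManager(namespace="demo")
manager.create_schema()  # no-op for the in-memory implementation

# Upsert two records under one group, then query them back.
manager.update(["doc-1", "doc-2"], group_ids=["g1", "g1"])
print(manager.exists(["doc-1", "doc-3"]))   # [True, False]
print(manager.list_keys(group_ids=["g1"]))  # ['doc-1', 'doc-2']
manager.delete_keys(["doc-1"])
```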
@@ -529,7 +529,7 @@ class DocumentIndex(BaseRetriever):
    **kwargs: Additional keyword arguments.

    Returns:
-        UpsertResponse: A response object that contains the list of IDs that were
+        A response object that contains the list of IDs that were
        successfully added or updated in the vectorstore and the list of IDs that
        failed to be added or updated.
    """

@@ -552,7 +552,7 @@ class DocumentIndex(BaseRetriever):
    **kwargs: Additional keyword arguments.

    Returns:
-        UpsertResponse: A response object that contains the list of IDs that were
+        A response object that contains the list of IDs that were
        successfully added or updated in the vectorstore and the list of IDs that
        failed to be added or updated.
    """

@@ -571,12 +571,12 @@ class DocumentIndex(BaseRetriever):

    Args:
        ids: List of ids to delete.
-        kwargs: Additional keyword arguments. This is up to the implementation.
+        **kwargs: Additional keyword arguments. This is up to the implementation.
            For example, can include an option to delete the entire index,
            or else issue a non-blocking delete etc.

    Returns:
-        DeleteResponse: A response object that contains the list of IDs that were
+        A response object that contains the list of IDs that were
        successfully deleted and the list of IDs that failed to be deleted.
    """

@@ -589,11 +589,11 @@ class DocumentIndex(BaseRetriever):

    Args:
        ids: List of ids to delete.
-        kwargs: Additional keyword arguments. This is up to the implementation.
+        **kwargs: Additional keyword arguments. This is up to the implementation.
            For example, can include an option to delete the entire index.

    Returns:
-        DeleteResponse: A response object that contains the list of IDs that were
+        A response object that contains the list of IDs that were
        successfully deleted and the list of IDs that failed to be deleted.
    """
    return await run_in_executor(

@@ -624,10 +624,10 @@ class DocumentIndex(BaseRetriever):

    Args:
        ids: List of IDs to get.
-        kwargs: Additional keyword arguments. These are up to the implementation.
+        **kwargs: Additional keyword arguments. These are up to the implementation.

    Returns:
-        list[Document]: List of documents that were found.
+        List of documents that were found.
    """

    async def aget(

@@ -650,10 +650,10 @@ class DocumentIndex(BaseRetriever):

    Args:
        ids: List of IDs to get.
-        kwargs: Additional keyword arguments. These are up to the implementation.
+        **kwargs: Additional keyword arguments. These are up to the implementation.

    Returns:
-        list[Document]: List of documents that were found.
+        List of documents that were found.
    """
    return await run_in_executor(
        None,
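A hedged sketch of the upsert/get/delete flow described above, using the in-memory reference implementation (the exact import path is an assumption about this branch):

```python
from langchain_core.documents import Document
from langchain_core.indexing.in_memory import InMemoryDocumentIndex

index = InMemoryDocumentIndex()

# UpsertResponse is a TypedDict with 'succeeded' and 'failed' ID lists.
response = index.upsert([Document(page_content="hello", id="1")])
print(response["succeeded"], response["failed"])

print(index.get(["1"])[0].page_content)  # -> "hello"
delete_response = index.delete(["1"])
print(delete_response["succeeded"])      # IDs that were deleted
```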
@@ -38,8 +38,6 @@ To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
 Please see the following guide for more information on how to implement a custom LLM:

 https://python.langchain.com/docs/how_to/custom_llm/


 """

 from typing import TYPE_CHECKING
@@ -35,7 +35,7 @@ def is_openai_data_block(
    different type, this function will return False.

    Returns:
-        True if the block is a valid OpenAI data block and matches the filter_
+        `True` if the block is a valid OpenAI data block and matches the filter_
        (if provided).
    """

@@ -92,18 +92,16 @@ def _parse_data_uri(uri: str) -> ParsedDataUri | None:
    If parsing fails, return None. If either MIME type or data is missing, return None.

    Example:
-        .. code-block:: python
-
+        ```python
        data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
        parsed = _parse_data_uri(data_uri)

        assert parsed == {
            "source_type": "base64",
            "mime_type": "image/jpeg",
            "data": "/9j/4AAQSkZJRg...",
        }
+        ```
    """
    regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
    match = re.match(regex, uri)
@@ -150,48 +148,48 @@ def _normalize_messages(

    `URLContentBlock`:

-        .. codeblock::
-
+        ```python
        {
            mime_type: NotRequired[str]
            type: Literal['image', 'audio', 'file'],
            source_type: Literal['url'],
            url: str,
        }
+        ```

    `Base64ContentBlock`:

-        .. codeblock::
-
+        ```python
        {
            mime_type: NotRequired[str]
            type: Literal['image', 'audio', 'file'],
            source_type: Literal['base64'],
            data: str,
        }
+        ```

    `IDContentBlock`:

    (In practice, this was never used)

-        .. codeblock::
-
-        {
-            type: Literal['image', 'audio', 'file'],
-            source_type: Literal['id'],
-            id: str,
-        }
+        ```python
+        {
+            type: Literal["image", "audio", "file"],
+            source_type: Literal["id"],
+            id: str,
+        }
+        ```

    `PlainTextContentBlock`:

-        .. codeblock::
-
+        ```python
        {
            mime_type: NotRequired[str]
            type: Literal['file'],
            source_type: Literal['text'],
            url: str,
        }
+        ```

    If a v1 message is passed in, it will be returned as-is, meaning it is safe to
    always pass in v1 messages to this function for assurance.
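For context on the legacy (v0) shapes above, a hedged sketch of a base64 image block inside a message (the payload is truncated, and `content_blocks` assumes a recent langchain-core):

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        {
            "type": "image",
            "source_type": "base64",
            "mime_type": "image/jpeg",
            "data": "/9j/4AAQSkZJRg...",  # truncated base64 payload
        },
    ]
)
# Lazily parse the provider-agnostic representation of the same content.
print(message.content_blocks)
```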
@@ -123,7 +123,6 @@ class BaseLanguageModel(
    * If instance of `BaseCache`, will use the provided cache.

    Caching is not currently supported for streaming methods of models.

    """
    verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
    """Whether to print out response text."""

@@ -146,7 +145,7 @@ class BaseLanguageModel(
    def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
        """If verbose is `None`, set it.

-        This allows users to pass in None as verbose to access the global setting.
+        This allows users to pass in `None` as verbose to access the global setting.

        Args:
            verbose: The verbosity setting to use.

@@ -186,12 +185,12 @@ class BaseLanguageModel(
        1. Take advantage of batched calls,
        2. Need more output from the model than just the top generated value,
        3. Are building chains that are agnostic to the underlying language model
           type (e.g., pure text completion models vs chat models).

        Args:
-            prompts: List of PromptValues. A PromptValue is an object that can be
-                converted to match the format of any language model (string for pure
-                text generation models and BaseMessages for chat models).
+            prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+                can be converted to match the format of any language model (string for
+                pure text generation models and `BaseMessage` objects for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional

@@ -200,8 +199,8 @@ class BaseLanguageModel(
            to the model provider API call.

        Returns:
-            An LLMResult, which contains a list of candidate Generations for each input
-            prompt and additional model provider-specific output.
+            An `LLMResult`, which contains a list of candidate `Generation` objects for
+            each input prompt and additional model provider-specific output.
        """

@@ -223,12 +222,12 @@ class BaseLanguageModel(
        1. Take advantage of batched calls,
        2. Need more output from the model than just the top generated value,
        3. Are building chains that are agnostic to the underlying language model
           type (e.g., pure text completion models vs chat models).

        Args:
-            prompts: List of PromptValues. A PromptValue is an object that can be
-                converted to match the format of any language model (string for pure
-                text generation models and BaseMessages for chat models).
+            prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+                can be converted to match the format of any language model (string for
+                pure text generation models and `BaseMessage` objects for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional

@@ -237,8 +236,8 @@ class BaseLanguageModel(
            to the model provider API call.

        Returns:
-            An `LLMResult`, which contains a list of candidate Generations for each
-            input prompt and additional model provider-specific output.
+            An `LLMResult`, which contains a list of candidate `Generation` objects for
+            each input prompt and additional model provider-specific output.
        """
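A minimal sketch of `generate_prompt` using the fake test models shipped with langchain-core (responses are canned, so the output is illustrative only):

```python
from langchain_core.language_models import FakeListLLM
from langchain_core.prompt_values import StringPromptValue

llm = FakeListLLM(responses=["Paris"])
result = llm.generate_prompt([StringPromptValue(text="Capital of France?")])

# LLMResult holds one list of candidate Generations per input prompt.
print(result.generations[0][0].text)  # -> "Paris"
```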
@@ -108,7 +108,7 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:


 def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
-    """Format messages for tracing in ``on_chat_model_start``.
+    """Format messages for tracing in `on_chat_model_start`.

    - Update image content blocks to OpenAI Chat Completions format (backward
      compatibility).

@@ -185,7 +185,7 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
    ValueError: If no generations are found in the stream.

    Returns:
-        ChatResult: Chat result.
+        Chat result.
    """
    generation = next(stream, None)

@@ -213,7 +213,7 @@ async def agenerate_from_stream(
    stream: Iterator of `ChatGenerationChunk`.

    Returns:
-        ChatResult: Chat result.
+        Chat result.
    """
    chunks = [chunk async for chunk in stream]

@@ -240,78 +240,54 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di


 class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
-    """Base class for chat models.
+    r"""Base class for chat models.

    Key imperative methods:
        Methods that actually call the underlying model.

-        | Method | Input | Output | Description |
-        | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
-        | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
-        | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
-        | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
-        | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
-        | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
-        | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
-        | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
-        | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
-
-        This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.
+        This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.
+
+        | Method | Input | Output | Description |
+        | ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
+        | `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
+        | `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
+        | `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
+        | `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
+        | `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
+        | `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
+        | `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
+        | `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
+        | `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |

    Key declarative methods:
-        Methods for creating another Runnable using the ChatModel.
+        Methods for creating another `Runnable` using the chat model.

-        | Method | Description |
-        | `bind_tools` | Create ChatModel that can call tools. |
-        | `with_structured_output` | Create wrapper that structures model output using schema. |
-        | `with_retry` | Create wrapper that retries model calls on failure. |
-        | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
-        | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
-        | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
+        This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.
+
+        | Method | Description |
+        | ---------------------------- | -------------------------------------------------------------------------------------------- |
+        | `bind_tools` | Create chat model that can call tools. |
+        | `with_structured_output` | Create wrapper that structures model output using schema. |
+        | `with_retry` | Create wrapper that retries model calls on failure. |
+        | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
+        | `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
+        | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |

    Creating custom chat model:
        Custom chat model implementations should inherit from this class.
        Please reference the table below for information about which
        methods and properties are required or optional for implementations.

        | Method/Property | Description | Required/Optional |
+        | -------------------------------- | ------------------------------------------------------------------ | ----------------- |
        | `_generate` | Use to generate a chat result from a prompt | Required |
        | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
        | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
        | `_stream` | Use to implement streaming | Optional |
        | `_agenerate` | Use to implement a native async method | Optional |
        | `_astream` | Use to implement async version of `_stream` | Optional |

-    Follow the guide for more information on how to implement a custom Chat Model:
+    Follow the guide for more information on how to implement a custom chat model:
    [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

    """  # noqa: E501
@@ -327,9 +303,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

    - If `True`, will always bypass streaming case.
    - If `'tool_calling'`, will bypass streaming case only when the model is called
      with a `tools` keyword argument. In other words, LangChain will automatically
      switch to non-streaming behavior (`invoke`) only when the tools argument is
      provided. This offers the best of both worlds.
    - If `False` (Default), will always use streaming case if available.

    The main reason for this flag is that code might be written using `stream` and
@@ -342,14 +318,14 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    )
    """Version of `AIMessage` output format to store in message content.

-    `AIMessage.content_blocks` will lazily parse the contents of ``content`` into a
+    `AIMessage.content_blocks` will lazily parse the contents of `content` into a
    standard format. This flag can be used to additionally store the standard format
    in message content, e.g., for serialization purposes.

    Supported values:

    - `'v0'`: provider-specific format in content (can lazily-parse with
      `.content_blocks`)
    - `'v1'`: standardized format in content (consistent with `.content_blocks`)

    Partner packages (e.g., `langchain-openai`) can also use this field to roll out
@@ -1533,7 +1509,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    - a `TypedDict` class,
    - or a Pydantic class.

-    If ``schema`` is a Pydantic class then the model output will be a
+    If `schema` is a Pydantic class then the model output will be a
    Pydantic instance of that class, and the model-generated fields will be
    validated by the Pydantic class. Otherwise the model output will be a
    dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`

@@ -1546,104 +1522,104 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    then both the raw model response (a BaseMessage) and the parsed model
    response will be returned. If an error occurs during output parsing it
    will be caught and returned as well. The final output is always a dict
-    with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
+    with keys `'raw'`, `'parsed'`, and `'parsing_error'`.

    Raises:
-        ValueError: If there are any unsupported ``kwargs``.
+        ValueError: If there are any unsupported `kwargs`.
        NotImplementedError: If the model does not implement
-            ``with_structured_output()``.
+            `with_structured_output()`.

    Returns:
        A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.

-        If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
-        an instance of ``schema`` (i.e., a Pydantic object).
+        If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
+        an instance of `schema` (i.e., a Pydantic object).

-        Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
+        Otherwise, if `include_raw` is False then Runnable outputs a dict.

-        If ``include_raw`` is True, then Runnable outputs a dict with keys:
+        If `include_raw` is True, then Runnable outputs a dict with keys:

-        - ``'raw'``: BaseMessage
-        - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
-        - ``'parsing_error'``: BaseException | None
+        - `'raw'`: BaseMessage
+        - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
+        - `'parsing_error'`: BaseException | None

    Example: Pydantic schema (include_raw=False):
-        .. code-block:: python
-
+        ```python
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


-        llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(AnswerWithJustification)
+        model = ChatModel(model="model-name", temperature=0)
+        structured_model = model.with_structured_output(AnswerWithJustification)

-        structured_llm.invoke(
+        structured_model.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )

        # -> AnswerWithJustification(
        #     answer='They weigh the same',
        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
        # )
+        ```

    Example: Pydantic schema (include_raw=True):
-        .. code-block:: python
-
+        ```python
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


-        llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(
-            AnswerWithJustification, include_raw=True
-        )
+        model = ChatModel(model="model-name", temperature=0)
+        structured_model = model.with_structured_output(
+            AnswerWithJustification, include_raw=True
+        )

-        structured_llm.invoke(
+        structured_model.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
        #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
        #     'parsing_error': None
        # }
+        ```

    Example: Dict schema (include_raw=False):
-        .. code-block:: python
-
+        ```python
        from pydantic import BaseModel
        from langchain_core.utils.function_calling import convert_to_openai_tool


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        dict_schema = convert_to_openai_tool(AnswerWithJustification)
-        llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(dict_schema)
+        model = ChatModel(model="model-name", temperature=0)
+        structured_model = model.with_structured_output(dict_schema)

-        structured_llm.invoke(
+        structured_model.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'answer': 'They weigh the same',
        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
        # }
+        ```

    !!! warning "Behavior changed in 0.2.26"
        Added support for TypedDict class.
@@ -1693,7 +1669,7 @@ class SimpleChatModel(BaseChatModel):

    !!! note
        This implementation is primarily here for backwards compatibility. For new
-        implementations, please use ``BaseChatModel`` directly.
+        implementations, please use `BaseChatModel` directly.
    """
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig


 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ``ChatModel`` for testing purposes."""
+    """Fake `ChatModel` for testing purposes."""

    responses: list[BaseMessage]
    """List of responses to **cycle** through in order."""

@@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel):
    """Generic fake chat model that can be used to test the chat model interface.

    * Chat model should be usable in both sync and async tests
-    * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
+    * Invokes `on_llm_new_token` to allow for testing of callback related code for new
      tokens.
    * Includes logic to break messages into message chunk to facilitate testing of
      streaming.

    """

@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
    to make the interface more generic if needed.

    !!! note
-        if you want to pass a list, you can use ``iter`` to convert it to an iterator.
+        if you want to pass a list, you can use `iter` to convert it to an iterator.

    !!! warning
        Streaming is not implemented yet. We should try to implement it in the future by
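A small sketch of the fake chat models in use (message text invented):

```python
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

# Per the note above, wrap a list in iter() to supply the message stream.
model = GenericFakeChatModel(
    messages=iter([AIMessage(content="hello world"), AIMessage(content="streamed reply")])
)
print(model.invoke("hi").content)  # -> "hello world"

# Messages are broken into chunks to exercise streaming callbacks.
for chunk in model.stream("hi"):
    print(chunk.content, end="|")
```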
@@ -835,7 +835,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        1. Take advantage of batched calls,
        2. Need more output from the model than just the top generated value,
        3. Are building chains that are agnostic to the underlying language model
           type (e.g., pure text completion models vs chat models).

        Args:
            prompts: List of string prompts.

@@ -857,8 +857,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):

        Raises:
            ValueError: If prompts is not a list.
-            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
-                ``run_name`` (if provided) does not match the length of prompts.
+            ValueError: If the length of `callbacks`, `tags`, `metadata`, or
+                `run_name` (if provided) does not match the length of prompts.

        Returns:
            An LLMResult, which contains a list of candidate Generations for each input

@@ -1105,7 +1105,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        1. Take advantage of batched calls,
        2. Need more output from the model than just the top generated value,
        3. Are building chains that are agnostic to the underlying language model
           type (e.g., pure text completion models vs chat models).

        Args:
            prompts: List of string prompts.

@@ -1126,8 +1126,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
            to the model provider API call.

        Raises:
-            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
-                ``run_name`` (if provided) does not match the length of prompts.
+            ValueError: If the length of `callbacks`, `tags`, `metadata`, or
+                `run_name` (if provided) does not match the length of prompts.

        Returns:
            An LLMResult, which contains a list of candidate Generations for each input

@@ -1340,11 +1340,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        ValueError: If the file path is not a string or Path object.

        Example:
-            .. code-block:: python
-
-                llm.save(file_path="path/llm.yaml")
-
+            ```python
+            llm.save(file_path="path/llm.yaml")
+            ```
        """
        # Convert file to Path object.
        save_path = Path(file_path)
@@ -45,7 +45,7 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
    pretty: Whether to pretty print the json. If true, the json will be
        indented with 2 spaces (if no indent is provided as part of kwargs).
        Default is False.
-    kwargs: Additional arguments to pass to json.dumps
+    **kwargs: Additional arguments to pass to json.dumps

    Returns:
        A json string representation of the object.

@@ -63,14 +63,14 @@ class Reviver:
    Args:
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
-            is True. Defaults to `None`.
+            is True.
        valid_namespaces: A list of additional namespaces (modules)
-            to allow to be deserialized. Defaults to `None`.
+            to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
-            Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.
    """

@@ -107,7 +107,7 @@ class Reviver:
    ValueError: If trying to deserialize something that cannot
        be deserialized in the current version of langchain-core.
    NotImplementedError: If the object is not implemented and
-        ``ignore_unserializable_fields`` is False.
+        `ignore_unserializable_fields` is False.
    """
    if (
        value.get("lc") == 1

@@ -200,14 +200,14 @@ def loads(
    text: The string to load.
    secrets_map: A map of secrets to load. If a secret is not found in
        the map, it will be loaded from the environment if `secrets_from_env`
-        is True. Defaults to `None`.
+        is True.
    valid_namespaces: A list of additional namespaces (modules)
-        to allow to be deserialized. Defaults to `None`.
+        to allow to be deserialized.
    secrets_from_env: Whether to load secrets from the environment.
        Defaults to `True`.
    additional_import_mappings: A dictionary of additional namespace mappings
        You can use this to override default mappings or add new mappings.
-        Defaults to `None`.
    ignore_unserializable_fields: Whether to ignore unserializable fields.
        Defaults to `False`.

@@ -245,14 +245,14 @@ def load(
    obj: The object to load.
    secrets_map: A map of secrets to load. If a secret is not found in
        the map, it will be loaded from the environment if `secrets_from_env`
-        is True. Defaults to `None`.
+        is True.
    valid_namespaces: A list of additional namespaces (modules)
-        to allow to be deserialized. Defaults to `None`.
+        to allow to be deserialized.
    secrets_from_env: Whether to load secrets from the environment.
        Defaults to `True`.
    additional_import_mappings: A dictionary of additional namespace mappings
        You can use this to override default mappings or add new mappings.
-        Defaults to `None`.
    ignore_unserializable_fields: Whether to ignore unserializable fields.
        Defaults to `False`.
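A hedged round-trip sketch of the serialization helpers documented above (the prompt template is just a convenient serializable object):

```python
from langchain_core.load import dumps, loads
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([("system", "You are {role}.")])
text = dumps(prompt, pretty=True)  # pretty=True indents with 2 spaces

# secrets_map / valid_namespaces behave as described in the Args above;
# none are needed for a plain prompt template.
restored = loads(text)
print(restored.format_messages(role="a pirate"))
```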
@@ -25,16 +25,16 @@ class BaseSerialized(TypedDict):
    id: list[str]
    """The unique identifier of the object."""
    name: NotRequired[str]
-    """The name of the object. Optional."""
+    """The name of the object."""
    graph: NotRequired[dict[str, Any]]
-    """The graph of the object. Optional."""
+    """The graph of the object."""


 class SerializedConstructor(BaseSerialized):
    """Serialized constructor."""

    type: Literal["constructor"]
-    """The type of the object. Must be ``'constructor'``."""
+    """The type of the object. Must be `'constructor'`."""
    kwargs: dict[str, Any]
    """The constructor arguments."""

@@ -43,16 +43,16 @@ class SerializedSecret(BaseSerialized):
    """Serialized secret."""

    type: Literal["secret"]
-    """The type of the object. Must be ``'secret'``."""
+    """The type of the object. Must be `'secret'`."""


 class SerializedNotImplemented(BaseSerialized):
    """Serialized not implemented."""

    type: Literal["not_implemented"]
-    """The type of the object. Must be ``'not_implemented'``."""
+    """The type of the object. Must be `'not_implemented'`."""
    repr: str | None
-    """The representation of the object. Optional."""
+    """The representation of the object."""


 def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:

@@ -61,7 +61,7 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
    Args:
        value: The value.
        key: The key.
-        model: The pydantic model.
+        model: The Pydantic model.

    Returns:
        Whether the value is different from the default.

@@ -93,18 +93,18 @@ class Serializable(BaseModel, ABC):
    It relies on the following methods and properties:

    - `is_lc_serializable`: Is this class serializable?
-      By design, even if a class inherits from Serializable, it is not serializable by
-      default. This is to prevent accidental serialization of objects that should not
-      be serialized.
-    - ``get_lc_namespace``: Get the namespace of the langchain object.
-      During deserialization, this namespace is used to identify
-      the correct class to instantiate.
-      Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
-      During deserialization an additional mapping is handle
-      classes that have moved or been renamed across package versions.
-    - ``lc_secrets``: A map of constructor argument names to secret ids.
-    - ``lc_attributes``: List of additional attribute names that should be included
-      as part of the serialized representation.
+      By design, even if a class inherits from `Serializable`, it is not serializable
+      by default. This is to prevent accidental serialization of objects that should
+      not be serialized.
+    - `get_lc_namespace`: Get the namespace of the langchain object.
+      During deserialization, this namespace is used to identify
+      the correct class to instantiate.
+      Please see the `Reviver` class in `langchain_core.load.load` for more details.
+      During deserialization, an additional mapping handles classes that have moved
+      or been renamed across package versions.
+    - `lc_secrets`: A map of constructor argument names to secret ids.
+    - `lc_attributes`: List of additional attribute names that should be included
+      as part of the serialized representation.
    """

    # Remove default BaseModel init docstring.

@@ -116,12 +116,12 @@ class Serializable(BaseModel, ABC):
    def is_lc_serializable(cls) -> bool:
        """Is this class serializable?

-        By design, even if a class inherits from Serializable, it is not serializable by
-        default. This is to prevent accidental serialization of objects that should not
-        be serialized.
+        By design, even if a class inherits from `Serializable`, it is not serializable
+        by default. This is to prevent accidental serialization of objects that should
+        not be serialized.

        Returns:
-            Whether the class is serializable. Default is False.
+            Whether the class is serializable. Default is `False`.
        """
        return False

@@ -133,7 +133,7 @@ class Serializable(BaseModel, ABC):
    namespace is ["langchain", "llms", "openai"]

    Returns:
-        The namespace as a list of strings.
+        The namespace.
    """
    return cls.__module__.split(".")

@@ -141,8 +141,7 @@ class Serializable(BaseModel, ABC):
    def lc_secrets(self) -> dict[str, str]:
        """A map of constructor argument names to secret ids.

-        For example,
-            {"openai_api_key": "OPENAI_API_KEY"}
+        For example, `{"openai_api_key": "OPENAI_API_KEY"}`
        """
        return {}

@@ -151,6 +150,7 @@ class Serializable(BaseModel, ABC):
    """List of attribute names that should be included in the serialized kwargs.

    These attributes must be accepted by the constructor.

    Default is an empty dictionary.
    """
    return {}
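To ground the `Serializable` contract above, a hedged sketch of an opt-in serializable class (class and field names are invented):

```python
from langchain_core.load.serializable import Serializable

class MyComponent(Serializable):
    api_key: str
    temperature: float = 0.7

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True  # opt in explicitly, per the docstring above

    @property
    def lc_secrets(self) -> dict[str, str]:
        # Serialize api_key as a secret reference, not as plaintext.
        return {"api_key": "MY_API_KEY"}

serialized = MyComponent(api_key="sk-...", temperature=0.2).to_json()
print(serialized["kwargs"])  # api_key appears as a {'lc': 1, 'type': 'secret', ...} stub
```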
@@ -194,7 +194,7 @@ class Serializable(BaseModel, ABC):
|
||||
ValueError: If the class has deprecated attributes.
|
||||
|
||||
Returns:
|
||||
A json serializable object or a SerializedNotImplemented object.
|
||||
A json serializable object or a `SerializedNotImplemented` object.
|
||||
"""
|
||||
if not self.is_lc_serializable():
|
||||
return self.to_json_not_implemented()
|
||||
@@ -269,7 +269,7 @@ class Serializable(BaseModel, ABC):
|
||||
"""Serialize a "not implemented" object.
|
||||
|
||||
Returns:
|
||||
SerializedNotImplemented.
|
||||
`SerializedNotImplemented`.
|
||||
"""
|
||||
return to_json_not_implemented(self)
|
||||
|
||||
@@ -284,8 +284,8 @@ def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:
|
||||
|
||||
Returns:
|
||||
Whether the field is useful. If the field is required, it is useful.
|
||||
If the field is not required, it is useful if the value is not None.
|
||||
If the field is not required and the value is None, it is useful if the
|
||||
If the field is not required, it is useful if the value is not `None`.
|
||||
If the field is not required and the value is `None`, it is useful if the
|
||||
default value is different from the value.
|
||||
"""
|
||||
field = type(inst).model_fields.get(key)
|
||||
@@ -344,10 +344,10 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
|
||||
"""Serialize a "not implemented" object.
|
||||
|
||||
Args:
|
||||
obj: object to serialize.
|
||||
obj: Object to serialize.
|
||||
|
||||
Returns:
|
||||
SerializedNotImplemented
|
||||
`SerializedNotImplemented`
|
||||
"""
|
||||
id_: list[str] = []
|
||||
try:
|
||||
|
||||
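To see how these hooks fit together end to end, here is a minimal, hypothetical `Serializable` subclass and round trip. This is a sketch, not part of the diff: `MyWrapper`, `my_pkg`, and `MY_API_KEY` are illustrative names, and it assumes the namespace module is importable when loading.

```python
from langchain_core.load import dumpd, load
from langchain_core.load.serializable import Serializable


class MyWrapper(Serializable):  # hypothetical example class
    api_key: str
    temperature: float = 0.7

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Serialization is opt-in by design; subclasses must return True.
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        # Stored in the payload; the Reviver uses it to locate the class on load.
        return ["my_pkg", "wrappers"]

    @property
    def lc_secrets(self) -> dict[str, str]:
        # Serialize a secret id (e.g. an env var name) instead of the raw value.
        return {"api_key": "MY_API_KEY"}


obj = MyWrapper(api_key="sk-example", temperature=0.2)
payload = dumpd(obj)  # {"lc": 1, "type": "constructor", "id": [...], "kwargs": {...}}
# Reviving assumes `my_pkg.wrappers` is importable in the loading process:
restored = load(
    payload,
    valid_namespaces=["my_pkg"],
    secrets_map={"MY_API_KEY": "sk-example"},
)
```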
@@ -36,28 +36,25 @@ class BaseMemory(Serializable, ABC):
the latest input.

Example:
.. code-block:: python
```python
class SimpleMemory(BaseMemory):
memories: dict[str, Any] = dict()

class SimpleMemory(BaseMemory):
memories: dict[str, Any] = dict()
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())

@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories

def load_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, str]:
return self.memories

def save_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
pass

def clear(self) -> None:
pass
def save_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
pass

def clear(self) -> None:
pass
```
"""

model_config = ConfigDict(

@@ -1,19 +1,4 @@
"""**Messages** are objects used in prompts and chat conversations.

**Class hierarchy:**

.. code-block::

BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk

**Main helpers:**

.. code-block::

ChatPromptTemplate

""" # noqa: E501
"""**Messages** are objects used in prompts and chat conversations."""

from typing import TYPE_CHECKING

@@ -40,13 +40,13 @@ class InputTokenDetails(TypedDict, total=False):
Does *not* need to sum to full input token count. Does *not* need to have all keys.

Example:
.. code-block:: python

{
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
}
```python
{
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
}
```

!!! version-added "Added in version 0.3.9"

@@ -76,12 +76,12 @@ class OutputTokenDetails(TypedDict, total=False):
Does *not* need to sum to full output token count. Does *not* need to have all keys.

Example:
.. code-block:: python

{
"audio": 10,
"reasoning": 200,
}
```python
{
"audio": 10,
"reasoning": 200,
}
```

!!! version-added "Added in version 0.3.9"

@@ -104,22 +104,22 @@ class UsageMetadata(TypedDict):
This is a standard representation of token usage that is consistent across models.

Example:
.. code-block:: python

{
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
}
```python
{
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
}
```

!!! warning "Behavior changed in 0.3.9"
Added `input_token_details` and `output_token_details`.
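A short sketch of how these TypedDicts appear in practice — here attaching usage to an `AIMessage` by hand (the numeric values are illustrative only):

```python
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata

usage: UsageMetadata = {
    "input_tokens": 350,
    "output_tokens": 240,
    "total_tokens": 590,
    "input_token_details": {"audio": 10, "cache_creation": 200, "cache_read": 100},
    "output_token_details": {"audio": 10, "reasoning": 200},
}
msg = AIMessage(content="...", usage_metadata=usage)
assert msg.usage_metadata["total_tokens"] == 590
```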
@@ -193,12 +193,12 @@ class AIMessage(BaseMessage):
) -> None:
"""Initialize `AIMessage`.

Specify ``content`` as positional arg or ``content_blocks`` for typing.
Specify `content` as positional arg or `content_blocks` for typing.

Args:
content: The content of the message.
content_blocks: Typed standard content.
kwargs: Additional arguments to pass to the parent class.
**kwargs: Additional arguments to pass to the parent class.
"""
if content_blocks is not None:
# If there are tool calls in content_blocks, but not in tool_calls, add them
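For illustration, the two construction styles this initializer supports might look like the following (a sketch; the block shape follows the v1 content-block format documented later in this diff):

```python
from langchain_core.messages import AIMessage

# Positional string content (classic style)
plain = AIMessage("The answer is 42.")

# Typed standard content via `content_blocks`
typed = AIMessage(content_blocks=[{"type": "text", "text": "The answer is 42."}])
assert typed.content_blocks[0]["type"] == "text"
```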
@@ -335,7 +335,7 @@ class AIMessage(BaseMessage):

Args:
html: Whether to return an HTML-formatted string.
Defaults to `False`.
Defaults to `False`.

Returns:
A pretty representation of the message.
@@ -380,7 +380,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment]
"""The type of the message (used for deserialization).

Defaults to ``AIMessageChunk``.
Defaults to `AIMessageChunk`.

"""

@@ -390,8 +390,8 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
chunk_position: Literal["last"] | None = None
"""Optional span represented by an aggregated AIMessageChunk.

If a chunk with ``chunk_position="last"`` is aggregated into a stream,
``tool_call_chunks`` in message content will be parsed into `tool_calls`.
If a chunk with `chunk_position="last"` is aggregated into a stream,
`tool_call_chunks` in message content will be parsed into `tool_calls`.
"""

@property
@@ -596,14 +596,14 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
def add_ai_message_chunks(
left: AIMessageChunk, *others: AIMessageChunk
) -> AIMessageChunk:
"""Add multiple ``AIMessageChunk``s together.
"""Add multiple `AIMessageChunk`s together.

Args:
left: The first ``AIMessageChunk``.
*others: Other ``AIMessageChunk``s to add.
left: The first `AIMessageChunk`.
*others: Other `AIMessageChunk`s to add.

Returns:
The resulting ``AIMessageChunk``.
The resulting `AIMessageChunk`.

"""
content = merge_content(left.content, *(o.content for o in others))
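In practice these chunks are usually combined with `+`, which routes through the same merging logic; for example:

```python
from langchain_core.messages import AIMessageChunk

left = AIMessageChunk(content="Hello")
right = AIMessageChunk(content=" World")
merged = left + right  # chunk addition merges content, usage, and tool call chunks
assert merged.content == "Hello World"
```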
@@ -681,43 +681,42 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
"""Recursively add two UsageMetadata objects.

Example:
.. code-block:: python
```python
from langchain_core.messages.ai import add_usage

from langchain_core.messages.ai import add_usage
left = UsageMetadata(
input_tokens=5,
output_tokens=0,
total_tokens=5,
input_token_details=InputTokenDetails(cache_read=3),
)
right = UsageMetadata(
input_tokens=0,
output_tokens=10,
total_tokens=10,
output_token_details=OutputTokenDetails(reasoning=4),
)

left = UsageMetadata(
input_tokens=5,
output_tokens=0,
total_tokens=5,
input_token_details=InputTokenDetails(cache_read=3),
)
right = UsageMetadata(
input_tokens=0,
output_tokens=10,
total_tokens=10,
output_token_details=OutputTokenDetails(reasoning=4),
)

add_usage(left, right)
add_usage(left, right)
```

results in

.. code-block:: python

UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=3),
output_token_details=OutputTokenDetails(reasoning=4),
)

```python
UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=3),
output_token_details=OutputTokenDetails(reasoning=4),
)
```
Args:
left: The first ``UsageMetadata`` object.
right: The second ``UsageMetadata`` object.
left: The first `UsageMetadata` object.
right: The second `UsageMetadata` object.

Returns:
The sum of the two ``UsageMetadata`` objects.
The sum of the two `UsageMetadata` objects.

"""
if not (left or right):
@@ -740,48 +739,47 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
def subtract_usage(
left: UsageMetadata | None, right: UsageMetadata | None
) -> UsageMetadata:
"""Recursively subtract two ``UsageMetadata`` objects.
"""Recursively subtract two `UsageMetadata` objects.

Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.
Token counts cannot be negative so the actual operation is `max(left - right, 0)`.

Example:
.. code-block:: python
```python
from langchain_core.messages.ai import subtract_usage

from langchain_core.messages.ai import subtract_usage
left = UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=4),
)
right = UsageMetadata(
input_tokens=3,
output_tokens=8,
total_tokens=11,
output_token_details=OutputTokenDetails(reasoning=4),
)

left = UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(cache_read=4),
)
right = UsageMetadata(
input_tokens=3,
output_tokens=8,
total_tokens=11,
output_token_details=OutputTokenDetails(reasoning=4),
)

subtract_usage(left, right)
subtract_usage(left, right)
```

results in

.. code-block:: python

UsageMetadata(
input_tokens=2,
output_tokens=2,
total_tokens=4,
input_token_details=InputTokenDetails(cache_read=4),
output_token_details=OutputTokenDetails(reasoning=0),
)

```python
UsageMetadata(
input_tokens=2,
output_tokens=2,
total_tokens=4,
input_token_details=InputTokenDetails(cache_read=4),
output_token_details=OutputTokenDetails(reasoning=0),
)
```
Args:
left: The first ``UsageMetadata`` object.
right: The second ``UsageMetadata`` object.
left: The first `UsageMetadata` object.
right: The second `UsageMetadata` object.

Returns:
The resulting ``UsageMetadata`` after subtraction.
The resulting `UsageMetadata` after subtraction.

"""
if not (left or right):

@@ -48,13 +48,13 @@ class TextAccessor(str):

Exists to maintain backward compatibility while transitioning from method-based to
property-based text access in message objects. In LangChain <v1.0, message text was
accessed via ``.text()`` method calls. In v1.0+, the preferred pattern is property
access via ``.text``.
accessed via `.text()` method calls. In v1.0+, the preferred pattern is property
access via `.text`.

Rather than breaking existing code immediately, ``TextAccessor`` allows both
Rather than breaking existing code immediately, `TextAccessor` allows both
patterns:
- Modern property access: ``message.text`` (returns string directly)
- Legacy method access: ``message.text()`` (callable, emits deprecation warning)
- Modern property access: `message.text` (returns string directly)
- Legacy method access: `message.text()` (callable, emits deprecation warning)

"""

@@ -67,12 +67,12 @@ class TextAccessor(str):
def __call__(self) -> str:
"""Enable method-style text access for backward compatibility.

This method exists solely to support legacy code that calls ``.text()``
as a method. New code should use property access (``.text``) instead.
This method exists solely to support legacy code that calls `.text()`
as a method. New code should use property access (`.text`) instead.

!!! deprecated
As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated.
Use ``.text`` as a property instead. This method will be removed in 2.0.0.
As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
Use `.text` as a property instead. This method will be removed in 2.0.0.

Returns:
The string content, identical to property access.
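Both access patterns side by side (per the docstring above, the legacy call still works but emits a deprecation warning):

```python
from langchain_core.messages import AIMessage

msg = AIMessage("Hello")
assert msg.text == "Hello"    # preferred: property access
assert msg.text() == "Hello"  # legacy: callable, warns, removed in 2.0.0
```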
@@ -92,7 +92,7 @@ class TextAccessor(str):
class BaseMessage(Serializable):
"""Base abstract message class.

Messages are the inputs and outputs of a ``ChatModel``.
Messages are the inputs and outputs of a `ChatModel`.
"""

content: str | list[str | dict]
@@ -161,12 +161,12 @@ class BaseMessage(Serializable):
) -> None:
"""Initialize `BaseMessage`.

Specify ``content`` as positional arg or ``content_blocks`` for typing.
Specify `content` as positional arg or `content_blocks` for typing.

Args:
content: The string contents of the message.
content_blocks: Typed standard content.
kwargs: Additional arguments to pass to the parent class.
**kwargs: Additional arguments to pass to the parent class.
"""
if content_blocks is not None:
super().__init__(content=content_blocks, **kwargs)
@@ -187,7 +187,7 @@ class BaseMessage(Serializable):
"""Get the namespace of the langchain object.

Returns:
``["langchain", "schema", "messages"]``
`["langchain", "schema", "messages"]`
"""
return ["langchain", "schema", "messages"]

@@ -259,11 +259,11 @@ class BaseMessage(Serializable):
def text(self) -> TextAccessor:
"""Get the text content of the message as a string.

Can be used as both property (``message.text``) and method (``message.text()``).
Can be used as both property (`message.text`) and method (`message.text()`).

!!! deprecated
As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated.
Use ``.text`` as a property instead. This method will be removed in 2.0.0.
As of langchain-core 1.0.0, calling `.text()` as a method is deprecated.
Use `.text` as a property instead. This method will be removed in 2.0.0.

Returns:
The text content of the message.
@@ -331,8 +331,8 @@ def merge_content(
"""Merge multiple message contents.

Args:
first_content: The first ``content``. Can be a string or a list.
contents: The other ``content``s. Can be a string or a list.
first_content: The first `content`. Can be a string or a list.
contents: The other `content`s. Can be a string or a list.

Returns:
The merged content.
@@ -388,9 +388,9 @@ class BaseMessageChunk(BaseMessage):

For example,

``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")``
`AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`

will give ``AIMessageChunk(content="Hello World")``
will give `AIMessageChunk(content="Hello World")`

"""
if isinstance(other, BaseMessageChunk):
@@ -440,7 +440,7 @@ def message_to_dict(message: BaseMessage) -> dict:

Returns:
Message as a dict. The dict will have a `type` key with the message type
and a ``data`` key with the message data as a dict.
and a `data` key with the message data as a dict.

"""
return {"type": message.type, "data": message.model_dump()}

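For example, a small sanity check of the round-trip shape described above:

```python
from langchain_core.messages import HumanMessage, message_to_dict

d = message_to_dict(HumanMessage(content="hi"))
assert d["type"] == "human"
assert d["data"]["content"] == "hi"
```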
@@ -1,7 +1,7 @@
"""Derivations of standard content blocks from provider content.

`AIMessage` will first attempt to use a provider-specific translator if
``model_provider`` is set in `response_metadata` on the message. Consequently, each
`model_provider` is set in `response_metadata` on the message. Consequently, each
provider translator must handle all possible content response types from the provider,
including text.

@@ -23,13 +23,13 @@ if TYPE_CHECKING:
PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {}
"""Map model provider names to translator functions.

The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another
The dictionary maps provider names (e.g. `'openai'`, `'anthropic'`) to another
dictionary with two keys:
- ``'translate_content'``: Function to translate `AIMessage` content.
- ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content.
- `'translate_content'`: Function to translate `AIMessage` content.
- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.

When calling `.content_blocks` on an `AIMessage` or ``AIMessageChunk``, if
``model_provider`` is set in `response_metadata`, the corresponding translator
When calling `.content_blocks` on an `AIMessage` or `AIMessageChunk`, if
`model_provider` is set in `response_metadata`, the corresponding translator
functions will be used to parse the content into blocks. Otherwise, best-effort parsing
in `BaseMessage` will be used.
"""
@@ -43,9 +43,9 @@ def register_translator(
"""Register content translators for a provider in `PROVIDER_TRANSLATORS`.

Args:
provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``).
provider: The model provider name (e.g. `'openai'`, `'anthropic'`).
translate_content: Function to translate `AIMessage` content.
translate_content_chunk: Function to translate ``AIMessageChunk`` content.
translate_content_chunk: Function to translate `AIMessageChunk` content.
"""
PROVIDER_TRANSLATORS[provider] = {
"translate_content": translate_content,
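A hedged sketch of how a third-party integration might use this hook. The provider name `'acme'` and both function bodies are hypothetical, and the import path assumes `register_translator` lives in the `block_translators` module shown in this diff:

```python
from langchain_core.messages.block_translators import register_translator


def _translate_content(message):
    # Hypothetical: map provider-native content to v1 content blocks.
    return [{"type": "text", "text": str(message.content)}]


def _translate_content_chunk(message):
    # Hypothetical chunk variant; same contract, applied to AIMessageChunk content.
    return [{"type": "text", "text": str(message.content)}]


# Typically executed once at import time inside the integration package:
register_translator("acme", _translate_content, _translate_content_chunk)
```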
@@ -62,7 +62,7 @@ def get_translator(
provider: The model provider name.

Returns:
Dictionary with ``'translate_content'`` and ``'translate_content_chunk'``
Dictionary with `'translate_content'` and `'translate_content_chunk'`
functions, or None if no translator is registered for the provider. In such
case, best-effort parsing in `BaseMessage` will be used.
"""
@@ -72,10 +72,10 @@ def _register_translators() -> None:
"""Register all translators in langchain-core.

A unit test ensures all modules in ``block_translators`` are represented here.
A unit test ensures all modules in `block_translators` are represented here.

For translators implemented outside langchain-core, they can be registered by
calling ``register_translator`` from within the integration package.
calling `register_translator` from within the integration package.
"""
from langchain_core.messages.block_translators.anthropic import ( # noqa: PLC0415
_register_anthropic_translator,

@@ -32,11 +32,11 @@ def _convert_to_v1_from_anthropic_input(
"""Convert Anthropic format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Anthropic format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -36,11 +36,11 @@ def _convert_to_v1_from_converse_input(
"""Convert Bedrock Converse format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Converse format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -106,11 +106,11 @@ def _convert_to_v1_from_genai_input(
`response_metadata`.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be GenAI format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -11,11 +11,11 @@ def _convert_v0_multimodal_input_to_v1(
"""Convert v0 multimodal blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any v0 format
blocks to v1 format.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.

@@ -18,7 +18,7 @@ if TYPE_CHECKING:


def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
"""Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions."""
"""Convert `ImageContentBlock` to format expected by OpenAI Chat Completions."""
if "url" in block:
return {
"type": "image_url",
@@ -156,11 +156,11 @@ def _convert_to_v1_from_chat_completions_input(
"""Convert OpenAI Chat Completions format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a ``'non_standard'`` block with the original block stored in the ``value``
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be OpenAI format to v1 ContentBlocks.

If conversion fails, the block is left as a ``'non_standard'`` block.
If conversion fails, the block is left as a `'non_standard'` block.

Args:
content: List of content blocks to process.
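A note on the pattern shared by all of these converters: unrecognized blocks are wrapped as `'non_standard'`, and each converter tries to unwrap them back into standard blocks. A rough, illustrative sketch of that contract (the block shapes and helper are hypothetical, not the library's exact internals):

```python
# A provider-specific block the v1 parser doesn't recognize is wrapped:
raw = {"type": "acme_special", "payload": {"x": 1}}  # hypothetical provider block
wrapped = {"type": "non_standard", "value": raw}


def try_convert(block: dict) -> dict:
    # Attempt to unpack a wrapped block back into a standard v1 block;
    # on failure, leave the 'non_standard' wrapper in place (as documented above).
    value = block.get("value", {})
    if block.get("type") == "non_standard" and "text" in value:
        return {"type": "text", "text": value["text"]}
    return block


assert try_convert(wrapped) == wrapped  # no recognizable shape -> left wrapped
```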
@@ -263,7 +263,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"


def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
"""Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
"""Convert v0 AIMessage into `output_version="responses/v1"` format."""
from langchain_core.messages import AIMessageChunk # noqa: PLC0415

# Only update ChatOpenAI v0.3 AIMessages

@@ -31,7 +31,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
"""The type of the message (used during serialization).

Defaults to ``'ChatMessageChunk'``.
Defaults to `'ChatMessageChunk'`.

"""


@@ -5,7 +5,7 @@
change in future releases.

This module provides standardized data structures for representing inputs to and
outputs from LLMs. The core abstraction is the **Content Block**, a ``TypedDict``.
outputs from LLMs. The core abstraction is the **Content Block**, a `TypedDict`.

**Rationale**

@@ -20,59 +20,59 @@ blocks into the format required by its API.
**Extensibility**

Data **not yet mapped** to a standard block may be represented using the
``NonStandardContentBlock``, which allows for provider-specific data to be included
`NonStandardContentBlock`, which allows for provider-specific data to be included
without losing the benefits of type checking and validation.

Furthermore, provider-specific fields **within** a standard block are fully supported
by default in the ``extras`` field of each block. This allows for additional metadata
by default in the `extras` field of each block. This allows for additional metadata
to be included without breaking the standard structure.

!!! warning
Do not heavily rely on the ``extras`` field for provider-specific data! This field
Do not heavily rely on the `extras` field for provider-specific data! This field
is subject to deprecation in future releases as we move towards PEP 728.

!!! note
Following widespread adoption of `PEP 728 <https://peps.python.org/pep-0728/>`__, we
will add ``extra_items=Any`` as a param to Content Blocks. This will signify to type
Following widespread adoption of [PEP 728](https://peps.python.org/pep-0728/), we
will add `extra_items=Any` as a param to Content Blocks. This will signify to type
checkers that additional provider-specific fields are allowed outside of the
``extras`` field, and that will become the new standard approach to adding
`extras` field, and that will become the new standard approach to adding
provider-specific metadata.

??? note

**Example with PEP 728 provider-specific fields:**

.. code-block:: python
```python
# Content block definition
# NOTE: `extra_items=Any`
class TextContentBlock(TypedDict, extra_items=Any):
type: Literal["text"]
id: NotRequired[str]
text: str
annotations: NotRequired[list[Annotation]]
index: NotRequired[int]
```

# Content block definition
# NOTE: `extra_items=Any`
class TextContentBlock(TypedDict, extra_items=Any):
type: Literal["text"]
id: NotRequired[str]
text: str
annotations: NotRequired[list[Annotation]]
index: NotRequired[int]
```python
from langchain_core.messages.content import TextContentBlock

.. code-block:: python
# Create a text content block with provider-specific fields
my_block: TextContentBlock = {
# Add required fields
"type": "text",
"text": "Hello, world!",
# Additional fields not specified in the TypedDict
# These are valid with PEP 728 and are typed as Any
"openai_metadata": {"model": "gpt-4", "temperature": 0.7},
"anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
"custom_field": "any value",
}

from langchain_core.messages.content import TextContentBlock
# Mutating an existing block to add provider-specific fields
openai_data = my_block["openai_metadata"] # Type: Any
```

# Create a text content block with provider-specific fields
my_block: TextContentBlock = {
# Add required fields
"type": "text",
"text": "Hello, world!",
# Additional fields not specified in the TypedDict
# These are valid with PEP 728 and are typed as Any
"openai_metadata": {"model": "gpt-4", "temperature": 0.7},
"anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
"custom_field": "any value",
}

# Mutating an existing block to add provider-specific fields
openai_data = my_block["openai_metadata"] # Type: Any

PEP 728 is enabled with ``# type: ignore[call-arg]`` comments to suppress
PEP 728 is enabled with `# type: ignore[call-arg]` comments to suppress
warnings from type checkers that don't yet support it. The functionality works
correctly in Python 3.13+ and will be fully supported as the ecosystem catches
up.
@@ -81,52 +81,51 @@ to be included without breaking the standard structure.

The module defines several types of content blocks, including:

- ``TextContentBlock``: Standard text output.
- ``Citation``: For annotations that link text output to a source document.
- ``ToolCall``: For function calling.
- ``ReasoningContentBlock``: To capture a model's thought process.
- `TextContentBlock`: Standard text output.
- `Citation`: For annotations that link text output to a source document.
- `ToolCall`: For function calling.
- `ReasoningContentBlock`: To capture a model's thought process.
- Multimodal data:
- ``ImageContentBlock``
- ``AudioContentBlock``
- ``VideoContentBlock``
- ``PlainTextContentBlock`` (e.g. .txt or .md files)
- ``FileContentBlock`` (e.g. PDFs, etc.)
- `ImageContentBlock`
- `AudioContentBlock`
- `VideoContentBlock`
- `PlainTextContentBlock` (e.g. .txt or .md files)
- `FileContentBlock` (e.g. PDFs, etc.)

**Example Usage**

.. code-block:: python
```python
# Direct construction:
from langchain_core.messages.content import TextContentBlock, ImageContentBlock

# Direct construction:
from langchain_core.messages.content import TextContentBlock, ImageContentBlock
multimodal_message = AIMessage(
content_blocks=[
TextContentBlock(type="text", text="What is shown in this image?"),
ImageContentBlock(
type="image",
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)

multimodal_message = AIMessage(
content_blocks=[
TextContentBlock(type="text", text="What is shown in this image?"),
ImageContentBlock(
type="image",
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
# Using factories:
from langchain_core.messages.content import create_text_block, create_image_block

# Using factories:
from langchain_core.messages.content import create_text_block, create_image_block

multimodal_message = AIMessage(
content=[
create_text_block("What is shown in this image?"),
create_image_block(
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
multimodal_message = AIMessage(
content=[
create_text_block("What is shown in this image?"),
create_image_block(
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
```

Factory functions offer benefits such as:
- Automatic ID generation (when not provided)
- No need to manually specify the `type` field

"""

from typing import Any, Literal, get_args, get_type_hints
@@ -140,12 +139,12 @@ class Citation(TypedDict):
"""Annotation for citing data from a document.

!!! note
``start``/``end`` indices refer to the **response text**,
`start`/`end` indices refer to the **response text**,
not the source text. This means that the indices are relative to the model's
response, not the original document (as specified in the ``url``).
response, not the original document (as specified in the `url`).

!!! note
``create_citation`` may also be used as a factory to create a ``Citation``.
`create_citation` may also be used as a factory to create a `Citation`.
Benefits include:

* Automatic ID generation (when not provided)
@@ -160,7 +159,7 @@ class Citation(TypedDict):
"""Content block identifier. Either:

- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -174,10 +173,10 @@ class Citation(TypedDict):
"""

start_index: NotRequired[int]
"""Start index of the **response text** (``TextContentBlock.text``)."""
"""Start index of the **response text** (`TextContentBlock.text`)."""

end_index: NotRequired[int]
"""End index of the **response text** (``TextContentBlock.text``)"""
"""End index of the **response text** (`TextContentBlock.text`)"""

cited_text: NotRequired[str]
"""Excerpt of source text being cited."""
@@ -203,7 +202,7 @@ class NonStandardAnnotation(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -221,8 +220,8 @@ class TextContentBlock(TypedDict):
from a language model or the text of a user message.

!!! note
``create_text_block`` may also be used as a factory to create a
``TextContentBlock``. Benefits include:
`create_text_block` may also be used as a factory to create a
`TextContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -237,7 +236,7 @@ class TextContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -245,7 +244,7 @@ class TextContentBlock(TypedDict):
"""Block text."""

annotations: NotRequired[list[Annotation]]
"""``Citation``s and other annotations."""
"""`Citation`s and other annotations."""

index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
@@ -258,17 +257,16 @@ class ToolCall(TypedDict):
"""Represents a request to call a tool.

Example:

.. code-block:: python

{"name": "foo", "args": {"a": 1}, "id": "123"}
```python
{"name": "foo", "args": {"a": 1}, "id": "123"}
```

This represents a request to call the tool named "foo" with arguments {"a": 1}
and an identifier of "123".

!!! note
``create_tool_call`` may also be used as a factory to create a
``ToolCall``. Benefits include:
`create_tool_call` may also be used as a factory to create a
`ToolCall`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -303,22 +301,20 @@
class ToolCallChunk(TypedDict):
"""A chunk of a tool call (e.g., as part of a stream).

When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``),
When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
all string attributes are concatenated. Chunks are only merged if their
values of ``index`` are equal and not `None`.
values of `index` are equal and not `None`.

Example:
```python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

.. code-block:: python

left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]

(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
```
"""

# TODO: Consider making fields NotRequired[str] in the future.
@@ -457,8 +453,8 @@ class ReasoningContentBlock(TypedDict):
"""Reasoning output from a LLM.

!!! note
``create_reasoning_block`` may also be used as a factory to create a
``ReasoningContentBlock``. Benefits include:
`create_reasoning_block` may also be used as a factory to create a
`ReasoningContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -473,7 +469,7 @@ class ReasoningContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -481,7 +477,7 @@ class ReasoningContentBlock(TypedDict):
"""Reasoning text.

Either the thought summary or the raw reasoning text itself. This is often parsed
from ``<think>`` tags in the model's response.
from `<think>` tags in the model's response.

"""

@@ -499,8 +495,8 @@ class ImageContentBlock(TypedDict):
"""Image data.

!!! note
``create_image_block`` may also be used as a factory to create a
``ImageContentBlock``. Benefits include:
`create_image_block` may also be used as a factory to create a
`ImageContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -515,7 +511,7 @@ class ImageContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -525,7 +521,7 @@ class ImageContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the image. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#image>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#image)

"""

@@ -546,8 +542,8 @@ class VideoContentBlock(TypedDict):
"""Video data.

!!! note
``create_video_block`` may also be used as a factory to create a
``VideoContentBlock``. Benefits include:
`create_video_block` may also be used as a factory to create a
`VideoContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -562,7 +558,7 @@ class VideoContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -572,7 +568,7 @@ class VideoContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the video. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#video>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#video)

"""

@@ -593,8 +589,8 @@ class AudioContentBlock(TypedDict):
"""Audio data.

!!! note
``create_audio_block`` may also be used as a factory to create an
``AudioContentBlock``. Benefits include:
`create_audio_block` may also be used as a factory to create an
`AudioContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time

@@ -608,7 +604,7 @@ class AudioContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -618,7 +614,7 @@ class AudioContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the audio. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#audio>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#audio)

"""

@@ -639,18 +635,18 @@ class PlainTextContentBlock(TypedDict):
"""Plaintext data (e.g., from a document).

!!! note
A ``PlainTextContentBlock`` existed in ``langchain-core<1.0.0``. Although the
A `PlainTextContentBlock` existed in `langchain-core<1.0.0`. Although the
name has carried over, the structure has changed significantly. The only shared
keys between the old and new versions are `type` and ``text``, though the
`type` value has changed from ``'text'`` to ``'text-plain'``.
keys between the old and new versions are `type` and `text`, though the
`type` value has changed from `'text'` to `'text-plain'`.

!!! note
Title and context are optional fields that may be passed to the model. See
Anthropic `example <https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content>`__.
Anthropic [example](https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).

!!! note
``create_plaintext_block`` may also be used as a factory to create a
``PlainTextContentBlock``. Benefits include:
`create_plaintext_block` may also be used as a factory to create a
`PlainTextContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -665,7 +661,7 @@ class PlainTextContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -704,12 +700,12 @@ class FileContentBlock(TypedDict):
example, it can be used for PDFs, Word documents, etc.

If the file is an image, audio, or plaintext, you should use the corresponding
content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``,
``PlainTextContentBlock``).
content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
`PlainTextContentBlock`).

!!! note
``create_file_block`` may also be used as a factory to create a
``FileContentBlock``. Benefits include:
`create_file_block` may also be used as a factory to create a
`FileContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -724,7 +720,7 @@ class FileContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -734,7 +730,7 @@ class FileContentBlock(TypedDict):
mime_type: NotRequired[str]
"""MIME type of the file. Required for base64.

`Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml)

"""

@@ -764,14 +760,14 @@ class NonStandardContentBlock(TypedDict):
The purpose of this block should be to simply hold a provider-specific payload.
If a provider's non-standard output includes reasoning and tool calls, it should be
the adapter's job to parse that payload and emit the corresponding standard
``ReasoningContentBlock`` and ``ToolCalls``.
`ReasoningContentBlock` and `ToolCalls`.

Has no ``extras`` field, as provider-specific data should be included in the
``value`` field.
Has no `extras` field, as provider-specific data should be included in the
`value` field.

!!! note
``create_non_standard_block`` may also be used as a factory to create a
``NonStandardContentBlock``. Benefits include:
`create_non_standard_block` may also be used as a factory to create a
`NonStandardContentBlock`. Benefits include:

* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
@@ -786,7 +782,7 @@ class NonStandardContentBlock(TypedDict):

Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``))
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

"""

@@ -842,7 +838,7 @@ KNOWN_BLOCK_TYPES = {
"non_standard",
# citation and non_standard_annotation intentionally omitted
}
"""These are block types known to ``langchain-core>=1.0.0``.
"""These are block types known to `langchain-core>=1.0.0`.

If a block has a type not in this set, it is considered to be provider-specific.
"""
@@ -881,7 +877,7 @@ def is_data_content_block(block: dict) -> bool:
block: The content block to check.

Returns:
True if the content block is a data content block, False otherwise.
`True` if the content block is a data content block, `False` otherwise.

"""
if block.get("type") not in _get_data_content_block_types():
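For example, per the semantics above (image/audio/video/file/plaintext blocks are data blocks; text is not):

```python
from langchain_core.messages import is_data_content_block

assert is_data_content_block({"type": "image", "url": "https://example.com/cat.png"})
assert not is_data_content_block({"type": "text", "text": "hi"})
```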
@@ -923,20 +919,20 @@ def create_text_block(
index: int | str | None = None,
**kwargs: Any,
) -> TextContentBlock:
"""Create a ``TextContentBlock``.
"""Create a `TextContentBlock`.

Args:
text: The text content of the block.
id: Content block identifier. Generated automatically if not provided.
annotations: ``Citation``s and other annotations for the text.
annotations: `Citation`s and other annotations for the text.
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``TextContentBlock``.
A properly formatted `TextContentBlock`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
block = TextContentBlock(
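Typical factory usage, relying on the auto-generated `'lc_'`-prefixed id documented above (import path follows this module's own examples):

```python
from langchain_core.messages.content import create_text_block

block = create_text_block("Hello, world!")
assert block["type"] == "text"
assert block["id"].startswith("lc_")  # auto-generated when not provided
```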
@@ -966,7 +962,7 @@ def create_image_block(
index: int | str | None = None,
**kwargs: Any,
) -> ImageContentBlock:
"""Create an ``ImageContentBlock``.
"""Create an `ImageContentBlock`.

Args:
url: URL of the image.
@@ -977,15 +973,15 @@ def create_image_block(
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``ImageContentBlock``.
A properly formatted `ImageContentBlock`.

Raises:
ValueError: If no image source is provided or if ``base64`` is used without
``mime_type``.
ValueError: If no image source is provided or if `base64` is used without
`mime_type`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
if not any([url, base64, file_id]):
@@ -1022,7 +1018,7 @@ def create_video_block(
index: int | str | None = None,
**kwargs: Any,
) -> VideoContentBlock:
"""Create a ``VideoContentBlock``.
"""Create a `VideoContentBlock`.

Args:
url: URL of the video.
@@ -1033,15 +1029,15 @@ def create_video_block(
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``VideoContentBlock``.
A properly formatted `VideoContentBlock`.

Raises:
ValueError: If no video source is provided or if ``base64`` is used without
``mime_type``.
ValueError: If no video source is provided or if `base64` is used without
`mime_type`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
if not any([url, base64, file_id]):
@@ -1082,7 +1078,7 @@ def create_audio_block(
index: int | str | None = None,
**kwargs: Any,
) -> AudioContentBlock:
"""Create an ``AudioContentBlock``.
"""Create an `AudioContentBlock`.

Args:
url: URL of the audio.
@@ -1093,15 +1089,15 @@ def create_audio_block(
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``AudioContentBlock``.
A properly formatted `AudioContentBlock`.

Raises:
ValueError: If no audio source is provided or if ``base64`` is used without
``mime_type``.
ValueError: If no audio source is provided or if `base64` is used without
`mime_type`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
if not any([url, base64, file_id]):
@@ -1142,7 +1138,7 @@ def create_file_block(
index: int | str | None = None,
**kwargs: Any,
) -> FileContentBlock:
"""Create a ``FileContentBlock``.
"""Create a `FileContentBlock`.

Args:
url: URL of the file.
@@ -1153,15 +1149,15 @@ def create_file_block(
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``FileContentBlock``.
A properly formatted `FileContentBlock`.

Raises:
ValueError: If no file source is provided or if ``base64`` is used without
``mime_type``.
ValueError: If no file source is provided or if `base64` is used without
`mime_type`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
if not any([url, base64, file_id]):
@@ -1203,7 +1199,7 @@ def create_plaintext_block(
index: int | str | None = None,
**kwargs: Any,
) -> PlainTextContentBlock:
"""Create a ``PlainTextContentBlock``.
"""Create a `PlainTextContentBlock`.

Args:
text: The plaintext content.
@@ -1216,11 +1212,11 @@ def create_plaintext_block(
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``PlainTextContentBlock``.
A properly formatted `PlainTextContentBlock`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
block = PlainTextContentBlock(
@@ -1259,7 +1255,7 @@ def create_tool_call(
index: int | str | None = None,
**kwargs: Any,
) -> ToolCall:
"""Create a ``ToolCall``.
"""Create a `ToolCall`.

Args:
name: The name of the tool to be called.
@@ -1268,11 +1264,11 @@ def create_tool_call(
index: Index of block in aggregate response. Used during streaming.

Returns:
A properly formatted ``ToolCall``.
A properly formatted `ToolCall`.

!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

"""
block = ToolCall(
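The tool-call factory follows the same pattern as the other factories above (argument values here are illustrative):

```python
from langchain_core.messages.content import create_tool_call

tc = create_tool_call(name="search", args={"query": "weather in SF"})
assert tc["type"] == "tool_call"
assert tc["id"].startswith("lc_")
```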
@@ -1298,7 +1294,7 @@ def create_reasoning_block(
    index: int | str | None = None,
    **kwargs: Any,
) -> ReasoningContentBlock:
    """Create a ``ReasoningContentBlock``.
    """Create a `ReasoningContentBlock`.

    Args:
        reasoning: The reasoning text or thought summary.
@@ -1306,11 +1302,11 @@ def create_reasoning_block(
        index: Index of block in aggregate response. Used during streaming.

    Returns:
        A properly formatted ``ReasoningContentBlock``.
        A properly formatted `ReasoningContentBlock`.

    !!! note
        The `id` is generated automatically if not provided, using a UUID4 format
        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

    """
    block = ReasoningContentBlock(
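Again a minimal sketch, using the `reasoning` argument documented above (import path assumed):

```python
from langchain_core.messages.content_blocks import create_reasoning_block

block = create_reasoning_block(reasoning="The user asked for X, so first check Y.")
```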
@@ -1339,7 +1335,7 @@ def create_citation(
    id: str | None = None,
    **kwargs: Any,
) -> Citation:
    """Create a ``Citation``.
    """Create a `Citation`.

    Args:
        url: URL of the document source.
@@ -1350,11 +1346,11 @@ def create_citation(
        id: Content block identifier. Generated automatically if not provided.

    Returns:
        A properly formatted ``Citation``.
        A properly formatted `Citation`.

    !!! note
        The `id` is generated automatically if not provided, using a UUID4 format
        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

    """
    block = Citation(type="citation", id=ensure_id(id))
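A sketch using the `url` argument documented above (import path assumed):

```python
from langchain_core.messages.content_blocks import create_citation

citation = create_citation(url="https://example.com/paper")
# citation["type"] == "citation"; id is auto-generated if not passed.
```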
@@ -1383,7 +1379,7 @@ def create_non_standard_block(
    id: str | None = None,
    index: int | str | None = None,
) -> NonStandardContentBlock:
    """Create a ``NonStandardContentBlock``.
    """Create a `NonStandardContentBlock`.

    Args:
        value: Provider-specific data.
@@ -1391,11 +1387,11 @@ def create_non_standard_block(
        index: Index of block in aggregate response. Used during streaming.

    Returns:
        A properly formatted ``NonStandardContentBlock``.
        A properly formatted `NonStandardContentBlock`.

    !!! note
        The `id` is generated automatically if not provided, using a UUID4 format
        prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID.
        prefixed with `'lc_'` to indicate it is a LangChain-generated ID.

    """
    block = NonStandardContentBlock(

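A sketch using the `value` argument documented above for provider-specific payloads (import path assumed):

```python
from langchain_core.messages.content_blocks import create_non_standard_block

block = create_non_standard_block(value={"provider_field": {"k": "v"}})
```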
@@ -15,7 +15,7 @@ from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
    """Message for passing the result of executing a tool back to a model.

    ``FunctionMessage`` are an older version of the `ToolMessage` schema, and
    `FunctionMessage` objects are an older version of the `ToolMessage` schema, and
    do not contain the `tool_call_id` field.

    The `tool_call_id` field is used to associate the tool call request with the
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
    """The name of the function that was executed."""

    type: Literal["function"] = "function"
    """The type of the message (used for serialization). Defaults to ``'function'``."""
    """The type of the message (used for serialization). Defaults to `'function'`."""


class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -40,7 +40,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]
    """The type of the message (used for serialization).

    Defaults to ``'FunctionMessageChunk'``.
    Defaults to `'FunctionMessageChunk'`.

    """

@@ -12,26 +12,24 @@ class HumanMessage(BaseMessage):
    `HumanMessage`s are messages that are passed in from a human to the model.

    Example:
        ```python
        from langchain_core.messages import HumanMessage, SystemMessage

        .. code-block:: python

            from langchain_core.messages import HumanMessage, SystemMessage

            messages = [
                SystemMessage(content="You are a helpful assistant! Your name is Bob."),
                HumanMessage(content="What is your name?"),
            ]

            # Instantiate a chat model and invoke it with the messages
            model = ...
            print(model.invoke(messages))
        messages = [
            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
            HumanMessage(content="What is your name?"),
        ]

        # Instantiate a chat model and invoke it with the messages
        model = ...
        print(model.invoke(messages))
        ```
    """

    type: Literal["human"] = "human"
    """The type of the message (used for serialization).

    Defaults to ``'human'``.
    Defaults to `'human'`.

    """

@@ -56,7 +54,7 @@ class HumanMessage(BaseMessage):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
        """Specify `content` as positional arg or `content_blocks` for typing."""
        if content_blocks is not None:
            super().__init__(
                content=cast("str | list[str | dict]", content_blocks),

@@ -20,7 +20,7 @@ class RemoveMessage(BaseMessage):

    Args:
        id: The ID of the message to remove.
        kwargs: Additional fields to pass to the message.
        **kwargs: Additional fields to pass to the message.

    Raises:
        ValueError: If the 'content' field is passed in kwargs.

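A hedged sketch of the documented contract: `RemoveMessage` carries only the id of the message to drop, and passing `content` raises `ValueError` per the hunk above.

```python
from langchain_core.messages import RemoveMessage

remove = RemoveMessage(id="run-abc123")
# RemoveMessage(id="run-abc123", content="...") would raise ValueError.
```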
@@ -13,25 +13,23 @@ class SystemMessage(BaseMessage):
    of input messages.

    Example:
        ```python
        from langchain_core.messages import HumanMessage, SystemMessage

        .. code-block:: python

            from langchain_core.messages import HumanMessage, SystemMessage

            messages = [
                SystemMessage(content="You are a helpful assistant! Your name is Bob."),
                HumanMessage(content="What is your name?"),
            ]

            # Define a chat model and invoke it with the messages
            print(model.invoke(messages))
        messages = [
            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
            HumanMessage(content="What is your name?"),
        ]

        # Define a chat model and invoke it with the messages
        print(model.invoke(messages))
        ```
    """

    type: Literal["system"] = "system"
    """The type of the message (used for serialization).

    Defaults to ``'system'``.
    Defaults to `'system'`.

    """

@@ -56,7 +54,7 @@ class SystemMessage(BaseMessage):
        content_blocks: list[types.ContentBlock] | None = None,
        **kwargs: Any,
    ) -> None:
        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
        """Specify `content` as positional arg or `content_blocks` for typing."""
        if content_blocks is not None:
            super().__init__(
                content=cast("str | list[str | dict]", content_blocks),
@@ -75,6 +73,6 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment]
    """The type of the message (used for serialization).

    Defaults to ``'SystemMessageChunk'``.
    Defaults to `'SystemMessageChunk'`.

    """

@@ -16,8 +16,8 @@ from langchain_core.utils._merge import merge_dicts, merge_obj
class ToolOutputMixin:
    """Mixin for objects that tools can return directly.

    If a custom BaseTool is invoked with a ``ToolCall`` and the output of custom code is
    not an instance of ``ToolOutputMixin``, the output will automatically be coerced to
    If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is
    not an instance of `ToolOutputMixin`, the output will automatically be coerced to
    a string and wrapped in a `ToolMessage`.

    """
@@ -27,38 +27,37 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
    """Message for passing the result of executing a tool back to a model.

    `ToolMessage` objects contain the result of a tool invocation. Typically, the result
    is encoded inside the ``content`` field.
    is encoded inside the `content` field.

    Example: A `ToolMessage` representing a result of ``42`` from a tool call with id
    Example: A `ToolMessage` representing a result of `42` from a tool call with id

        .. code-block:: python

            from langchain_core.messages import ToolMessage

            ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
        ```python
        from langchain_core.messages import ToolMessage

        ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
        ```

    Example: A `ToolMessage` where only part of the tool output is sent to the model
    and the full output is passed in to artifact.

    !!! version-added "Added in version 0.2.17"

        .. code-block:: python
        ```python
        from langchain_core.messages import ToolMessage

            from langchain_core.messages import ToolMessage
            tool_output = {
                "stdout": "From the graph we can see that the correlation between "
                "x and y is ...",
                "stderr": None,
                "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
            }

        tool_output = {
            "stdout": "From the graph we can see that the correlation between "
            "x and y is ...",
            "stderr": None,
            "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
        }

            ToolMessage(
                content=tool_output["stdout"],
                artifact=tool_output,
                tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
            )
        ToolMessage(
            content=tool_output["stdout"],
            artifact=tool_output,
            tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
        )
        ```

    The `tool_call_id` field is used to associate the tool call request with the
    tool call response. This is useful in situations where a chat model is able
@@ -72,7 +71,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
    type: Literal["tool"] = "tool"
    """The type of the message (used for serialization).

    Defaults to ``'tool'``.
    Defaults to `'tool'`.

    """

@@ -167,7 +166,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
    ) -> None:
        """Initialize `ToolMessage`.

        Specify ``content`` as positional arg or ``content_blocks`` for typing.
        Specify `content` as positional arg or `content_blocks` for typing.

        Args:
            content: The string contents of the message.
@@ -219,13 +218,12 @@ class ToolCall(TypedDict):
    """Represents a request to call a tool.

    Example:
        ```python
        {"name": "foo", "args": {"a": 1}, "id": "123"}
        ```

        .. code-block:: python

            {"name": "foo", "args": {"a": 1}, "id": "123"}

    This represents a request to call the tool named ``'foo'`` with arguments
    ``{"a": 1}`` and an identifier of ``'123'``.
    This represents a request to call the tool named `'foo'` with arguments
    `{"a": 1}` and an identifier of `'123'`.

    """

@@ -265,22 +263,20 @@ def tool_call(
class ToolCallChunk(TypedDict):
    """A chunk of a tool call (e.g., as part of a stream).

    When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``),
    When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
    all string attributes are concatenated. Chunks are only merged if their
    values of ``index`` are equal and not None.
    values of `index` are equal and not None.

    Example:
        ```python
        left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
        right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

        .. code-block:: python

            left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
            right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

            (
                AIMessageChunk(content="", tool_call_chunks=left_chunks)
                + AIMessageChunk(content="", tool_call_chunks=right_chunks)
            ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]

        (
            AIMessageChunk(content="", tool_call_chunks=left_chunks)
            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
        ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
        ```
    """

    name: str | None

@@ -5,7 +5,6 @@ Some examples of what you can do with these functions include:
* Convert messages to strings (serialization)
* Convert messages from dicts to Message objects (deserialization)
* Filter messages from a list of messages based on name, type or id etc.

"""

from __future__ import annotations
@@ -97,9 +96,9 @@ def get_buffer_string(
    Args:
        messages: Messages to be converted to strings.
        human_prefix: The prefix to prepend to contents of `HumanMessage`s.
            Default is ``'Human'``.
            Default is `'Human'`.
        ai_prefix: The prefix to prepend to contents of `AIMessage`. Default is
            ``'AI'``.
            `'AI'`.

    Returns:
        A single string concatenation of all input messages.
@@ -108,17 +107,16 @@ def get_buffer_string(
        ValueError: If an unsupported message type is encountered.

    Example:
        .. code-block:: python

            from langchain_core import AIMessage, HumanMessage

            messages = [
                HumanMessage(content="Hi, how are you?"),
                AIMessage(content="Good, how are you?"),
            ]
            get_buffer_string(messages)
            # -> "Human: Hi, how are you?\nAI: Good, how are you?"
        ```python
        from langchain_core.messages import AIMessage, HumanMessage

        messages = [
            HumanMessage(content="Hi, how are you?"),
            AIMessage(content="Good, how are you?"),
        ]
        get_buffer_string(messages)
        # -> "Human: Hi, how are you?\nAI: Good, how are you?"
        ```
    """
    string_messages = []
    for m in messages:
@@ -178,7 +176,7 @@ def _message_from_dict(message: dict) -> BaseMessage:


def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
    """Convert a sequence of messages from dicts to ``Message`` objects.
    """Convert a sequence of messages from dicts to `Message` objects.

    Args:
        messages: Sequence of messages (as dicts) to convert.
@@ -191,7 +189,7 @@ def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:


def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
    """Convert a message chunk to a ``Message``.
    """Convert a message chunk to a `Message`.

    Args:
        chunk: Message chunk to convert.
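A small round-trip sketch for the deserialization helper above; the `{"type": ..., "data": {...}}` dict shape is assumed from the serialization format produced by `message_to_dict`:

```python
from langchain_core.messages import messages_from_dict

msgs = messages_from_dict([{"type": "human", "data": {"content": "hi"}}])
# -> [HumanMessage(content="hi")]
```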
@@ -224,10 +222,10 @@ def _create_message_from_message_type(
    id: str | None = None,
    **additional_kwargs: Any,
) -> BaseMessage:
    """Create a message from a ``Message`` type and content string.
    """Create a message from a `Message` type and content string.

    Args:
        message_type: (str) the type of the message (e.g., ``'human'``, ``'ai'``, etc.).
        message_type: (str) the type of the message (e.g., `'human'`, `'ai'`, etc.).
        content: (str) the content string.
        name: (str) the name of the message. Default is None.
        tool_call_id: (str) the tool call id. Default is None.
@@ -239,9 +237,9 @@ def _create_message_from_message_type(
        a message of the appropriate type.

    Raises:
        ValueError: if the message type is not one of ``'human'``, ``'user'``, ``'ai'``,
            ``'assistant'``, ``'function'``, ``'tool'``, ``'system'``, or
            ``'developer'``.
        ValueError: if the message type is not one of `'human'`, `'user'`, `'ai'`,
            `'assistant'`, `'function'`, `'tool'`, `'system'`, or
            `'developer'`.
    """
    kwargs: dict[str, Any] = {}
    if name is not None:
@@ -307,15 +305,15 @@ def _create_message_from_message_type(


def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
    """Instantiate a ``Message`` from a variety of message formats.
    """Instantiate a `Message` from a variety of message formats.

    The message format can be one of the following:

    - ``BaseMessagePromptTemplate``
    - `BaseMessagePromptTemplate`
    - `BaseMessage`
    - 2-tuple of (role string, template); e.g., (``'human'``, ``'{user_input}'``)
    - 2-tuple of (role string, template); e.g., (`'human'`, `'{user_input}'`)
    - dict: a message dict with role and content keys
    - string: shorthand for (``'human'``, template); e.g., ``'{user_input}'``
    - string: shorthand for (`'human'`, template); e.g., `'{user_input}'`

    Args:
        message: a representation of a message in one of the supported formats.
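The formats listed above, exercised through the public `convert_to_messages` wrapper (which this diff uses later inside `filter_messages`); a sketch:

```python
from langchain_core.messages import convert_to_messages

msgs = convert_to_messages(
    [
        ("human", "hello"),                       # 2-tuple of (role, template)
        {"role": "assistant", "content": "hi"},   # dict with role/content keys
        "how are you?",                           # string shorthand for ('human', ...)
    ]
)
```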
@@ -430,11 +428,11 @@ def filter_messages(
    include_names: Message names to include. Default is None.
    exclude_names: Messages names to exclude. Default is None.
    include_types: Message types to include. Can be specified as string names
        (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as `BaseMessage`
        (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
        classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
        Default is None.
    exclude_types: Message types to exclude. Can be specified as string names
        (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as `BaseMessage`
        (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
        classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
        Default is None.
    include_ids: Message IDs to include. Default is None.
@@ -442,60 +440,59 @@ def filter_messages(
    exclude_tool_calls: Tool call IDs to exclude. Default is None.
        Can be one of the following:
        - `True`: all `AIMessage`s with tool calls and all
          `ToolMessage` objects will be excluded.
          `ToolMessage` objects will be excluded.
        - a sequence of tool call IDs to exclude:
            - `ToolMessage` objects with the corresponding tool call ID will be
              excluded.
            - The `tool_calls` in the AIMessage will be updated to exclude
              matching tool calls. If all `tool_calls` are filtered from an
              AIMessage, the whole message is excluded.
            - `ToolMessage` objects with the corresponding tool call ID will be
              excluded.
            - The `tool_calls` in the AIMessage will be updated to exclude
              matching tool calls. If all `tool_calls` are filtered from an
              AIMessage, the whole message is excluded.

    Returns:
        A list of Messages that meets at least one of the ``incl_*`` conditions and none
        of the ``excl_*`` conditions. If not ``incl_*`` conditions are specified then
        A list of Messages that meets at least one of the `incl_*` conditions and none
        of the `excl_*` conditions. If no `incl_*` conditions are specified then
        anything that is not explicitly excluded will be included.

    Raises:
        ValueError: If two incompatible arguments are provided.

    Example:
        .. code-block:: python
        ```python
        from langchain_core.messages import (
            filter_messages,
            AIMessage,
            HumanMessage,
            SystemMessage,
        )

            from langchain_core.messages import (
                filter_messages,
                AIMessage,
                HumanMessage,
                SystemMessage,
            )
        messages = [
            SystemMessage("you're a good assistant."),
            HumanMessage("what's your name", id="foo", name="example_user"),
            AIMessage("steve-o", id="bar", name="example_assistant"),
            HumanMessage(
                "what's your favorite color",
                id="baz",
            ),
            AIMessage(
                "silicon blue",
                id="blah",
            ),
        ]

            messages = [
                SystemMessage("you're a good assistant."),
                HumanMessage("what's your name", id="foo", name="example_user"),
                AIMessage("steve-o", id="bar", name="example_assistant"),
                HumanMessage(
                    "what's your favorite color",
                    id="baz",
                ),
                AIMessage(
                    "silicon blue",
                    id="blah",
                ),
            ]

            filter_messages(
                messages,
                incl_names=("example_user", "example_assistant"),
                incl_types=("system",),
                excl_ids=("bar",),
            )

        .. code-block:: python

            [
                SystemMessage("you're a good assistant."),
                HumanMessage("what's your name", id="foo", name="example_user"),
            ]
        filter_messages(
            messages,
            include_names=("example_user", "example_assistant"),
            include_types=("system",),
            exclude_ids=("bar",),
        )
        ```

        ```python
        [
            SystemMessage("you're a good assistant."),
            HumanMessage("what's your name", id="foo", name="example_user"),
        ]
        ```
    """
    messages = convert_to_messages(messages)
    filtered: list[BaseMessage] = []
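A short complement to the example above: the `exclude_tool_calls` branch described in the Args section has no example of its own, so here is a sketch of both documented forms:

```python
from langchain_core.messages import filter_messages

# Drop every AIMessage with tool calls and every ToolMessage:
no_tools = filter_messages(messages, exclude_tool_calls=True)

# Or surgically remove a single tool call (and its ToolMessage) by ID;
# an AIMessage whose tool_calls all match is dropped entirely:
pruned = filter_messages(messages, exclude_tool_calls=["123"])
```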
@@ -571,7 +568,7 @@ def merge_message_runs(
    Args:
        messages: Sequence Message-like objects to merge.
        chunk_separator: Specify the string to be inserted between message chunks.
            Defaults to ``'\n'``.
            Defaults to `'\n'`.

    Returns:
        list of BaseMessages with consecutive runs of message types merged into single
@@ -579,87 +576,86 @@ def merge_message_runs(
        the merged content is a concatenation of the two strings with a new-line
        separator.
        The separator inserted between message chunks can be controlled by specifying
        any string with ``chunk_separator``. If at least one of the messages has a list
        any string with `chunk_separator`. If at least one of the messages has a list
        of content blocks, the merged content is a list of content blocks.

    Example:
        ```python
        from langchain_core.messages import (
            merge_message_runs,
            AIMessage,
            HumanMessage,
            SystemMessage,
            ToolCall,
        )

        .. code-block:: python
        messages = [
            SystemMessage("you're a good assistant."),
            HumanMessage(
                "what's your favorite color",
                id="foo",
            ),
            HumanMessage(
                "wait your favorite food",
                id="bar",
            ),
            AIMessage(
                "my favorite colo",
                tool_calls=[
                    ToolCall(
                        name="blah_tool", args={"x": 2}, id="123", type="tool_call"
                    )
                ],
                id="baz",
            ),
            AIMessage(
                [{"type": "text", "text": "my favorite dish is lasagna"}],
                tool_calls=[
                    ToolCall(
                        name="blah_tool",
                        args={"x": -10},
                        id="456",
                        type="tool_call",
                    )
                ],
                id="blur",
            ),
        ]

            from langchain_core.messages import (
                merge_message_runs,
                AIMessage,
                HumanMessage,
                SystemMessage,
                ToolCall,
            )
        merge_message_runs(messages)
        ```

            messages = [
                SystemMessage("you're a good assistant."),
                HumanMessage(
                    "what's your favorite color",
                    id="foo",
                ),
                HumanMessage(
                    "wait your favorite food",
                    id="bar",
                ),
                AIMessage(
        ```python
        [
            SystemMessage("you're a good assistant."),
            HumanMessage(
                "what's your favorite color\\n"
                "wait your favorite food", id="foo",
            ),
            AIMessage(
                [
                    "my favorite colo",
                    tool_calls=[
                        ToolCall(
                            name="blah_tool", args={"x": 2}, id="123", type="tool_call"
                        )
                    ],
                    id="baz",
                ),
                AIMessage(
                    [{"type": "text", "text": "my favorite dish is lasagna"}],
                    tool_calls=[
                        ToolCall(
                            name="blah_tool",
                            args={"x": -10},
                            id="456",
                            type="tool_call",
                        )
                    ],
                    id="blur",
                ),
            ]

            merge_message_runs(messages)

        .. code-block:: python

            [
                SystemMessage("you're a good assistant."),
                HumanMessage(
                    "what's your favorite color\\n"
                    "wait your favorite food", id="foo",
                ),
                AIMessage(
                    [
                        "my favorite colo",
                        {"type": "text", "text": "my favorite dish is lasagna"}
                    ],
                    tool_calls=[
                        ToolCall({
                            "name": "blah_tool",
                            "args": {"x": 2},
                            "id": "123",
                            "type": "tool_call"
                        }),
                        ToolCall({
                            "name": "blah_tool",
                            "args": {"x": -10},
                            "id": "456",
                            "type": "tool_call"
                        })
                    ]
                    id="baz"
                ),
            ]
                    {"type": "text", "text": "my favorite dish is lasagna"}
                ],
                tool_calls=[
                    ToolCall({
                        "name": "blah_tool",
                        "args": {"x": 2},
                        "id": "123",
                        "type": "tool_call"
                    }),
                    ToolCall({
                        "name": "blah_tool",
                        "args": {"x": -10},
                        "id": "456",
                        "type": "tool_call"
                    })
                ]
                id="baz"
            ),
        ]

        ```
    """
    if not messages:
        return []
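The `chunk_separator` parameter documented above, shown with a space instead of the default newline; a sketch:

```python
from langchain_core.messages import HumanMessage, merge_message_runs

merged = merge_message_runs(
    [HumanMessage("what's your favorite color"), HumanMessage("wait your favorite food")],
    chunk_separator=" ",
)
# -> [HumanMessage("what's your favorite color wait your favorite food")]
```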
@@ -706,7 +702,7 @@ def trim_messages(
) -> list[BaseMessage]:
    r"""Trim messages to be below a token count.

    ``trim_messages`` can be used to reduce the size of a chat history to a specified
    `trim_messages` can be used to reduce the size of a chat history to a specified
    token count or specified message count.

    In either case, if passing the trimmed chat history back into a chat model
@@ -714,22 +710,22 @@ def trim_messages(
    properties:

    1. The resulting chat history should be valid. Most chat models expect that chat
       history starts with either (1) a `HumanMessage` or (2) a `SystemMessage`
       followed by a `HumanMessage`. To achieve this, set ``start_on='human'``.
       In addition, generally a `ToolMessage` can only appear after an `AIMessage`
       that involved a tool call.
       Please see the following link for more information about messages:
       https://python.langchain.com/docs/concepts/#messages
       history starts with either (1) a `HumanMessage` or (2) a `SystemMessage`
       followed by a `HumanMessage`. To achieve this, set `start_on='human'`.
       In addition, generally a `ToolMessage` can only appear after an `AIMessage`
       that involved a tool call.
       Please see the following link for more information about messages:
       https://python.langchain.com/docs/concepts/#messages
    2. It includes recent messages and drops old messages in the chat history.
       To achieve this set the ``strategy='last'``.
       To achieve this set the `strategy='last'`.
    3. Usually, the new chat history should include the `SystemMessage` if it
       was present in the original chat history since the `SystemMessage` includes
       special instructions to the chat model. The `SystemMessage` is almost always
       the first message in the history if present. To achieve this set the
       ``include_system=True``.
       was present in the original chat history since the `SystemMessage` includes
       special instructions to the chat model. The `SystemMessage` is almost always
       the first message in the history if present. To achieve this set the
       `include_system=True`.

    !!! note
        The examples below show how to configure ``trim_messages`` to achieve a behavior
        The examples below show how to configure `trim_messages` to achieve a behavior
        consistent with the above properties.

    Args:
@@ -737,49 +733,49 @@ def trim_messages(
        max_tokens: Max token count of trimmed messages.
        token_counter: Function or llm for counting tokens in a `BaseMessage` or a
            list of `BaseMessage`. If a `BaseLanguageModel` is passed in then
            ``BaseLanguageModel.get_num_tokens_from_messages()`` will be used.
            Set to ``len`` to count the number of **messages** in the chat history.
            `BaseLanguageModel.get_num_tokens_from_messages()` will be used.
            Set to `len` to count the number of **messages** in the chat history.

            !!! note
                Use ``count_tokens_approximately`` to get fast, approximate token
                Use `count_tokens_approximately` to get fast, approximate token
                counts.
                This is recommended for using ``trim_messages`` on the hot path, where
                This is recommended for using `trim_messages` on the hot path, where
                exact token counting is not necessary.

        strategy: Strategy for trimming.
            - ``'first'``: Keep the first ``<= n_count`` tokens of the messages.
            - ``'last'``: Keep the last ``<= n_count`` tokens of the messages.
            Default is ``'last'``.
            - `'first'`: Keep the first `<= n_count` tokens of the messages.
            - `'last'`: Keep the last `<= n_count` tokens of the messages.
            Default is `'last'`.
        allow_partial: Whether to split a message if only part of the message can be
            included. If ``strategy='last'`` then the last partial contents of a message
            are included. If ``strategy='first'`` then the first partial contents of a
            included. If `strategy='last'` then the last partial contents of a message
            are included. If `strategy='first'` then the first partial contents of a
            message are included.
            Default is False.
        end_on: The message type to end on. If specified then every message after the
            last occurrence of this type is ignored. If ``strategy='last'`` then this
            is done before we attempt to get the last ``max_tokens``. If
            ``strategy='first'`` then this is done after we get the first
            ``max_tokens``. Can be specified as string names (e.g. ``'system'``,
            ``'human'``, ``'ai'``, ...) or as `BaseMessage` classes (e.g.
            last occurrence of this type is ignored. If `strategy='last'` then this
            is done before we attempt to get the last `max_tokens`. If
            `strategy='first'` then this is done after we get the first
            `max_tokens`. Can be specified as string names (e.g. `'system'`,
            `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g.
            `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single
            type or a list of types.
            Default is None.
        start_on: The message type to start on. Should only be specified if
            ``strategy='last'``. If specified then every message before
            `strategy='last'`. If specified then every message before
            the first occurrence of this type is ignored. This is done after we trim
            the initial messages to the last ``max_tokens``. Does not
            apply to a `SystemMessage` at index 0 if ``include_system=True``. Can be
            specified as string names (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or
            the initial messages to the last `max_tokens`. Does not
            apply to a `SystemMessage` at index 0 if `include_system=True`. Can be
            specified as string names (e.g. `'system'`, `'human'`, `'ai'`, ...) or
            as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`,
            `AIMessage`, ...). Can be a single type or a list of types.
            Default is None.
        include_system: Whether to keep the SystemMessage if there is one at index 0.
            Should only be specified if ``strategy="last"``.
            Should only be specified if `strategy="last"`.
            Default is False.
        text_splitter: Function or ``langchain_text_splitters.TextSplitter`` for
        text_splitter: Function or `langchain_text_splitters.TextSplitter` for
            splitting the string contents of a message. Only used if
            ``allow_partial=True``. If ``strategy='last'`` then the last split tokens
            from a partial message will be included. if ``strategy='first'`` then the
            `allow_partial=True`. If `strategy='last'` then the last split tokens
            from a partial message will be included. if `strategy='first'` then the
            first split tokens from a partial message will be included. Token splitter
            assumes that separators are kept, so that split contents can be directly
            concatenated to recreate the original text. Defaults to splitting on
@@ -790,65 +786,63 @@ def trim_messages(

    Raises:
        ValueError: if two incompatible arguments are specified or an unrecognized
            ``strategy`` is specified.
            `strategy` is specified.

    Example:
        Trim chat history based on token count, keeping the `SystemMessage` if
        present, and ensuring that the chat history starts with a `HumanMessage` (
        or a `SystemMessage` followed by a `HumanMessage`).

        .. code-block:: python
        ```python
        from langchain_core.messages import (
            AIMessage,
            HumanMessage,
            BaseMessage,
            SystemMessage,
            trim_messages,
        )

            from langchain_core.messages import (
                AIMessage,
                HumanMessage,
                BaseMessage,
                SystemMessage,
                trim_messages,
            )

            messages = [
                SystemMessage(
                    "you're a good assistant, you always respond with a joke."
                ),
                HumanMessage("i wonder why it's called langchain"),
                AIMessage(
                    'Well, I guess they thought "WordRope" and "SentenceString" just '
                    "didn't have the same ring to it!"
                ),
                HumanMessage("and who is harrison chasing anyways"),
                AIMessage(
                    "Hmmm let me think.\n\nWhy, he's probably chasing after the last "
                    "cup of coffee in the office!"
                ),
                HumanMessage("what do you call a speechless parrot"),
            ]
        messages = [
            SystemMessage("you're a good assistant, you always respond with a joke."),
            HumanMessage("i wonder why it's called langchain"),
            AIMessage(
                'Well, I guess they thought "WordRope" and "SentenceString" just '
                "didn't have the same ring to it!"
            ),
            HumanMessage("and who is harrison chasing anyways"),
            AIMessage(
                "Hmmm let me think.\n\nWhy, he's probably chasing after the last "
                "cup of coffee in the office!"
            ),
            HumanMessage("what do you call a speechless parrot"),
        ]

            trim_messages(
                messages,
                max_tokens=45,
                strategy="last",
                token_counter=ChatOpenAI(model="gpt-4o"),
                # Most chat models expect that chat history starts with either:
                # (1) a HumanMessage or
                # (2) a SystemMessage followed by a HumanMessage
                start_on="human",
                # Usually, we want to keep the SystemMessage
                # if it's present in the original history.
                # The SystemMessage has special instructions for the model.
                include_system=True,
                allow_partial=False,
            )
        trim_messages(
            messages,
            max_tokens=45,
            strategy="last",
            token_counter=ChatOpenAI(model="gpt-4o"),
            # Most chat models expect that chat history starts with either:
            # (1) a HumanMessage or
            # (2) a SystemMessage followed by a HumanMessage
            start_on="human",
            # Usually, we want to keep the SystemMessage
            # if it's present in the original history.
            # The SystemMessage has special instructions for the model.
            include_system=True,
            allow_partial=False,
        )
        ```

        .. code-block:: python

            [
                SystemMessage(
                    content="you're a good assistant, you always respond with a joke."
                ),
                HumanMessage(content="what do you call a speechless parrot"),
            ]
        ```python
        [
            SystemMessage(
                content="you're a good assistant, you always respond with a joke."
            ),
            HumanMessage(content="what do you call a speechless parrot"),
        ]
        ```

        Trim chat history based on the message count, keeping the `SystemMessage` if
        present, and ensuring that the chat history starts with a `HumanMessage` (
@@ -874,100 +868,95 @@ def trim_messages(
            allow_partial=False,
        )

        .. code-block:: python

            [
                SystemMessage(
                    content="you're a good assistant, you always respond with a joke."
                ),
                HumanMessage(content="and who is harrison chasing anyways"),
                AIMessage(
                    content="Hmmm let me think.\n\nWhy, he's probably chasing after "
                    "the last cup of coffee in the office!"
                ),
                HumanMessage(content="what do you call a speechless parrot"),
            ]

        ```python
        [
            SystemMessage(
                content="you're a good assistant, you always respond with a joke."
            ),
            HumanMessage(content="and who is harrison chasing anyways"),
            AIMessage(
                content="Hmmm let me think.\n\nWhy, he's probably chasing after "
                "the last cup of coffee in the office!"
            ),
            HumanMessage(content="what do you call a speechless parrot"),
        ]
        ```
        Trim chat history using a custom token counter function that counts the
        number of tokens in each message.

        .. code-block:: python

            messages = [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage(
                    "This is a 4 token text. The full message is 10 tokens.", id="first"
                ),
                AIMessage(
                    [
                        {"type": "text", "text": "This is the FIRST 4 token block."},
                        {"type": "text", "text": "This is the SECOND 4 token block."},
                    ],
                    id="second",
                ),
                HumanMessage(
                    "This is a 4 token text. The full message is 10 tokens.", id="third"
                ),
                AIMessage(
                    "This is a 4 token text. The full message is 10 tokens.",
                    id="fourth",
                ),
            ]
        ```python
        messages = [
            SystemMessage("This is a 4 token text. The full message is 10 tokens."),
            HumanMessage(
                "This is a 4 token text. The full message is 10 tokens.", id="first"
            ),
            AIMessage(
                [
                    {"type": "text", "text": "This is the FIRST 4 token block."},
                    {"type": "text", "text": "This is the SECOND 4 token block."},
                ],
                id="second",
            ),
            HumanMessage(
                "This is a 4 token text. The full message is 10 tokens.", id="third"
            ),
            AIMessage(
                "This is a 4 token text. The full message is 10 tokens.",
                id="fourth",
            ),
        ]

            def dummy_token_counter(messages: list[BaseMessage]) -> int:
                # treat each message like it adds 3 default tokens at the beginning
                # of the message and at the end of the message. 3 + 4 + 3 = 10 tokens
                # per message.
        def dummy_token_counter(messages: list[BaseMessage]) -> int:
            # treat each message like it adds 3 default tokens at the beginning
            # of the message and at the end of the message. 3 + 4 + 3 = 10 tokens
            # per message.

                default_content_len = 4
                default_msg_prefix_len = 3
                default_msg_suffix_len = 3
            default_content_len = 4
            default_msg_prefix_len = 3
            default_msg_suffix_len = 3

                count = 0
                for msg in messages:
                    if isinstance(msg.content, str):
                        count += (
                            default_msg_prefix_len
                            + default_content_len
                            + default_msg_suffix_len
                        )
                    if isinstance(msg.content, list):
                        count += (
                            default_msg_prefix_len
                            + len(msg.content) * default_content_len
                            + default_msg_suffix_len
                        )
                return count
            count = 0
            for msg in messages:
                if isinstance(msg.content, str):
                    count += (
                        default_msg_prefix_len
                        + default_content_len
                        + default_msg_suffix_len
                    )
                if isinstance(msg.content, list):
                    count += (
                        default_msg_prefix_len
                        + len(msg.content) * default_content_len
                        + default_msg_suffix_len
                    )
            return count
        ```

        First 30 tokens, allowing partial messages:
        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=30,
                token_counter=dummy_token_counter,
                strategy="first",
                allow_partial=True,
            )

        .. code-block:: python

            [
                SystemMessage(
                    "This is a 4 token text. The full message is 10 tokens."
                ),
                HumanMessage(
                    "This is a 4 token text. The full message is 10 tokens.",
                    id="first",
                ),
                AIMessage(
                    [{"type": "text", "text": "This is the FIRST 4 token block."}],
                    id="second",
                ),
            ]
        ```python
        trim_messages(
            messages,
            max_tokens=30,
            token_counter=dummy_token_counter,
            strategy="first",
            allow_partial=True,
        )
        ```

        ```python
        [
            SystemMessage("This is a 4 token text. The full message is 10 tokens."),
            HumanMessage(
                "This is a 4 token text. The full message is 10 tokens.",
                id="first",
            ),
            AIMessage(
                [{"type": "text", "text": "This is the FIRST 4 token block."}],
                id="second",
            ),
        ]
        ```
    """
    # Validate arguments
    if start_on and strategy == "first":
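The `token_counter=len` option mentioned in the Args section above (count messages rather than tokens), as a hedged sketch that keeps the last two messages:

```python
from langchain_core.messages import trim_messages

last_two = trim_messages(
    messages,
    max_tokens=2,      # interpreted as a message count when token_counter=len
    token_counter=len,
    strategy="last",
)
```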
@@ -1042,21 +1031,21 @@ def convert_to_openai_messages(
    messages: Message-like object or iterable of objects whose contents are
        in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats.
    text_format: How to format string or text block contents:
        - ``'string'``:
        - `'string'`:
          If a message has a string content, this is left as a string. If
          a message has content blocks that are all of type ``'text'``, these
          a message has content blocks that are all of type `'text'`, these
          are joined with a newline to make a single string. If a message has
          content blocks and at least one isn't of type ``'text'``, then
          content blocks and at least one isn't of type `'text'`, then
          all blocks are left as dicts.
        - ``'block'``:
        - `'block'`:
          If a message has a string content, this is turned into a list
          with a single content block of type ``'text'``. If a message has
          with a single content block of type `'text'`. If a message has
          content blocks these are left as is.
    include_id: Whether to include message ids in the openai messages, if they
        are present in the source messages.

    Raises:
        ValueError: if an unrecognized ``text_format`` is specified, or if a message
        ValueError: if an unrecognized `text_format` is specified, or if a message
            content block is missing expected keys.

    Returns:
@@ -1070,50 +1059,49 @@ def convert_to_openai_messages(
        message dicts is returned.

    Example:
        ```python
        from langchain_core.messages import (
            convert_to_openai_messages,
            AIMessage,
            SystemMessage,
            ToolMessage,
        )

        .. code-block:: python

            from langchain_core.messages import (
                convert_to_openai_messages,
                AIMessage,
                SystemMessage,
                ToolMessage,
            )

            messages = [
                SystemMessage([{"type": "text", "text": "foo"}]),
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "whats in this"},
                        {
                            "type": "image_url",
                            "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"},
                        },
                    ],
                },
                AIMessage(
                    "",
                    tool_calls=[
                        {
                            "name": "analyze",
                            "args": {"baz": "buz"},
                            "id": "1",
                            "type": "tool_call",
                        }
                    ],
                ),
                ToolMessage("foobar", tool_call_id="1", name="bar"),
                {"role": "assistant", "content": "thats nice"},
            ]
            oai_messages = convert_to_openai_messages(messages)
            # -> [
            #   {'role': 'system', 'content': 'foo'},
            #   {'role': 'user', 'content': [{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]},
            #   {'role': 'assistant', 'tool_calls': [{'type': 'function', 'id': '1','function': {'name': 'analyze', 'arguments': '{"baz": "buz"}'}}], 'content': ''},
            #   {'role': 'tool', 'name': 'bar', 'content': 'foobar'},
            #   {'role': 'assistant', 'content': 'thats nice'}
            # ]
        messages = [
            SystemMessage([{"type": "text", "text": "foo"}]),
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "whats in this"},
                    {
                        "type": "image_url",
                        "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"},
                    },
                ],
            },
            AIMessage(
                "",
                tool_calls=[
                    {
                        "name": "analyze",
                        "args": {"baz": "buz"},
                        "id": "1",
                        "type": "tool_call",
                    }
                ],
            ),
            ToolMessage("foobar", tool_call_id="1", name="bar"),
            {"role": "assistant", "content": "thats nice"},
        ]
        oai_messages = convert_to_openai_messages(messages)
        # -> [
        #   {'role': 'system', 'content': 'foo'},
        #   {'role': 'user', 'content': [{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]},
        #   {'role': 'assistant', 'tool_calls': [{'type': 'function', 'id': '1','function': {'name': 'analyze', 'arguments': '{"baz": "buz"}'}}], 'content': ''},
        #   {'role': 'tool', 'name': 'bar', 'content': 'foobar'},
        #   {'role': 'assistant', 'content': 'thats nice'}
        # ]
        ```

    !!! version-added "Added in version 0.3.11"

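The `text_format='block'` branch described above, which the worked example does not cover; a sketch:

```python
from langchain_core.messages import convert_to_openai_messages

blocks = convert_to_openai_messages(
    {"role": "user", "content": "hello"},
    text_format="block",
)
# -> {'role': 'user', 'content': [{'type': 'text', 'text': 'hello'}]}
```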
@@ -1697,11 +1685,11 @@ def count_tokens_approximately(
    chars_per_token: Number of characters per token to use for the approximation.
        Default is 4 (one token corresponds to ~4 chars for common English text).
        You can also specify float values for more fine-grained control.
        `See more here. <https://platform.openai.com/tokenizer>`__
        [See more here](https://platform.openai.com/tokenizer).
    extra_tokens_per_message: Number of extra tokens to add per message.
        Default is 3 (special tokens, including beginning/end of message).
        You can also specify float values for more fine-grained control.
        `See more here. <https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>`__
        [See more here](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb).
    count_name: Whether to include message names in the count.
        Enabled by default.

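A sketch of the approximation documented above; this is a fast character-based estimate (~4 chars/token by default), not an exact count:

```python
from langchain_core.messages import HumanMessage, count_tokens_approximately

approx = count_tokens_approximately(
    [HumanMessage("Hello there, how are you?")],
    chars_per_token=4,
    extra_tokens_per_message=3,
)
```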
@@ -1,17 +1,4 @@
"""**OutputParser** classes parse the output of an LLM call.

**Class hierarchy:**

.. code-block::

    BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser  # ListOutputParser, PydanticOutputParser

**Main helpers:**

.. code-block::

    Serializable, Generation, PromptValue
"""  # noqa: E501
"""**OutputParser** classes parse the output of an LLM call."""

from typing import TYPE_CHECKING


@@ -134,29 +134,28 @@ class BaseOutputParser(
    Output parsers help structure language model responses.

    Example:
        .. code-block:: python
        ```python
        class BooleanOutputParser(BaseOutputParser[bool]):
            true_val: str = "YES"
            false_val: str = "NO"

            class BooleanOutputParser(BaseOutputParser[bool]):
                true_val: str = "YES"
                false_val: str = "NO"

                def parse(self, text: str) -> bool:
                    cleaned_text = text.strip().upper()
                    if cleaned_text not in (
                        self.true_val.upper(),
                        self.false_val.upper(),
                    ):
                        raise OutputParserException(
                            f"BooleanOutputParser expected output value to either be "
                            f"{self.true_val} or {self.false_val} (case-insensitive). "
                            f"Received {cleaned_text}."
                        )
                    return cleaned_text == self.true_val.upper()

                @property
                def _type(self) -> str:
                    return "boolean_output_parser"
            def parse(self, text: str) -> bool:
                cleaned_text = text.strip().upper()
                if cleaned_text not in (
                    self.true_val.upper(),
                    self.false_val.upper(),
                ):
                    raise OutputParserException(
                        f"BooleanOutputParser expected output value to either be "
                        f"{self.true_val} or {self.false_val} (case-insensitive). "
                        f"Received {cleaned_text}."
                    )
                return cleaned_text == self.true_val.upper()

            @property
            def _type(self) -> str:
                return "boolean_output_parser"
        ```
    """

    @property

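Usage of the custom parser defined in the docstring above, as a sketch:

```python
parser = BooleanOutputParser()
assert parser.parse("yes") is True    # case-insensitive match on true_val
assert parser.parse("NO") is False
parser.parse("maybe")                 # raises OutputParserException
```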
@@ -149,7 +149,7 @@ class CommaSeparatedListOutputParser(ListOutputParser):
    """Get the namespace of the langchain object.

    Returns:
        ``["langchain", "output_parsers", "list"]``
        `["langchain", "output_parsers", "list"]`
    """
    return ["langchain", "output_parsers", "list"]


@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

    Args:
        result: The result of the LLM call.
        partial: Whether to parse partial JSON objects. Default is False.
        partial: Whether to parse partial JSON objects.

    Returns:
        The parsed JSON object.

    Raises:
        OutputParserException: If the output is not valid JSON.
        `OutputParserException`: If the output is not valid JSON.
    """
    generation = result[0]
    if not isinstance(generation, ChatGeneration):
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):


class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
    """Parse an output as the Json object."""
    """Parse an output as the JSON object."""

    strict: bool = False
    """Whether to allow non-JSON-compliant strings.
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):

    Args:
        result: The result of the LLM call.
        partial: Whether to parse partial JSON objects. Default is False.
        partial: Whether to parse partial JSON objects.

    Returns:
        The parsed JSON object.

    Raises:
        OutputParserException: If the output is not valid JSON.
        `OutputParserException`: If the output is not valid JSON.
    """
    if len(result) != 1:
        msg = f"Expected exactly one result, but got {len(result)}"
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):


class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
    """Parse an output as the element of the Json object."""
    """Parse an output as the element of the JSON object."""

    key_name: str
    """The name of the key to return."""
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):

    Args:
        result: The result of the LLM call.
        partial: Whether to parse partial JSON objects. Default is False.
        partial: Whether to parse partial JSON objects.

    Returns:
        The parsed JSON object.
@@ -177,48 +177,50 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):


class PydanticOutputFunctionsParser(OutputFunctionsParser):
    """Parse an output as a pydantic object.
    """Parse an output as a Pydantic object.

    This parser is used to parse the output of a ChatModel that uses
    OpenAI function format to invoke functions.
    This parser is used to parse the output of a chat model that uses OpenAI function
    format to invoke functions.

    The parser extracts the function call invocation and matches
    them to the pydantic schema provided.
    The parser extracts the function call invocation and matches them to the Pydantic
    schema provided.

    An exception will be raised if the function call does not match
    the provided schema.
    An exception will be raised if the function call does not match the provided schema.

    Example:
        ... code-block:: python
        ```python
        message = AIMessage(
            content="This is a test message",
            additional_kwargs={
                "function_call": {
                    "name": "cookie",
                    "arguments": json.dumps({"name": "value", "age": 10}),
                }
            },
        )
        chat_generation = ChatGeneration(message=message)

            message = AIMessage(
                content="This is a test message",
                additional_kwargs={
                    "function_call": {
                        "name": "cookie",
                        "arguments": json.dumps({"name": "value", "age": 10}),
                    }
                },
            )
            chat_generation = ChatGeneration(message=message)


            class Cookie(BaseModel):
                name: str
                age: int
        class Cookie(BaseModel):
            name: str
            age: int


            class Dog(BaseModel):
                species: str

            # Full output
            parser = PydanticOutputFunctionsParser(
                pydantic_schema={"cookie": Cookie, "dog": Dog}
            )
            result = parser.parse_result([chat_generation])
        class Dog(BaseModel):
            species: str


        # Full output
        parser = PydanticOutputFunctionsParser(
            pydantic_schema={"cookie": Cookie, "dog": Dog}
        )
        result = parser.parse_result([chat_generation])
        ```

    """

    pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
    """The pydantic schema to parse the output with.
    """The Pydantic schema to parse the output with.

    If multiple schemas are provided, then the function name will be used to
    determine which schema to use.
@@ -227,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
    @model_validator(mode="before")
    @classmethod
    def validate_schema(cls, values: dict) -> Any:
        """Validate the pydantic schema.
        """Validate the Pydantic schema.

        Args:
            values: The values to validate.
@@ -236,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
            The validated values.

        Raises:
            ValueError: If the schema is not a pydantic schema.
            `ValueError`: If the schema is not a Pydantic schema.
        """
        schema = values["pydantic_schema"]
        if "args_only" not in values:
@@ -259,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON objects. Default is False.
            partial: Whether to parse partial JSON objects.

        Raises:
            ValueError: If the pydantic schema is not valid.
            `ValueError`: If the Pydantic schema is not valid.

        Returns:
            The parsed JSON object.
@@ -285,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
        elif issubclass(pydantic_schema, BaseModelV1):
            pydantic_args = pydantic_schema.parse_raw(args)
        else:
            msg = f"Unsupported pydantic schema: {pydantic_schema}"
            msg = f"Unsupported Pydantic schema: {pydantic_schema}"
            raise ValueError(msg)
        return pydantic_args


class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
    """Parse an output as an attribute of a pydantic object."""
    """Parse an output as an attribute of a Pydantic object."""

    attr_name: str
    """The name of the attribute to return."""
@@ -302,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):

        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON objects. Default is False.
            partial: Whether to parse partial JSON objects.

        Returns:
            The parsed JSON object.

@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (


class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
    """Parse an output using a pydantic model."""
    """Parse an output using a Pydantic model."""

    pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
    """The pydantic model to parse."""
    """The Pydantic model to parse."""

    def _parse_obj(self, obj: dict) -> TBaseModel:
        try:
@@ -45,21 +45,20 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
    def parse_result(
        self, result: list[Generation], *, partial: bool = False
    ) -> TBaseModel | None:
        """Parse the result of an LLM call to a pydantic object.
        """Parse the result of an LLM call to a Pydantic object.

        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON objects.
                If `True`, the output will be a JSON object containing
                all the keys that have been returned so far.
                Defaults to `False`.

        Raises:
            OutputParserException: If the result is not valid JSON
                or does not conform to the pydantic model.
            `OutputParserException`: If the result is not valid JSON
                or does not conform to the Pydantic model.

        Returns:
            The parsed pydantic object.
            The parsed Pydantic object.
        """
        try:
            json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
|
||||
raise
|
||||
|
||||
def parse(self, text: str) -> TBaseModel:
|
||||
"""Parse the output of an LLM call to a pydantic object.
|
||||
"""Parse the output of an LLM call to a Pydantic object.
|
||||
|
||||
Args:
|
||||
text: The output of the LLM call.
|
||||
|
||||
Returns:
|
||||
The parsed pydantic object.
|
||||
The parsed Pydantic object.
|
||||
"""
|
||||
return super().parse(text)
|
||||
|
||||
@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
|
||||
@property
|
||||
@override
|
||||
def OutputType(self) -> type[TBaseModel]:
|
||||
"""Return the pydantic model."""
|
||||
"""Return the Pydantic model."""
|
||||
return self.pydantic_object
|
||||
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):
|
||||
"""Get the namespace of the langchain object.
|
||||
|
||||
Returns:
|
||||
``["langchain", "schema", "output_parser"]``
|
||||
`["langchain", "schema", "output_parser"]`
|
||||
"""
|
||||
return ["langchain", "schema", "output_parser"]
|
||||
|
||||
|
||||
@@ -64,7 +64,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
|
||||
Args:
|
||||
input: The input to transform.
|
||||
config: The configuration to use for the transformation.
|
||||
kwargs: Additional keyword arguments.
|
||||
**kwargs: Additional keyword arguments.
|
||||
|
||||
Yields:
|
||||
The transformed output.
|
||||
@@ -85,7 +85,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
|
||||
Args:
|
||||
input: The input to transform.
|
||||
config: The configuration to use for the transformation.
|
||||
kwargs: Additional keyword arguments.
|
||||
**kwargs: Additional keyword arguments.
|
||||
|
||||
Yields:
|
||||
The transformed output.
|
||||
|
||||
@@ -82,7 +82,7 @@ class _StreamingParser:
|
||||
chunk: A chunk of text to parse. This can be a string or a BaseMessage.
|
||||
|
||||
Yields:
|
||||
AddableDict: A dictionary representing the parsed XML element.
|
||||
A dictionary representing the parsed XML element.
|
||||
|
||||
Raises:
|
||||
xml.etree.ElementTree.ParseError: If the XML is not well-formed.
|
||||
|
||||
@@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc.
|
||||
- LLMs will return regular text strings.
|
||||
|
||||
In addition, users can access the raw output of either LLMs or chat models via
|
||||
callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
|
||||
callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an
|
||||
LLMResult object containing the generated outputs and any additional information
|
||||
returned by the model provider.
|
||||
|
||||
|
||||
@@ -15,10 +15,10 @@ from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
    """A single chat generation output.

    A subclass of `Generation` that represents the response from a chat model
    that generates chat messages.

    The `message` attribute is a structured representation of the chat message.
    Most of the time, the message will be of type `AIMessage`.

    Users working with chat models will usually access information via either

@@ -70,9 +70,9 @@ class ChatGeneration(Generation):


class ChatGenerationChunk(ChatGeneration):
    """`ChatGeneration` chunk.

    `ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
    """

    message: BaseMessageChunk

@@ -47,7 +47,7 @@ class Generation(Serializable):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "schema", "output"]`
    """
    return ["langchain", "schema", "output"]

@@ -56,16 +56,16 @@ class GenerationChunk(Generation):
    """Generation chunk, which can be concatenated with other Generation chunks."""

    def __add__(self, other: GenerationChunk) -> GenerationChunk:
        """Concatenate two `GenerationChunk`s.

        Args:
            other: Another `GenerationChunk` to concatenate with.

        Raises:
            TypeError: If other is not a `GenerationChunk`.

        Returns:
            A new `GenerationChunk` concatenated from self and other.
        """
        if isinstance(other, GenerationChunk):
            generation_info = merge_dicts(

@@ -30,8 +30,8 @@ class LLMResult(BaseModel):
The second dimension of the list represents different candidate generations for a
given prompt.

- When returned from **an LLM**, the type is `list[list[Generation]]`.
- When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.

ChatGeneration is a subclass of Generation that has a field for a structured chat
message.

@@ -97,7 +97,7 @@ class LLMResult(BaseModel):
    other: Another `LLMResult` object to compare against.

    Returns:
        `True` if the generations and `llm_output` are equal, `False` otherwise.
    """
    if not isinstance(other, LLMResult):
        return NotImplemented

@@ -40,7 +40,7 @@ class PromptValue(Serializable, ABC):
    This is used to determine the namespace of the object when serializing.

    Returns:
        `["langchain", "schema", "prompt"]`
    """
    return ["langchain", "schema", "prompt"]

@@ -67,7 +67,7 @@ class StringPromptValue(PromptValue):
    This is used to determine the namespace of the object when serializing.

    Returns:
        `["langchain", "prompts", "base"]`
    """
    return ["langchain", "prompts", "base"]

@@ -104,7 +104,7 @@ class ChatPromptValue(PromptValue):
    This is used to determine the namespace of the object when serializing.

    Returns:
        `["langchain", "prompts", "chat"]`
    """
    return ["langchain", "prompts", "chat"]
@@ -1,28 +1,8 @@
"""**Prompt** is the input to the model.

Prompt is often constructed from multiple components and prompt values. Prompt classes
and functions make constructing and working with prompts easy.
"""

from typing import TYPE_CHECKING

@@ -99,7 +99,7 @@ class BasePromptTemplate(
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "schema", "prompt_template"]`
    """
    return ["langchain", "schema", "prompt_template"]

@@ -127,10 +127,10 @@ class BasePromptTemplate(
    """Get the input schema for the prompt.

    Args:
        config: configuration for the prompt.

    Returns:
        The input schema for the prompt.
    """
    # This is correct, but pydantic typings/mypy don't think so.
    required_input_variables = {

@@ -199,7 +199,7 @@ class BasePromptTemplate(
    config: RunnableConfig, configuration for the prompt.

    Returns:
        The output of the prompt.
    """
    config = ensure_config(config)
    if self.metadata:

@@ -225,7 +225,7 @@ class BasePromptTemplate(
    config: RunnableConfig, configuration for the prompt.

    Returns:
        The output of the prompt.
    """
    config = ensure_config(config)
    if self.metadata:

@@ -245,20 +245,20 @@ class BasePromptTemplate(
    """Create Prompt Value.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        The output of the prompt.
    """

    async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
        """Async create Prompt Value.

        Args:
            **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            The output of the prompt.
        """
        return self.format_prompt(**kwargs)

@@ -266,10 +266,10 @@ class BasePromptTemplate(
    """Return a partial of the prompt template.

    Args:
        **kwargs: partial variables to set.

    Returns:
        A partial of the prompt template.
    """
    prompt_dict = self.__dict__.copy()
    prompt_dict["input_variables"] = list(

@@ -290,34 +290,30 @@ class BasePromptTemplate(
    """Format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

    Example:
        ```python
        prompt.format(variable1="foo")
        ```
    """

    async def aformat(self, **kwargs: Any) -> FormatOutputType:
        """Async format the prompt with the inputs.

        Args:
            **kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:
            ```python
            await prompt.aformat(variable1="foo")
            ```
        """
        return self.format(**kwargs)

@@ -330,10 +326,10 @@ class BasePromptTemplate(
    """Return dictionary representation of prompt.

    Args:
        **kwargs: Any additional arguments to pass to the dictionary.

    Returns:
        Dictionary representation of the prompt.
    """
    prompt_dict = super().model_dump(**kwargs)
    with contextlib.suppress(NotImplementedError):

@@ -352,10 +348,9 @@ class BasePromptTemplate(
    NotImplementedError: If the prompt type is not implemented.

    Example:
        ```python
        prompt.save(file_path="path/prompt.yaml")
        ```
    """
    if self.partial_variables:
        msg = "Cannot save prompt with partial variables."

@@ -426,16 +421,16 @@ def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str:
    string of the document formatted.

    Example:
        ```python
        from langchain_core.documents import Document
        from langchain_core.prompts import PromptTemplate

        doc = Document(page_content="This is a joke", metadata={"page": "1"})
        prompt = PromptTemplate.from_template("Page {page}: {page_content}")
        format_document(doc, prompt)
        >>> "Page 1: This is a joke"
        ```
    """
    return prompt.format(**_get_document_info(doc, prompt))
@@ -59,71 +59,70 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

    Direct usage:

    ```python
    from langchain_core.prompts import MessagesPlaceholder

    prompt = MessagesPlaceholder("history")
    prompt.format_messages()  # raises KeyError

    prompt = MessagesPlaceholder("history", optional=True)
    prompt.format_messages()  # returns empty list []

    prompt.format_messages(
        history=[
            ("system", "You are an AI assistant."),
            ("human", "Hello!"),
        ]
    )
    # -> [
    #     SystemMessage(content="You are an AI assistant."),
    #     HumanMessage(content="Hello!"),
    # ]
    ```

    Building a prompt with chat history:

    ```python
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            MessagesPlaceholder("history"),
            ("human", "{question}"),
        ]
    )
    prompt.invoke(
        {
            "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
            "question": "now multiply that by 4",
        }
    )
    # -> ChatPromptValue(messages=[
    #     SystemMessage(content="You are a helpful assistant."),
    #     HumanMessage(content="what's 5 + 2"),
    #     AIMessage(content="5 + 2 is 7"),
    #     HumanMessage(content="now multiply that by 4"),
    # ])
    ```

    Limiting the number of messages:

    ```python
    from langchain_core.prompts import MessagesPlaceholder

    prompt = MessagesPlaceholder("history", n_messages=1)

    prompt.format_messages(
        history=[
            ("system", "You are an AI assistant."),
            ("human", "Hello!"),
        ]
    )
    # -> [
    #     HumanMessage(content="Hello!"),
    # ]
    ```
    """

    variable_name: str

@@ -136,7 +135,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

    n_messages: PositiveInt | None = None
    """Maximum number of messages to include. If `None`, then will include all.
    """

    def __init__(
        self, variable_name: str, *, optional: bool = False, **kwargs: Any

@@ -238,11 +237,11 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
    template: a template.
    template_format: format of the template. Defaults to "f-string".
    partial_variables: A dictionary of variables that can be used to partially
        fill in the template. For example, if the template is
        `"{variable1} {variable2}"`, and `partial_variables` is
        `{"variable1": "foo"}`, then the final prompt will be
        `"foo {variable2}"`.

    **kwargs: keyword arguments to pass to the constructor.

    Returns:

@@ -415,7 +414,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
    template_format: format of the template.
        Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
    partial_variables: A dictionary of variables that can be used too partially.
        Defaults to `None`.

    **kwargs: keyword arguments to pass to the constructor.

    Returns:

@@ -685,7 +684,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):

    Args:
        **kwargs: keyword arguments to use for filling in template variables
            in all the template messages in this chat template.

    Returns:
        formatted string.

@@ -697,7 +696,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):

    Args:
        **kwargs: keyword arguments to use for filling in template variables
            in all the template messages in this chat template.

    Returns:
        formatted string.
@@ -781,78 +780,78 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
Examples:
    !!! warning "Behavior changed in 0.2.24"
        You can pass any Message-like formats supported by
        `ChatPromptTemplate.from_messages()` directly to `ChatPromptTemplate()`
        init.

    ```python
    from langchain_core.prompts import ChatPromptTemplate

    template = ChatPromptTemplate(
        [
            ("system", "You are a helpful AI bot. Your name is {name}."),
            ("human", "Hello, how are you doing?"),
            ("ai", "I'm doing well, thanks!"),
            ("human", "{user_input}"),
        ]
    )

    prompt_value = template.invoke(
        {
            "name": "Bob",
            "user_input": "What is your name?",
        }
    )
    # Output:
    # ChatPromptValue(
    #     messages=[
    #         SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
    #         HumanMessage(content='Hello, how are you doing?'),
    #         AIMessage(content="I'm doing well, thanks!"),
    #         HumanMessage(content='What is your name?')
    #     ]
    # )
    ```

    Messages Placeholder:

    ```python
    # In addition to Human/AI/Tool/Function messages,
    # you can initialize the template with a MessagesPlaceholder
    # either using the class directly or with the shorthand tuple syntax:

    template = ChatPromptTemplate(
        [
            ("system", "You are a helpful AI bot."),
            # Means the template will receive an optional list of messages under
            # the "conversation" key
            ("placeholder", "{conversation}"),
            # Equivalently:
            # MessagesPlaceholder(variable_name="conversation", optional=True)
        ]
    )

    prompt_value = template.invoke(
        {
            "conversation": [
                ("human", "Hi!"),
                ("ai", "How can I assist you today?"),
                ("human", "Can you make me an ice cream sundae?"),
                ("ai", "No."),
            ]
        }
    )

    # Output:
    # ChatPromptValue(
    #     messages=[
    #         SystemMessage(content='You are a helpful AI bot.'),
    #         HumanMessage(content='Hi!'),
    #         AIMessage(content='How can I assist you today?'),
    #         HumanMessage(content='Can you make me an ice cream sundae?'),
    #         AIMessage(content='No.'),
    #     ]
    # )
    ```

    Single-variable template:

@@ -861,29 +860,28 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
    inject the provided argument into that variable location.

    ```python
    from langchain_core.prompts import ChatPromptTemplate

    template = ChatPromptTemplate(
        [
            ("system", "You are a helpful AI bot. Your name is Carl."),
            ("human", "{user_input}"),
        ]
    )

    prompt_value = template.invoke("Hello, there!")
    # Equivalent to
    # prompt_value = template.invoke({"user_input": "Hello, there!"})

    # Output:
    # ChatPromptValue(
    #     messages=[
    #         SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
    #         HumanMessage(content='Hello, there!'),
    #     ]
    # )
    ```
    """  # noqa: E501

    messages: Annotated[list[MessageLike], SkipValidation()]

@@ -902,11 +900,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

    Args:
        messages: sequence of message representations.
            A message can be represented using the following formats:
            (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
            (message type, template); e.g., ("human", "{user_input}"),
            (4) 2-tuple of (message class, template), (5) a string which is
            shorthand for ("human", template); e.g., "{user_input}".
        template_format: format of the template. Defaults to "f-string".
        input_variables: A list of the names of the variables whose values are
            required as inputs to the prompt.

@@ -924,27 +922,26 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
    Examples:
        Instantiation from a list of message templates:

        ```python
        template = ChatPromptTemplate(
            [
                ("human", "Hello, how are you?"),
                ("ai", "I'm doing well, thanks!"),
                ("human", "That's good to hear."),
            ]
        )
        ```

        Instantiation from mixed message formats:

        ```python
        template = ChatPromptTemplate(
            [
                SystemMessage(content="hello"),
                ("human", "Hello, how are you?"),
            ]
        )
        ```
    """
    messages_ = [
        _convert_to_message_template(message, template_format)

@@ -977,7 +974,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "prompts", "chat"]`
    """
    return ["langchain", "prompts", "chat"]
@@ -1104,34 +1101,33 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
    Examples:
        Instantiation from a list of message templates:

        ```python
        template = ChatPromptTemplate.from_messages(
            [
                ("human", "Hello, how are you?"),
                ("ai", "I'm doing well, thanks!"),
                ("human", "That's good to hear."),
            ]
        )
        ```

        Instantiation from mixed message formats:

        ```python
        template = ChatPromptTemplate.from_messages(
            [
                SystemMessage(content="hello"),
                ("human", "Hello, how are you?"),
            ]
        )
        ```

    Args:
        messages: sequence of message representations.
            A message can be represented using the following formats:
            (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
            (message type, template); e.g., ("human", "{user_input}"),
            (4) 2-tuple of (message class, template), (5) a string which is
            shorthand for ("human", template); e.g., "{user_input}".
        template_format: format of the template. Defaults to "f-string".

    Returns:

@@ -1145,7 +1141,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

    Args:
        **kwargs: keyword arguments to use for filling in template variables
            in all the template messages in this chat template.

    Raises:
        ValueError: if messages are of unexpected types.

@@ -1173,7 +1169,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

    Args:
        **kwargs: keyword arguments to use for filling in template variables
            in all the template messages in this chat template.

    Returns:
        list of formatted messages.

@@ -1208,23 +1204,21 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

    Example:
        ```python
        from langchain_core.prompts import ChatPromptTemplate

        template = ChatPromptTemplate.from_messages(
            [
                ("system", "You are an AI assistant named {name}."),
                ("human", "Hi I'm {user}"),
                ("ai", "Hi there, {user}, I'm {name}."),
                ("human", "{input}"),
            ]
        )
        template2 = template.partial(user="Lucy", name="R2D2")

        template2.format_messages(input="hello")
        ```
    """
    prompt_dict = self.__dict__.copy()
    prompt_dict["input_variables"] = list(

@@ -1262,7 +1256,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

    Returns:
        If index is an int, returns the message at that index.
        If index is a slice, returns a new `ChatPromptTemplate`
        containing the messages in that slice.
    """
    if isinstance(index, slice):

@@ -77,7 +77,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain_core", "prompts", "dict"]`
    """
    return ["langchain_core", "prompts", "dict"]
@@ -268,97 +268,90 @@ class FewShotChatMessagePromptTemplate(
Prompt template with a fixed list of examples (matching the sample
conversation above):

    ```python
    from langchain_core.prompts import (
        FewShotChatMessagePromptTemplate,
        ChatPromptTemplate,
    )

    examples = [
        {"input": "2+2", "output": "4"},
        {"input": "2+3", "output": "5"},
    ]

    example_prompt = ChatPromptTemplate.from_messages(
        [
            ("human", "What is {input}?"),
            ("ai", "{output}"),
        ]
    )

    few_shot_prompt = FewShotChatMessagePromptTemplate(
        examples=examples,
        # This is a prompt template used to format each individual example.
        example_prompt=example_prompt,
    )

    final_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful AI Assistant"),
            few_shot_prompt,
            ("human", "{input}"),
        ]
    )
    final_prompt.format(input="What is 4+4?")
    ```

Prompt template with dynamically selected examples:

    ```python
    from langchain_core.prompts import SemanticSimilarityExampleSelector
    from langchain_core.embeddings import OpenAIEmbeddings
    from langchain_core.vectorstores import Chroma

    examples = [
        {"input": "2+2", "output": "4"},
        {"input": "2+3", "output": "5"},
        {"input": "2+4", "output": "6"},
        # ...
    ]

    to_vectorize = [" ".join(example.values()) for example in examples]
    embeddings = OpenAIEmbeddings()
    vectorstore = Chroma.from_texts(to_vectorize, embeddings, metadatas=examples)
    example_selector = SemanticSimilarityExampleSelector(vectorstore=vectorstore)

    from langchain_core import SystemMessage
    from langchain_core.prompts import HumanMessagePromptTemplate
    from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate

    few_shot_prompt = FewShotChatMessagePromptTemplate(
        # Which variable(s) will be passed to the example selector.
        input_variables=["input"],
        example_selector=example_selector,
        # Define how each example will be formatted.
        # In this case, each example will become 2 messages:
        # 1 human, and 1 AI
        example_prompt=(
            HumanMessagePromptTemplate.from_template("{input}")
            + AIMessagePromptTemplate.from_template("{output}")
        ),
    )
    # Define the overall prompt.
    final_prompt = (
        SystemMessagePromptTemplate.from_template("You are a helpful AI Assistant")
        + few_shot_prompt
        + HumanMessagePromptTemplate.from_template("{input}")
    )
    # Show the prompt
    print(final_prompt.format_messages(input="What's 3+3?"))  # noqa: T201

    # Use within an LLM
    from langchain_core.chat_models import ChatAnthropic

    chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307")
    chain.invoke({"input": "What's 3+3?"})
    ```
    """

    input_variables: list[str] = Field(default_factory=list)

@@ -49,7 +49,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "prompts", "few_shot_with_templates"]`
    """
    return ["langchain", "prompts", "few_shot_with_templates"]

@@ -116,17 +116,15 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
    """Format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

    Example:
        ```python
        prompt.format(variable1="foo")
        ```
    """
    kwargs = self._merge_partial_and_user_variables(**kwargs)
    # Get the examples to use.

@@ -165,7 +163,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
    """Async format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.
@@ -26,8 +26,8 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Create an image prompt template.

    Raises:
        ValueError: If the input variables contain `'url'`, `'path'`, or
            `'detail'`.
    """
    if "input_variables" not in kwargs:
        kwargs["input_variables"] = []

@@ -52,7 +52,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "prompts", "image"]`
    """
    return ["langchain", "prompts", "image"]

@@ -60,7 +60,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -71,7 +71,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Async format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -85,7 +85,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -93,14 +93,12 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    Raises:
        ValueError: If the url is not provided.
        ValueError: If the url is not a string.
        ValueError: If `'path'` is provided in the template or kwargs.

    Example:
        ```python
        prompt.format(variable1="foo")
        ```
    """
    formatted = {}
    for k, v in self.template.items():

@@ -134,7 +132,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Async format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -139,7 +139,7 @@ def load_prompt(path: str | Path, encoding: str | None = None) -> BasePromptTemp

    Args:
        path: Path to the prompt file.
        encoding: Encoding of the file.

    Returns:
        A PromptTemplate object.

@@ -26,7 +26,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "prompts", "chat"]`
    """
    return ["langchain", "prompts", "chat"]

@@ -44,18 +44,16 @@ class PromptTemplate(StringPromptTemplate):
    from untrusted sources.

    Example:
        ```python
        from langchain_core.prompts import PromptTemplate

        # Instantiation using from_template (recommended)
        prompt = PromptTemplate.from_template("Say {foo}")
        prompt.format(foo="bar")

        # Instantiation using initializer
        prompt = PromptTemplate(template="Say {foo}")
        ```
    """

    @property

@@ -71,7 +69,7 @@ class PromptTemplate(StringPromptTemplate):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "prompts", "prompt"]`
    """
    return ["langchain", "prompts", "prompt"]

@@ -144,10 +142,10 @@ class PromptTemplate(StringPromptTemplate):
    Raises:
        ValueError: If the template formats are not f-string or if there are
            conflicting partial variables.
        NotImplementedError: If the other object is not a `PromptTemplate` or str.

    Returns:
        A new `PromptTemplate` that is the combination of the two.
    """
    # Allow for easy combining
    if isinstance(other, PromptTemplate):

@@ -191,7 +189,7 @@ class PromptTemplate(StringPromptTemplate):
    """Format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -283,8 +281,8 @@ class PromptTemplate(StringPromptTemplate):
    fill in the template. For example, if the template is
    `"{variable1} {variable2}"`, and `partial_variables` is
    `{"variable1": "foo"}`, then the final prompt will be
    `"foo {variable2}"`.
    **kwargs: Any other arguments to pass to the prompt template.

    Returns:
        The prompt template loaded from the template.
@@ -4,7 +4,7 @@ from __future__ import annotations

import warnings
from abc import ABC
from collections.abc import Callable, Sequence
from string import Formatter
from typing import Any, Literal

@@ -149,9 +149,7 @@ def mustache_template_vars(
Defs = dict[str, "Defs"]


def mustache_schema(template: str) -> type[BaseModel]:
    """Get the variables from a mustache template.

    Args:

@@ -175,6 +173,11 @@ def mustache_schema(
    fields[prefix] = False
elif type_ in {"variable", "no escape"}:
    fields[prefix + tuple(key.split("."))] = True

for fkey, fval in fields.items():
    fields[fkey] = fval and not any(
        is_subsequence(fkey, k) for k in fields if k != fkey
    )
defs: Defs = {}  # None means leaf node
while fields:
    field, is_leaf = fields.popitem()

@@ -276,7 +279,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
    """Get the namespace of the langchain object.

    Returns:
        `["langchain", "prompts", "base"]`
    """
    return ["langchain", "prompts", "base"]

@@ -284,7 +287,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
    """Format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -295,7 +298,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
    """Async format the prompt with the inputs.

    Args:
        **kwargs: Any arguments to be passed to the prompt template.

    Returns:
        A formatted string.

@@ -327,3 +330,12 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
def pretty_print(self) -> None:
    """Print a pretty representation of the prompt."""
    print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201


def is_subsequence(child: Sequence, parent: Sequence) -> bool:
    """Return True if child is subsequence of parent."""
    if len(child) == 0 or len(parent) == 0:
        return False
    if len(parent) < len(child):
        return False
    return all(child[i] == parent[i] for i in range(len(child)))

@@ -65,8 +65,8 @@ class StructuredPrompt(ChatPromptTemplate):
def get_lc_namespace(cls) -> list[str]:
    """Get the namespace of the langchain object.

    For example, if the class is `langchain.llms.openai.OpenAI`, then the
    namespace is `["langchain", "llms", "openai"]`

    Returns:
        The namespace of the langchain object.

@@ -85,35 +85,34 @@ class StructuredPrompt(ChatPromptTemplate):
    Examples:
        Instantiation from a list of message templates:

        ```python
        from langchain_core.prompts import StructuredPrompt


        class OutputSchema(BaseModel):
            name: str
            value: int


        template = StructuredPrompt(
            [
                ("human", "Hello, how are you?"),
                ("ai", "I'm doing well, thanks!"),
                ("human", "That's good to hear."),
            ],
            OutputSchema,
        )
        ```

    Args:
        messages: sequence of message representations.
            A message can be represented using the following formats:
            (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
            (message type, template); e.g., ("human", "{user_input}"),
            (4) 2-tuple of (message class, template), (5) a string which is
            shorthand for ("human", template); e.g., "{user_input}"
        schema: a dictionary representation of function call, or a Pydantic model.
        **kwargs: Any additional kwargs to pass through to
            `ChatModel.with_structured_output(schema, **kwargs)`.

    Returns:
        a structured prompt template

@@ -145,7 +144,7 @@ class StructuredPrompt(ChatPromptTemplate):

    Args:
        others: The language model to pipe the structured prompt to.
        name: The name of the pipeline.

    Returns:
        A RunnableSequence object.
@@ -44,7 +44,7 @@ class BaseRateLimiter(abc.ABC):
    the attempt. Defaults to `True`.

    Returns:
        `True` if the tokens were successfully acquired, `False` otherwise.
    """

    @abc.abstractmethod

@@ -63,7 +63,7 @@ class BaseRateLimiter(abc.ABC):
    the attempt. Defaults to `True`.

    Returns:
        `True` if the tokens were successfully acquired, `False` otherwise.
    """


@@ -90,36 +90,34 @@ class InMemoryRateLimiter(BaseRateLimiter):
Current limitations:

- The rate limiter is not designed to work across different processes. It is
  an in-memory rate limiter, but it is thread safe.
- The rate limiter only supports time-based rate limiting. It does not take
  into account the size of the request or any other factors.

Example:
    ```python
    import time

    from langchain_core.rate_limiters import InMemoryRateLimiter

    rate_limiter = InMemoryRateLimiter(
        requests_per_second=0.1,  # <-- Can only make a request once every 10 seconds!!
        check_every_n_seconds=0.1,  # Wake up every 100 ms to check whether allowed to make a request,
        max_bucket_size=10,  # Controls the maximum burst size.
    )

    from langchain_anthropic import ChatAnthropic

    model = ChatAnthropic(
        model_name="claude-3-opus-20240229", rate_limiter=rate_limiter
    )

    for _ in range(5):
        tic = time.time()
        model.invoke("hello")
        toc = time.time()
        print(toc - tic)
    ```

!!! version-added "Added in version 0.2.24"

@@ -212,7 +210,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
    the attempt. Defaults to `True`.

    Returns:
        `True` if the tokens were successfully acquired, `False` otherwise.
    """
    if not blocking:
        return self._consume()

@@ -236,7 +234,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
    the attempt. Defaults to `True`.

    Returns:
        `True` if the tokens were successfully acquired, `False` otherwise.
    """
    if not blocking:
        return self._consume()
@@ -3,25 +3,10 @@
|
||||
It is more general than a vector store. A retriever does not need to be able to
|
||||
store documents, only to return (or retrieve) it. Vector stores can be used as
|
||||
the backbone of a retriever, but there are other types of retrievers as well.
|
||||
|
||||
**Class hierarchy:**
|
||||
|
||||
.. code-block::
|
||||
|
||||
BaseRetriever --> <name>Retriever # Examples: ArxivRetriever, MergerRetriever
|
||||
|
||||
**Main helpers:**
|
||||
|
||||
.. code-block::
|
||||
|
||||
RetrieverInput, RetrieverOutput, RetrieverLike, RetrieverOutputLike,
|
||||
Document, Serializable, Callbacks,
|
||||
CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from inspect import signature
|
||||
from typing import TYPE_CHECKING, Any
|
||||
@@ -29,8 +14,6 @@ from typing import TYPE_CHECKING, Any
 from pydantic import ConfigDict
 from typing_extensions import Self, TypedDict, override

-from langchain_core._api import deprecated
-from langchain_core.callbacks import Callbacks
 from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager
 from langchain_core.documents import Document
 from langchain_core.runnables import (
@@ -87,48 +70,45 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

     Example: A retriever that returns the first 5 documents from a list of documents

-        .. code-block:: python
-
-            from langchain_core.documents import Document
-            from langchain_core.retrievers import BaseRetriever
-
-            class SimpleRetriever(BaseRetriever):
-                docs: list[Document]
-                k: int = 5
-
-                def _get_relevant_documents(self, query: str) -> list[Document]:
-                    \"\"\"Return the first k documents from the list of documents\"\"\"
-                    return self.docs[:self.k]
-
-                async def _aget_relevant_documents(self, query: str) -> list[Document]:
-                    \"\"\"(Optional) async native implementation.\"\"\"
-                    return self.docs[:self.k]
+        ```python
+        from langchain_core.documents import Document
+        from langchain_core.retrievers import BaseRetriever
+
+        class SimpleRetriever(BaseRetriever):
+            docs: list[Document]
+            k: int = 5
+
+            def _get_relevant_documents(self, query: str) -> list[Document]:
+                \"\"\"Return the first k documents from the list of documents\"\"\"
+                return self.docs[:self.k]
+
+            async def _aget_relevant_documents(self, query: str) -> list[Document]:
+                \"\"\"(Optional) async native implementation.\"\"\"
+                return self.docs[:self.k]
+        ```
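A brief usage sketch for the example above, assuming the `SimpleRetriever` class as defined in the new fenced block (the sample documents are invented for illustration):

```python
import asyncio

from langchain_core.documents import Document

docs = [Document(page_content=f"doc {i}") for i in range(10)]
retriever = SimpleRetriever(docs=docs, k=3)  # SimpleRetriever as defined above

print(retriever.invoke("any query"))            # first 3 documents, via the sync path
print(asyncio.run(retriever.ainvoke("query")))  # same result via the native async path
```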

     Example: A simple retriever based on a scikit-learn vectorizer

-        .. code-block:: python
-
-            from sklearn.metrics.pairwise import cosine_similarity
-
-
-            class TFIDFRetriever(BaseRetriever, BaseModel):
-                vectorizer: Any
-                docs: list[Document]
-                tfidf_array: Any
-                k: int = 4
-
-                class Config:
-                    arbitrary_types_allowed = True
-
-                def _get_relevant_documents(self, query: str) -> list[Document]:
-                    # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
-                    query_vec = self.vectorizer.transform([query])
-                    # Op -- (n_docs,1) -- Cosine Sim with each doc
-                    results = cosine_similarity(self.tfidf_array, query_vec).reshape(
-                        (-1,)
-                    )
-                    return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
+        ```python
+        from sklearn.metrics.pairwise import cosine_similarity
+
+
+        class TFIDFRetriever(BaseRetriever, BaseModel):
+            vectorizer: Any
+            docs: list[Document]
+            tfidf_array: Any
+            k: int = 4
+
+            class Config:
+                arbitrary_types_allowed = True
+
+            def _get_relevant_documents(self, query: str) -> list[Document]:
+                # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
+                query_vec = self.vectorizer.transform([query])
+                # Op -- (n_docs,1) -- Cosine Sim with each doc
+                results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
+                return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
+        ```
     """

     model_config = ConfigDict(
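The scikit-learn example leaves the construction of `vectorizer` and `tfidf_array` implicit. A minimal construction sketch, assuming scikit-learn's `TfidfVectorizer` and the `TFIDFRetriever` class defined above (the sample texts are invented):

```python
from sklearn.feature_extraction.text import TfidfVectorizer

from langchain_core.documents import Document

texts = ["hello world", "foo bar", "hello foo bar"]
vectorizer = TfidfVectorizer()
tfidf_array = vectorizer.fit_transform(texts)  # (n_docs, n_features) sparse matrix

retriever = TFIDFRetriever(
    vectorizer=vectorizer,
    docs=[Document(page_content=t) for t in texts],
    tfidf_array=tfidf_array,
)
print(retriever.invoke("hello"))  # docs ranked by cosine similarity to the query
```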
@@ -138,14 +118,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
     _new_arg_supported: bool = False
     _expects_other_args: bool = False
     tags: list[str] | None = None
-    """Optional list of tags associated with the retriever. Defaults to `None`.
+    """Optional list of tags associated with the retriever.

     These tags will be associated with each call to this retriever,
     and passed as arguments to the handlers defined in `callbacks`.
     You can use these to e.g. identify a specific instance of a retriever with its
     use case.
     """
     metadata: dict[str, Any] | None = None
-    """Optional metadata associated with the retriever. Defaults to `None`.
+    """Optional metadata associated with the retriever.

     This metadata will be associated with each call to this retriever,
     and passed as arguments to the handlers defined in `callbacks`.
     You can use these to e.g. identify a specific instance of a retriever with its
@@ -155,35 +135,6 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
     @override
     def __init_subclass__(cls, **kwargs: Any) -> None:
         super().__init_subclass__(**kwargs)
-        # Version upgrade for old retrievers that implemented the public
-        # methods directly.
-        if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:
-            warnings.warn(
-                "Retrievers must implement abstract `_get_relevant_documents` method"
-                " instead of `get_relevant_documents`",
-                DeprecationWarning,
-                stacklevel=4,
-            )
-            swap = cls.get_relevant_documents
-            cls.get_relevant_documents = (  # type: ignore[method-assign]
-                BaseRetriever.get_relevant_documents
-            )
-            cls._get_relevant_documents = swap  # type: ignore[method-assign]
-        if (
-            hasattr(cls, "aget_relevant_documents")
-            and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
-        ):
-            warnings.warn(
-                "Retrievers must implement abstract `_aget_relevant_documents` method"
-                " instead of `aget_relevant_documents`",
-                DeprecationWarning,
-                stacklevel=4,
-            )
-            aswap = cls.aget_relevant_documents
-            cls.aget_relevant_documents = (  # type: ignore[method-assign]
-                BaseRetriever.aget_relevant_documents
-            )
-            cls._aget_relevant_documents = aswap  # type: ignore[method-assign]
         parameters = signature(cls._get_relevant_documents).parameters
         cls._new_arg_supported = parameters.get("run_manager") is not None
         if (
@@ -224,18 +175,16 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

         Args:
             input: The query string.
-            config: Configuration for the retriever. Defaults to `None`.
-            kwargs: Additional arguments to pass to the retriever.
+            config: Configuration for the retriever.
+            **kwargs: Additional arguments to pass to the retriever.

         Returns:
             List of relevant documents.

         Examples:
-            .. code-block:: python
-
-                retriever.invoke("query")
+            ```python
+            retriever.invoke("query")
+            ```
         """
         config = ensure_config(config)
         inheritable_metadata = {
@@ -287,18 +236,16 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

         Args:
             input: The query string.
-            config: Configuration for the retriever. Defaults to `None`.
-            kwargs: Additional arguments to pass to the retriever.
+            config: Configuration for the retriever.
+            **kwargs: Additional arguments to pass to the retriever.

         Returns:
             List of relevant documents.

         Examples:
-            .. code-block:: python
-
-                await retriever.ainvoke("query")
+            ```python
+            await retriever.ainvoke("query")
+            ```
         """
         config = ensure_config(config)
         inheritable_metadata = {
@@ -369,91 +316,3 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
                 query,
                 run_manager=run_manager.get_sync(),
             )
-
-    @deprecated(since="0.1.46", alternative="invoke", removal="1.0")
-    def get_relevant_documents(
-        self,
-        query: str,
-        *,
-        callbacks: Callbacks = None,
-        tags: list[str] | None = None,
-        metadata: dict[str, Any] | None = None,
-        run_name: str | None = None,
-        **kwargs: Any,
-    ) -> list[Document]:
-        """Retrieve documents relevant to a query.
-
-        Users should favor using `.invoke` or `.batch` rather than
-        `get_relevant_documents` directly.
-
-        Args:
-            query: string to find relevant documents for.
-            callbacks: Callback manager or list of callbacks. Defaults to `None`.
-            tags: Optional list of tags associated with the retriever.
-                These tags will be associated with each call to this retriever,
-                and passed as arguments to the handlers defined in `callbacks`.
-                Defaults to `None`.
-            metadata: Optional metadata associated with the retriever.
-                This metadata will be associated with each call to this retriever,
-                and passed as arguments to the handlers defined in `callbacks`.
-                Defaults to `None`.
-            run_name: Optional name for the run. Defaults to `None`.
-            kwargs: Additional arguments to pass to the retriever.
-
-        Returns:
-            List of relevant documents.
-        """
-        config: RunnableConfig = {}
-        if callbacks:
-            config["callbacks"] = callbacks
-        if tags:
-            config["tags"] = tags
-        if metadata:
-            config["metadata"] = metadata
-        if run_name:
-            config["run_name"] = run_name
-        return self.invoke(query, config, **kwargs)
-
-    @deprecated(since="0.1.46", alternative="ainvoke", removal="1.0")
-    async def aget_relevant_documents(
-        self,
-        query: str,
-        *,
-        callbacks: Callbacks = None,
-        tags: list[str] | None = None,
-        metadata: dict[str, Any] | None = None,
-        run_name: str | None = None,
-        **kwargs: Any,
-    ) -> list[Document]:
-        """Asynchronously get documents relevant to a query.
-
-        Users should favor using `.ainvoke` or `.abatch` rather than
-        `aget_relevant_documents` directly.
-
-        Args:
-            query: string to find relevant documents for.
-            callbacks: Callback manager or list of callbacks.
-            tags: Optional list of tags associated with the retriever.
-                These tags will be associated with each call to this retriever,
-                and passed as arguments to the handlers defined in `callbacks`.
-                Defaults to `None`.
-            metadata: Optional metadata associated with the retriever.
-                This metadata will be associated with each call to this retriever,
-                and passed as arguments to the handlers defined in `callbacks`.
-                Defaults to `None`.
-            run_name: Optional name for the run. Defaults to `None`.
-            kwargs: Additional arguments to pass to the retriever.
-
-        Returns:
-            List of relevant documents.
-        """
-        config: RunnableConfig = {}
-        if callbacks:
-            config["callbacks"] = callbacks
-        if tags:
-            config["tags"] = tags
-        if metadata:
-            config["metadata"] = metadata
-        if run_name:
-            config["run_name"] = run_name
-        return await self.ainvoke(query, config, **kwargs)
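The removed bodies show the migration path exactly: they folded `callbacks`, `tags`, `metadata`, and `run_name` into a `RunnableConfig` and delegated to `invoke`/`ainvoke`. A caller-side sketch of the equivalent change (`retriever` stands for any `BaseRetriever` instance):

```python
# Before (deprecated since 0.1.46, removed in 1.0):
docs = retriever.get_relevant_documents("query", tags=["demo"], run_name="lookup")

# After: pass the same options through the config dict instead.
docs = retriever.invoke("query", config={"tags": ["demo"], "run_name": "lookup"})
```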
File diff suppressed because it is too large
@@ -149,7 +149,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
         """Get the namespace of the langchain object.

         Returns:
-            ``["langchain", "schema", "runnable"]``
+            `["langchain", "schema", "runnable"]`
         """
         return ["langchain", "schema", "runnable"]
@@ -191,8 +191,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

         Args:
             input: The input to the Runnable.
-            config: The configuration for the Runnable. Defaults to `None`.
-            kwargs: Additional keyword arguments to pass to the Runnable.
+            config: The configuration for the Runnable.
+            **kwargs: Additional keyword arguments to pass to the Runnable.

         Returns:
             The output of the branch that was run.
@@ -301,8 +301,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

         Args:
             input: The input to the Runnable.
-            config: The configuration for the Runnable. Defaults to `None`.
-            kwargs: Additional keyword arguments to pass to the Runnable.
+            config: The configuration for the Runnable.
+            **kwargs: Additional keyword arguments to pass to the Runnable.

         Yields:
             The output of the branch that was run.
@@ -385,8 +385,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

         Args:
             input: The input to the Runnable.
-            config: The configuration for the Runnable. Defaults to `None`.
-            kwargs: Additional keyword arguments to pass to the Runnable.
+            config: The configuration for the Runnable.
+            **kwargs: Additional keyword arguments to pass to the Runnable.

         Yields:
             The output of the branch that was run.
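For orientation on these `RunnableBranch` hunks, a minimal sketch of constructing and invoking one: positional (condition, runnable) pairs followed by a default branch (the branch logic here is invented for illustration):

```python
from langchain_core.runnables import RunnableBranch, RunnableLambda

branch = RunnableBranch(
    (lambda x: isinstance(x, str), RunnableLambda(lambda x: x.upper())),
    (lambda x: isinstance(x, int), RunnableLambda(lambda x: x + 1)),
    RunnableLambda(lambda x: "default"),  # runs when no condition matches
)

print(branch.invoke("hello"))  # "HELLO"
print(branch.invoke(41))       # 42
print(branch.invoke(3.14))     # "default"
```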
@@ -131,7 +131,7 @@ def _set_config_context(
     """Set the child Runnable config + tracing context.

     Args:
-        config (RunnableConfig): The config to set.
+        config: The config to set.

     Returns:
         The token to reset the config and the previous tracing context.
@@ -165,7 +165,7 @@ def set_config_context(config: RunnableConfig) -> Generator[Context, None, None]
     """Set the child Runnable config + tracing context.

     Args:
-        config (RunnableConfig): The config to set.
+        config: The config to set.

     Yields:
         The config context.
@@ -193,10 +193,10 @@ def ensure_config(config: RunnableConfig | None = None) -> RunnableConfig:
     """Ensure that a config is a dict with all keys present.

     Args:
-        config: The config to ensure. Defaults to `None`.
+        config: The config to ensure.

     Returns:
-        RunnableConfig: The ensured config.
+        The ensured config.
     """
     empty = RunnableConfig(
         tags=[],
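A quick sketch of what `ensure_config` guarantees, per its docstring: every key present, with empty defaults filled in:

```python
from langchain_core.runnables.config import ensure_config

config = ensure_config(None)
print(config["tags"])             # []
print(config["metadata"])         # {}
print(config["recursion_limit"])  # the default recursion limit (25)
```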
@@ -254,7 +254,7 @@ def get_config_list(
         length: The length of the list.

     Returns:
-        list[RunnableConfig]: The list of configs.
+        The list of configs.

     Raises:
         ValueError: If the length of the list is not equal to the length of the inputs.
@@ -308,7 +308,7 @@ def patch_config(
         configurable: The configurable to set.

     Returns:
-        RunnableConfig: The patched config.
+        The patched config.
     """
     config = ensure_config(config)
     if callbacks is not None:
@@ -337,7 +337,7 @@ def merge_configs(*configs: RunnableConfig | None) -> RunnableConfig:
         *configs: The configs to merge.

     Returns:
-        RunnableConfig: The merged config.
+        The merged config.
     """
     base: RunnableConfig = {}
     # Even though the keys aren't literals, this is correct
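A sketch of `merge_configs` behavior as I understand it: collection-valued keys such as `tags` and `metadata` are combined, while scalar keys take the value from the later config:

```python
from langchain_core.runnables.config import merge_configs

merged = merge_configs(
    {"tags": ["a"], "metadata": {"x": 1}, "run_name": "first"},
    {"tags": ["b"], "metadata": {"y": 2}, "run_name": "second"},
)
print(merged["tags"])      # ["a", "b"] -- tag lists are combined
print(merged["metadata"])  # {"x": 1, "y": 2} -- metadata dicts are merged
print(merged["run_name"])  # "second" -- later configs win for scalar keys
```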
@@ -412,7 +412,7 @@ def call_func_with_variable_args(
         func: The function to call.
         input: The input to the function.
         config: The config to pass to the function.
-        run_manager: The run manager to pass to the function. Defaults to `None`.
+        run_manager: The run manager to pass to the function.
         **kwargs: The keyword arguments to pass to the function.

     Returns:
@@ -446,7 +446,7 @@ def acall_func_with_variable_args(
         func: The function to call.
         input: The input to the function.
         config: The config to pass to the function.
-        run_manager: The run manager to pass to the function. Defaults to `None`.
+        run_manager: The run manager to pass to the function.
         **kwargs: The keyword arguments to pass to the function.

     Returns:
@@ -466,10 +466,10 @@ def get_callback_manager_for_config(config: RunnableConfig) -> CallbackManager:
     """Get a callback manager for a config.

     Args:
-        config (RunnableConfig): The config.
+        config: The config.

     Returns:
-        CallbackManager: The callback manager.
+        The callback manager.
     """
     return CallbackManager.configure(
         inheritable_callbacks=config.get("callbacks"),
@@ -484,10 +484,10 @@ def get_async_callback_manager_for_config(
     """Get an async callback manager for a config.

     Args:
-        config (RunnableConfig): The config.
+        config: The config.

     Returns:
-        AsyncCallbackManager: The async callback manager.
+        The async callback manager.
     """
     return AsyncCallbackManager.configure(
         inheritable_callbacks=config.get("callbacks"),
@@ -512,12 +512,12 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
         """Submit a function to the executor.

         Args:
-            func (Callable[..., T]): The function to submit.
-            *args (Any): The positional arguments to the function.
-            **kwargs (Any): The keyword arguments to the function.
+            func: The function to submit.
+            *args: The positional arguments to the function.
+            **kwargs: The keyword arguments to the function.

         Returns:
-            Future[T]: The future for the function.
+            The future for the function.
         """
         return super().submit(
             cast("Callable[..., T]", partial(copy_context().run, func, *args, **kwargs))
@@ -533,14 +533,13 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
         """Map a function to multiple iterables.

         Args:
-            fn (Callable[..., T]): The function to map.
-            *iterables (Iterable[Any]): The iterables to map over.
-            timeout (float | None, optional): The timeout for the map.
-                Defaults to `None`.
-            chunksize (int, optional): The chunksize for the map. Defaults to 1.
+            fn: The function to map.
+            *iterables: The iterables to map over.
+            timeout: The timeout for the map.
+            chunksize: The chunksize for the map. Defaults to 1.

         Returns:
-            Iterator[T]: The iterator for the mapped function.
+            The iterator for the mapped function.
         """
         contexts = [copy_context() for _ in range(len(iterables[0]))]  # type: ignore[arg-type]
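The point of the `copy_context()` calls in `submit` and `map` is that `contextvars` set in the caller remain visible inside worker threads, which plain thread pools do not guarantee. A small sketch:

```python
from contextvars import ContextVar

from langchain_core.runnables.config import ContextThreadPoolExecutor

request_id: ContextVar[str] = ContextVar("request_id", default="unset")
request_id.set("abc-123")

with ContextThreadPoolExecutor(max_workers=2) as pool:
    # A plain ThreadPoolExecutor worker would see the default "unset";
    # here the caller's context is copied into the worker thread.
    print(pool.submit(request_id.get).result())                # "abc-123"
    print(list(pool.map(lambda _: request_id.get(), [1, 2])))  # ["abc-123", "abc-123"]
```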
@@ -562,10 +561,10 @@ def get_executor_for_config(
     """Get an executor for a config.

     Args:
-        config (RunnableConfig): The config.
+        config: The config.

     Yields:
-        Generator[Executor, None, None]: The executor.
+        The executor.
     """
     config = config or {}
     with ContextThreadPoolExecutor(
@@ -584,12 +583,12 @@ async def run_in_executor(

     Args:
         executor_or_config: The executor or config to run in.
-        func (Callable[P, Output]): The function.
-        *args (Any): The positional arguments to the function.
-        **kwargs (Any): The keyword arguments to the function.
+        func: The function.
+        *args: The positional arguments to the function.
+        **kwargs: The keyword arguments to the function.

     Returns:
-        Output: The output of the function.
+        The output of the function.
     """

     def wrapper() -> T:
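A usage sketch for `run_in_executor`: offloading a blocking call from async code. Passing `None` as the first argument falls back to the default executor; a `RunnableConfig` would also be accepted:

```python
import asyncio
import time

from langchain_core.runnables.config import run_in_executor


def slow_double(x: int) -> int:
    time.sleep(0.1)  # stands in for blocking I/O
    return x * 2


async def main() -> None:
    result = await run_in_executor(None, slow_double, 21)
    print(result)  # 42


asyncio.run(main())
```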
@@ -75,7 +75,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         """Get the namespace of the langchain object.

         Returns:
-            ``["langchain", "schema", "runnable"]``
+            `["langchain", "schema", "runnable"]`
         """
         return ["langchain", "schema", "runnable"]
@@ -123,11 +123,10 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         """Prepare the Runnable for invocation.

         Args:
-            config: The configuration to use. Defaults to `None`.
+            config: The configuration to use.

         Returns:
-            tuple[Runnable[Input, Output], RunnableConfig]: The prepared Runnable and
-                configuration.
+            The prepared Runnable and configuration.
         """
         runnable: Runnable[Input, Output] = self
         while isinstance(runnable, DynamicRunnable):
@@ -384,7 +383,7 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
         """Get the configuration specs for the RunnableConfigurableFields.

         Returns:
-            list[ConfigurableFieldSpec]: The configuration specs.
+            The configuration specs.
         """
         config_specs = []
@@ -656,7 +655,7 @@ def prefix_config_spec(
         prefix: The prefix to add.

     Returns:
-        ConfigurableFieldSpec: The prefixed ConfigurableFieldSpec.
+        The prefixed ConfigurableFieldSpec.
     """
     return (
         ConfigurableFieldSpec(
@@ -47,8 +47,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
     of a chain of Runnables. Fallbacks are tried in order until one succeeds or
     all fail.

-    While you can instantiate a ``RunnableWithFallbacks`` directly, it is usually
-    more convenient to use the ``with_fallbacks`` method on a Runnable.
+    While you can instantiate a `RunnableWithFallbacks` directly, it is usually
+    more convenient to use the `with_fallbacks` method on a Runnable.

     Example:
         ```python
@@ -146,7 +146,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         """Get the namespace of the langchain object.

         Returns:
-            ``["langchain", "schema", "runnable"]``
+            `["langchain", "schema", "runnable"]`
         """
         return ["langchain", "schema", "runnable"]
@@ -594,7 +594,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         Returns:
             If the attribute is anything other than a method that outputs a Runnable,
             returns getattr(self.runnable, name). If the attribute is a method that
-            does return a new Runnable (e.g. llm.bind_tools([...]) outputs a new
+            does return a new Runnable (e.g. model.bind_tools([...]) outputs a new
             RunnableBinding) then self.runnable and each of the runnables in
             self.fallbacks is replaced with getattr(x, name).

@@ -605,15 +605,15 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):

             gpt_4o = ChatOpenAI(model="gpt-4o")
             claude_3_sonnet = ChatAnthropic(model="claude-3-7-sonnet-20250219")
-            llm = gpt_4o.with_fallbacks([claude_3_sonnet])
+            model = gpt_4o.with_fallbacks([claude_3_sonnet])

-            llm.model_name
+            model.model_name
             # -> "gpt-4o"

             # .bind_tools() is called on both ChatOpenAI and ChatAnthropic
             # Equivalent to:
             # gpt_4o.bind_tools([...]).with_fallbacks([claude_3_sonnet.bind_tools([...])])
-            llm.bind_tools([...])
+            model.bind_tools([...])
             # -> RunnableWithFallbacks(
                 runnable=RunnableBinding(bound=ChatOpenAI(...), kwargs={"tools": [...]}),
                 fallbacks=[RunnableBinding(bound=ChatAnthropic(...), kwargs={"tools": [...]})],
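The `llm` → `model` rename above is cosmetic; the fallback mechanics are easiest to see with plain runnables and no model providers. A self-contained sketch (the failure is deliberate):

```python
from langchain_core.runnables import RunnableLambda


def flaky(x: str) -> str:
    raise ValueError("primary failed")


chain = RunnableLambda(flaky).with_fallbacks(
    [RunnableLambda(lambda x: f"fallback handled: {x}")]
)
print(chain.invoke("hi"))  # "fallback handled: hi"
```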
@@ -52,7 +52,7 @@ def is_uuid(value: str) -> bool:
         value: The string to check.

     Returns:
-        True if the string is a valid UUID, False otherwise.
+        `True` if the string is a valid UUID, `False` otherwise.
     """
     try:
         UUID(value)
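A two-line usage sketch for `is_uuid`, assuming it stays importable from `langchain_core.runnables.graph` as this diff suggests:

```python
from uuid import uuid4

from langchain_core.runnables.graph import is_uuid

print(is_uuid(str(uuid4())))  # True
print(is_uuid("not-a-uuid"))  # False
```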
@@ -69,7 +69,7 @@ class Edge(NamedTuple):
     target: str
     """The target node id."""
     data: Stringifiable | None = None
-    """Optional data associated with the edge. Defaults to `None`."""
+    """Optional data associated with the edge."""
     conditional: bool = False
     """Whether the edge is conditional. Defaults to `False`."""

@@ -77,8 +77,8 @@ class Edge(NamedTuple):
         """Return a copy of the edge with optional new source and target nodes.

         Args:
-            source: The new source node id. Defaults to `None`.
-            target: The new target node id. Defaults to `None`.
+            source: The new source node id.
+            target: The new target node id.

         Returns:
             A copy of the edge with the new source and target nodes.
@@ -101,7 +101,7 @@ class Node(NamedTuple):
     data: type[BaseModel] | RunnableType | None
     """The data of the node."""
     metadata: dict[str, Any] | None
-    """Optional metadata for the node. Defaults to `None`."""
+    """Optional metadata for the node."""

     def copy(
         self,

@@ -112,8 +112,8 @@ class Node(NamedTuple):
         """Return a copy of the node with optional new id and name.

         Args:
-            id: The new node id. Defaults to `None`.
-            name: The new node name. Defaults to `None`.
+            id: The new node id.
+            name: The new node name.

         Returns:
             A copy of the node with the new id and name.
@@ -132,7 +132,7 @@ class Branch(NamedTuple):
     condition: Callable[..., str]
     """A callable that returns a string representation of the condition."""
     ends: dict[str, str] | None
-    """Optional dictionary of end node ids for the branches. Defaults to `None`."""
+    """Optional dictionary of end node ids for the branches."""


 class CurveStyle(Enum):
@@ -321,8 +321,8 @@ class Graph:

         Args:
             data: The data of the node.
-            id: The id of the node. Defaults to `None`.
-            metadata: Optional metadata for the node. Defaults to `None`.
+            id: The id of the node.
+            metadata: Optional metadata for the node.

         Returns:
             The node that was added to the graph.
@@ -361,7 +361,7 @@ class Graph:
         Args:
             source: The source node of the edge.
             target: The target node of the edge.
-            data: Optional data associated with the edge. Defaults to `None`.
+            data: Optional data associated with the edge.
             conditional: Whether the edge is conditional. Defaults to `False`.

         Returns:
@@ -549,8 +549,8 @@ class Graph:

         Args:
             output_file_path: The path to save the image to. If `None`, the image
-                is not saved. Defaults to `None`.
-            fontname: The name of the font to use. Defaults to `None`.
+                is not saved.
+            fontname: The name of the font to use.
             labels: Optional labels for nodes and edges in the graph. Defaults to
                 `None`.
@@ -590,9 +590,9 @@ class Graph:
             node_colors: The colors of the nodes. Defaults to NodeStyles().
             wrap_label_n_words: The number of words to wrap the node labels at.
                 Defaults to 9.
-            frontmatter_config (dict[str, Any], optional): Mermaid frontmatter config.
+            frontmatter_config: Mermaid frontmatter config.
                 Can be used to customize theme and styles. Will be converted to YAML and
-                added to the beginning of the mermaid graph. Defaults to `None`.
+                added to the beginning of the mermaid graph.

                 See more here: https://mermaid.js.org/config/configuration.html.
@@ -652,7 +652,7 @@ class Graph:
             wrap_label_n_words: The number of words to wrap the node labels at.
                 Defaults to 9.
             output_file_path: The path to save the image to. If `None`, the image
-                is not saved. Defaults to `None`.
+                is not saved.
             draw_method: The method to use to draw the graph.
                 Defaults to MermaidDrawMethod.API.
             background_color: The color of the background. Defaults to "white".

@@ -661,9 +661,9 @@ class Graph:
                 Defaults to 1.
             retry_delay: The delay between retries (MermaidDrawMethod.API).
                 Defaults to 1.0.
-            frontmatter_config (dict[str, Any], optional): Mermaid frontmatter config.
+            frontmatter_config: Mermaid frontmatter config.
                 Can be used to customize theme and styles. Will be converted to YAML and
-                added to the beginning of the mermaid graph. Defaults to `None`.
+                added to the beginning of the mermaid graph.

                 See more here: https://mermaid.js.org/config/configuration.html.

@@ -679,7 +679,7 @@ class Graph:
                 }
                 ```
             base_url: The base URL of the Mermaid server for rendering via API.
-                Defaults to `None`.

         Returns:
             The PNG image as bytes.
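For context on these drawing hunks, a minimal sketch of rendering a runnable's graph: `draw_mermaid()` produces plain Mermaid text with no network call, while `draw_mermaid_png()` goes through the rendering backend (`MermaidDrawMethod.API` by default):

```python
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

print(chain.get_graph().draw_mermaid())  # Mermaid source as a string

# png_bytes = chain.get_graph().draw_mermaid_png()  # bytes; hits the Mermaid API
```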
@@ -62,8 +62,8 @@ class AsciiCanvas:
         """Create an ASCII canvas.

         Args:
-            cols: number of columns in the canvas. Should be ``> 1``.
-            lines: number of lines in the canvas. Should be ``> 1``.
+            cols: number of columns in the canvas. Should be `> 1`.
+            lines: number of lines in the canvas. Should be `> 1`.

         Raises:
             ValueError: if canvas dimensions are invalid.
@@ -90,9 +90,9 @@ class AsciiCanvas:
         """Create a point on ASCII canvas.

         Args:
-            x: x coordinate. Should be ``>= 0`` and ``<`` number of columns in
+            x: x coordinate. Should be `>= 0` and `<` number of columns in
                 the canvas.
-            y: y coordinate. Should be ``>= 0`` and ``<`` number of lines in the
+            y: y coordinate. Should be `>= 0` and `<` number of lines in the
                 canvas.
             char: character to place in the specified point on the
                 canvas.
@@ -117,11 +117,11 @@ class AsciiCanvas:
         """Create a line on ASCII canvas.

         Args:
-            x0 (int): x coordinate where the line should start.
-            y0 (int): y coordinate where the line should start.
-            x1 (int): x coordinate where the line should end.
-            y1 (int): y coordinate where the line should end.
-            char (str): character to draw the line with.
+            x0: x coordinate where the line should start.
+            y0: y coordinate where the line should start.
+            x1: x coordinate where the line should end.
+            y1: y coordinate where the line should end.
+            char: character to draw the line with.
         """
         if x0 > x1:
             x1, x0 = x0, x1
@@ -149,9 +149,9 @@ class AsciiCanvas:
         """Print a text on ASCII canvas.

         Args:
-            x (int): x coordinate where the text should start.
-            y (int): y coordinate where the text should start.
-            text (str): string that should be printed.
+            x: x coordinate where the text should start.
+            y: y coordinate where the text should start.
+            text: string that should be printed.
         """
         for i, char in enumerate(text):
             self.point(x + i, y, char)
@@ -160,10 +160,10 @@ class AsciiCanvas:
         """Create a box on ASCII canvas.

         Args:
-            x0 (int): x coordinate of the box corner.
-            y0 (int): y coordinate of the box corner.
-            width (int): box width.
-            height (int): box height.
+            x0: x coordinate of the box corner.
+            y0: y coordinate of the box corner.
+            width: box width.
+            height: box height.
         """
         if width <= 1 or height <= 1:
             msg = "Box dimensions should be > 1"
@@ -267,7 +267,8 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:

         print(draw_ascii(vertices, edges))
         ```
-    .. code-block::
+    ```txt

         +---+
         | 1 |
@@ -284,7 +285,7 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
         +---+     +---+
         | 3 |     | 4 |
         +---+     +---+

+    ```
     """
     # NOTE: coordinates might be negative, so we need to shift
     # everything to the positive plane before we actually draw it.
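The docstring example presupposes `vertices` and `edges`. A self-contained sketch that should reproduce a graph of the same shape as the picture above (1 → 2 → {3, 4}); the `graph_ascii` module path and the positional `Edge(source, target)` form are assumptions based on this diff:

```python
from langchain_core.runnables.graph import Edge
from langchain_core.runnables.graph_ascii import draw_ascii

vertices = {"1": "1", "2": "2", "3": "3", "4": "4"}  # node id -> label
edges = [Edge("1", "2"), Edge("2", "3"), Edge("2", "4")]

print(draw_ascii(vertices, edges))
```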
Some files were not shown because too many files have changed in this diff