Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-11 19:49:54 +00:00)

Compare commits: nh/subagen...nh/danglin (45 commits)
Commits (SHA1):

526cbb36e6
68ceeb64f6
edae976b81
9f4366bc9d
99e0a60aab
d38729fbac
ff0d21cfd5
9140a7cb86
41fe18bc80
9105573cb3
fff87e95d1
9beb29a34c
ca00f5aed9
637777b8e7
1cf851e054
961f965f0c
760fc3bc12
e3fc7d8aa6
2b3b209e40
78903ac285
f361acc11c
ed185c0026
6dc34beb71
c2205f88e6
abdbe185c5
c1b816cb7e
0559558715
75965474fc
5dc014fdf4
291a9fcea1
dd994b9d7f
83901b30e3
bcfa21a6e7
af1da28459
ed2ee4e8cc
f293c8ffd6
714c370191
a29d4e9c3a
74983f8a96
11c5b86981
383f4c0ee9
045e7ad4a1
0e80291804
c99773b652
5f9e3e33cd
.github/workflows/check_diffs.yml (vendored, 2 changed lines)

@@ -186,7 +186,7 @@ jobs:
       # We have to use 3.12 as 3.13 is not yet supported
       - name: "📦 Install UV Package Manager"
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
         with:
           python-version: "3.12"
.github/workflows/integration_tests.yml (vendored, 23 changed lines)

@@ -23,10 +23,8 @@ permissions:
   contents: read

 env:
-  POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
   DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
-  POETRY_LIBS: ("libs/partners/aws")

 jobs:
   # Generate dynamic test matrix based on input parameters or defaults
@@ -60,7 +58,6 @@ jobs:
           echo $matrix
           echo "matrix=$matrix" >> $GITHUB_OUTPUT
   # Run integration tests against partner libraries with live API credentials
-  # Tests are run with Poetry or UV depending on the library's setup
   build:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"
@@ -95,17 +92,7 @@ jobs:
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
           mv langchain-aws/libs/aws langchain/libs/partners/aws

-      - name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        uses: "./langchain/.github/actions/poetry_setup"
-        with:
-          python-version: ${{ matrix.python-version }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: langchain/${{ matrix.working-directory }}
-          cache-key: scheduled
-
       - name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
         uses: "./langchain/.github/actions/uv_setup"
         with:
           python-version: ${{ matrix.python-version }}
@@ -123,15 +110,7 @@ jobs:
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}

-      - name: "📦 Install Dependencies (Poetry)"
-        if: contains(env.POETRY_LIBS, matrix.working-directory)
-        run: |
-          echo "Running scheduled tests, installing dependencies with poetry..."
-          cd langchain/${{ matrix.working-directory }}
-          poetry install --with=test_integration,test
-
-      - name: "📦 Install Dependencies (UV)"
-        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
+      - name: "📦 Install Dependencies"
         run: |
           echo "Running scheduled tests, installing dependencies with uv..."
           cd langchain/${{ matrix.working-directory }}
@@ -152,11 +152,11 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
        priority: Email priority level (`'low'`, `'normal'`, `'high'`).

    Returns:
-        True if email was sent successfully, False otherwise.
+        `True` if email was sent successfully, `False` otherwise.

    Raises:
-        InvalidEmailError: If the email address format is invalid.
-        SMTPConnectionError: If unable to connect to email server.
+        `InvalidEmailError`: If the email address format is invalid.
+        `SMTPConnectionError`: If unable to connect to email server.
    """
    ```
README.md (21 changed lines)

@@ -34,14 +34,14 @@
 LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.

 ```bash
-pip install -U langchain
+pip install langchain
 ```

 ---

-**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
+**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/oss/python/langchain/overview).

-If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
+If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.

 > [!NOTE]
 > Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -62,16 +62,13 @@ While the LangChain framework can be used standalone, it also integrates seamles
 To improve your LLM application development, pair LangChain with:

 - [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
-- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
-- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
+- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
+- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio).

 ## Additional resources

-- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key
-  concepts behind the LangChain framework.
-- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with
-  guided examples on getting started with LangChain.
-- [API Reference](https://reference.langchain.com/python/): Detailed reference on
+- [Learn](https://docs.langchain.com/oss/python/learn): Use cases, conceptual overviews, and more.
+- [API Reference](https://reference.langchain.com/python): Detailed reference on
   navigating base packages and integrations for LangChain.
-- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
-- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
+- [LangChain Forum](https://forum.langchain.com): Connect with the community and share all of your technical questions, ideas, and feedback.
+- [Chat LangChain](https://chat.langchain.com): Ask questions & chat with our documentation.
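As a quick check of the install instructions above, a minimal usage sketch (assumes `langchain-openai` is installed alongside `langchain` and that `OPENAI_API_KEY` is set; the `init_chat_model` call mirrors the agent notebook diff further down):

```python
# Minimal sketch; the model name and provider are illustrative.
from langchain.chat_models import init_chat_model

model = init_chat_model(model="gpt-4o", model_provider="openai")
print(model.invoke("Sing a ballad of LangChain.").content)
```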
@@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab
 ```python
 from __module_name__ import Chat__ModuleName__

-llm = Chat__ModuleName__()
-llm.invoke("Sing a ballad of LangChain.")
+model = Chat__ModuleName__()
+model.invoke("Sing a ballad of LangChain.")
 ```

 ## Embeddings
@@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from __module_name__ import __ModuleName__LLM

-llm = __ModuleName__LLM()
-llm.invoke("The meaning of life is")
+model = __ModuleName__LLM()
+model.invoke("The meaning of life is")
 ```
@@ -72,7 +72,9 @@
    "cell_type": "markdown",
    "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
@@ -126,7 +128,7 @@
    "source": [
     "from __module_name__ import Chat__ModuleName__\n",
     "\n",
-    "llm = Chat__ModuleName__(\n",
+    "model = Chat__ModuleName__(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
@@ -162,7 +164,7 @@
     "    ),\n",
     "    (\"human\", \"I love programming.\"),\n",
     "]\n",
-    "ai_msg = llm.invoke(messages)\n",
+    "ai_msg = model.invoke(messages)\n",
     "ai_msg"
    ]
   },
@@ -207,7 +209,7 @@
     "    ]\n",
     ")\n",
     "\n",
-    "chain = prompt | llm\n",
+    "chain = prompt | model\n",
     "chain.invoke(\n",
     "    {\n",
     "        \"input_language\": \"English\",\n",
@@ -65,7 +65,9 @@
    "cell_type": "markdown",
    "id": "4b6e1ca6",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
@@ -119,7 +121,7 @@
    "source": [
     "from __module_name__ import __ModuleName__LLM\n",
     "\n",
-    "llm = __ModuleName__LLM(\n",
+    "model = __ModuleName__LLM(\n",
     "    model=\"model-name\",\n",
     "    temperature=0,\n",
     "    max_tokens=None,\n",
@@ -141,7 +143,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "035dea0f",
    "metadata": {
    "tags": []
@@ -150,7 +152,7 @@
    "source": [
     "input_text = \"__ModuleName__ is an AI company that \"\n",
     "\n",
-    "completion = llm.invoke(input_text)\n",
+    "completion = model.invoke(input_text)\n",
     "completion"
    ]
   },
@@ -177,7 +179,7 @@
    "\n",
    "prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
    "\n",
-   "chain = prompt | llm\n",
+   "chain = prompt | model\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"output_language\": \"German\",\n",
@@ -155,7 +155,7 @@
    "\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
-   "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+   "model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
   ]
  },
  {
@@ -185,7 +185,7 @@
    "chain = (\n",
    "    {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
    "    | prompt\n",
-   "    | llm\n",
+   "    | model\n",
    "    | StrOutputParser()\n",
    ")"
   ]
@@ -192,7 +192,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 16,
+  "execution_count": null,
   "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
   "metadata": {},
   "outputs": [],
@@ -203,7 +203,7 @@
   "# !pip install -qU langchain langchain-openai\n",
   "from langchain.chat_models import init_chat_model\n",
   "\n",
-  "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
+  "model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
  ]
 },
 {
@@ -216,7 +216,7 @@
  "from langgraph.prebuilt import create_react_agent\n",
  "\n",
  "tools = [tool]\n",
- "agent = create_react_agent(llm, tools)"
+ "agent = create_react_agent(model, tools)"
 ]
},
{
@@ -60,7 +60,7 @@ class Chat__ModuleName__(BaseChatModel):
        ```python
        from __module_name__ import Chat__ModuleName__

-       llm = Chat__ModuleName__(
+       model = Chat__ModuleName__(
            model="...",
            temperature=0,
            max_tokens=None,
@@ -77,7 +77,7 @@ class Chat__ModuleName__(BaseChatModel):
            ("system", "You are a helpful translator. Translate the user sentence to French."),
            ("human", "I love programming."),
        ]
-       llm.invoke(messages)
+       model.invoke(messages)
        ```

        ```python
@@ -87,7 +87,7 @@ class Chat__ModuleName__(BaseChatModel):
    # TODO: Delete if token-level streaming isn't supported.
    Stream:
        ```python
-       for chunk in llm.stream(messages):
+       for chunk in model.stream(messages):
            print(chunk.text, end="")
        ```

@@ -96,7 +96,7 @@ class Chat__ModuleName__(BaseChatModel):
        ```

        ```python
-       stream = llm.stream(messages)
+       stream = model.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
@@ -110,13 +110,13 @@ class Chat__ModuleName__(BaseChatModel):
    # TODO: Delete if native async isn't supported.
    Async:
        ```python
-       await llm.ainvoke(messages)
+       await model.ainvoke(messages)

        # stream:
-       # async for chunk in (await llm.astream(messages))
+       # async for chunk in (await model.astream(messages))

        # batch:
-       # await llm.abatch([messages])
+       # await model.abatch([messages])
        ```

        ```python
@@ -137,8 +137,8 @@ class Chat__ModuleName__(BaseChatModel):

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

-       llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
-       ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
+       model_with_tools = model.bind_tools([GetWeather, GetPopulation])
+       ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
        ai_msg.tool_calls
        ```

@@ -162,8 +162,8 @@ class Chat__ModuleName__(BaseChatModel):
            punchline: str = Field(description="The punchline to the joke")
            rating: int | None = Field(description="How funny the joke is, from 1 to 10")

-       structured_llm = llm.with_structured_output(Joke)
-       structured_llm.invoke("Tell me a joke about cats")
+       structured_model = model.with_structured_output(Joke)
+       structured_model.invoke("Tell me a joke about cats")
        ```

        ```python
@@ -176,8 +176,8 @@ class Chat__ModuleName__(BaseChatModel):
    JSON mode:
        ```python
        # TODO: Replace with appropriate bind arg.
-       json_llm = llm.bind(response_format={"type": "json_object"})
-       ai_msg = json_llm.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
+       json_model = model.bind(response_format={"type": "json_object"})
+       ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
        ai_msg.content
        ```

@@ -204,7 +204,7 @@ class Chat__ModuleName__(BaseChatModel):
            },
        ],
    )
-   ai_msg = llm.invoke([message])
+   ai_msg = model.invoke([message])
    ai_msg.content
    ```

@@ -235,7 +235,7 @@ class Chat__ModuleName__(BaseChatModel):
    # TODO: Delete if token usage metadata isn't supported.
    Token usage:
        ```python
-       ai_msg = llm.invoke(messages)
+       ai_msg = model.invoke(messages)
        ai_msg.usage_metadata
        ```

@@ -247,8 +247,8 @@ class Chat__ModuleName__(BaseChatModel):
    Logprobs:
        ```python
        # TODO: Replace with appropriate bind arg.
-       logprobs_llm = llm.bind(logprobs=True)
-       ai_msg = logprobs_llm.invoke(messages)
+       logprobs_model = model.bind(logprobs=True)
+       ai_msg = logprobs_model.invoke(messages)
        ai_msg.response_metadata["logprobs"]
        ```

@@ -257,7 +257,7 @@ class Chat__ModuleName__(BaseChatModel):
        ```
    Response metadata
        ```python
-       ai_msg = llm.invoke(messages)
+       ai_msg = model.invoke(messages)
        ai_msg.response_metadata
        ```
@@ -65,7 +65,7 @@ class __ModuleName__Retriever(BaseRetriever):
        Question: {question}\"\"\"
        )

-       llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+       model = ChatOpenAI(model="gpt-3.5-turbo-0125")

        def format_docs(docs):
            return "\\n\\n".join(doc.page_content for doc in docs)
@@ -73,7 +73,7 @@ class __ModuleName__Retriever(BaseRetriever):
        chain = (
            {"context": retriever | format_docs, "question": RunnablePassthrough()}
            | prompt
-           | llm
+           | model
            | StrOutputParser()
        )
@@ -65,7 +65,7 @@ def is_subclass(class_obj: type, classes_: list[type]) -> bool:
        classes_: A list of classes to check against.

    Returns:
-       True if `class_obj` is a subclass of any class in `classes_`, False otherwise.
+       True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
    """
    return any(
        issubclass(class_obj, kls)
@@ -47,7 +47,7 @@ class FileCallbackHandler(BaseCallbackHandler):
    Args:
        filename: The file path to write to.
        mode: The file open mode. Defaults to `'a'` (append).
-       color: Default color for text output. Defaults to `None`.
+       color: Default color for text output.

    !!! note
        When not used as a context manager, a deprecation warning will be issued
@@ -64,7 +64,7 @@ class FileCallbackHandler(BaseCallbackHandler):
    Args:
        filename: Path to the output file.
        mode: File open mode (e.g., `'w'`, `'a'`, `'x'`). Defaults to `'a'`.
-       color: Default text color for output. Defaults to `None`.
+       color: Default text color for output.

    """
    self.filename = filename
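A brief usage sketch of the handler documented above; the context-manager usage follows the deprecation note in the docstring, and the import path plus the stand-in runnable are my assumptions:

```python
# Hedged sketch: assumes FileCallbackHandler is exported from langchain_core.callbacks.
from langchain_core.callbacks import FileCallbackHandler
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x.upper())  # stand-in for a real chain

with FileCallbackHandler("run.log", mode="a") as handler:
    # Pass the handler via callbacks so run events are written to run.log.
    chain.invoke("hello", config={"callbacks": [handler]})
```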
@@ -79,13 +79,13 @@ def trace_as_chain_group(

    Args:
        group_name: The name of the chain group.
-       callback_manager: The callback manager to use. Defaults to `None`.
-       inputs: The inputs to the chain group. Defaults to `None`.
-       project_name: The name of the project. Defaults to `None`.
-       example_id: The ID of the example. Defaults to `None`.
+       callback_manager: The callback manager to use.
+       inputs: The inputs to the chain group.
+       project_name: The name of the project.
+       example_id: The ID of the example.
        run_id: The ID of the run.
-       tags: The inheritable tags to apply to all runs. Defaults to `None`.
-       metadata: The metadata to apply to all runs. Defaults to `None`.
+       tags: The inheritable tags to apply to all runs.
+       metadata: The metadata to apply to all runs.

    !!! note
        Must have `LANGCHAIN_TRACING_V2` env var set to true to see the trace in
@@ -155,13 +155,13 @@ async def atrace_as_chain_group(
    Args:
        group_name: The name of the chain group.
        callback_manager: The async callback manager to use,
-           which manages tracing and other callback behavior. Defaults to `None`.
-       inputs: The inputs to the chain group. Defaults to `None`.
-       project_name: The name of the project. Defaults to `None`.
-       example_id: The ID of the example. Defaults to `None`.
+           which manages tracing and other callback behavior.
+       inputs: The inputs to the chain group.
+       project_name: The name of the project.
+       example_id: The ID of the example.
        run_id: The ID of the run.
-       tags: The inheritable tags to apply to all runs. Defaults to `None`.
-       metadata: The metadata to apply to all runs. Defaults to `None`.
+       tags: The inheritable tags to apply to all runs.
+       metadata: The metadata to apply to all runs.

    Yields:
        The async callback manager for the chain group.
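A usage sketch for grouping runs under a single trace, based on the arguments documented above (the group name, inputs, and stand-in chain are illustrative; `LANGCHAIN_TRACING_V2` must be set, per the note):

```python
# Hedged sketch, assuming trace_as_chain_group is importable from
# langchain_core.callbacks.manager.
from langchain_core.callbacks.manager import trace_as_chain_group
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda q: f"echo: {q}")  # stand-in for a real chain

with trace_as_chain_group("qa_group", inputs={"question": "What is LangChain?"}) as manager:
    # Child runs receive the group's callback manager so they nest under it.
    answer = chain.invoke("What is LangChain?", config={"callbacks": manager})
    manager.on_chain_end({"answer": answer})
```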
@@ -462,11 +462,11 @@ class BaseRunManager(RunManagerMixin):
        run_id: The ID of the run.
        handlers: The list of handlers.
        inheritable_handlers: The list of inheritable handlers.
-       parent_run_id: The ID of the parent run. Defaults to `None`.
-       tags: The list of tags. Defaults to `None`.
-       inheritable_tags: The list of inheritable tags. Defaults to `None`.
-       metadata: The metadata. Defaults to `None`.
-       inheritable_metadata: The inheritable metadata. Defaults to `None`.
+       parent_run_id: The ID of the parent run.
+       tags: The list of tags.
+       inheritable_tags: The list of inheritable tags.
+       metadata: The metadata.
+       inheritable_metadata: The inheritable metadata.

    """
    self.run_id = run_id
@@ -557,7 +557,7 @@ class ParentRunManager(RunManager):
    """Get a child callback manager.

    Args:
-       tag: The tag for the child callback manager. Defaults to `None`.
+       tag: The tag for the child callback manager.

    Returns:
        The child callback manager.
@@ -641,7 +641,7 @@ class AsyncParentRunManager(AsyncRunManager):
    """Get a child callback manager.

    Args:
-       tag: The tag for the child callback manager. Defaults to `None`.
+       tag: The tag for the child callback manager.

    Returns:
        The child callback manager.
@@ -1303,7 +1303,7 @@ class CallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized LLM.
        prompts: The list of prompts.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1354,7 +1354,7 @@ class CallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized LLM.
        messages: The list of messages.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1408,7 +1408,7 @@ class CallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized chain.
        inputs: The inputs to the chain.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1457,8 +1457,8 @@ class CallbackManager(BaseCallbackManager):
        serialized: Serialized representation of the tool.
        input_str: The input to the tool as a string.
            Non-string inputs are cast to strings.
-       run_id: ID for the run. Defaults to `None`.
-       parent_run_id: The ID of the parent run. Defaults to `None`.
+       run_id: ID for the run.
+       parent_run_id: The ID of the parent run.
        inputs: The original input to the tool if provided.
            Recommended for usage instead of input_str when the original
            input is needed.
@@ -1512,8 +1512,8 @@ class CallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized retriever.
        query: The query.
-       run_id: The ID of the run. Defaults to `None`.
-       parent_run_id: The ID of the parent run. Defaults to `None`.
+       run_id: The ID of the run.
+       parent_run_id: The ID of the parent run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1562,7 +1562,7 @@ class CallbackManager(BaseCallbackManager):
    Args:
        name: The name of the adhoc event.
        data: The data for the adhoc event.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.

    Raises:
        ValueError: If additional keyword arguments are passed.
@@ -1782,7 +1782,7 @@ class AsyncCallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized LLM.
        prompts: The list of prompts.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1870,7 +1870,7 @@ class AsyncCallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized LLM.
        messages: The list of messages.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1941,7 +1941,7 @@ class AsyncCallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized chain.
        inputs: The inputs to the chain.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -1988,8 +1988,8 @@ class AsyncCallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized tool.
        input_str: The input to the tool.
-       run_id: The ID of the run. Defaults to `None`.
-       parent_run_id: The ID of the parent run. Defaults to `None`.
+       run_id: The ID of the run.
+       parent_run_id: The ID of the parent run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -2038,7 +2038,7 @@ class AsyncCallbackManager(BaseCallbackManager):
    Args:
        name: The name of the adhoc event.
        data: The data for the adhoc event.
-       run_id: The ID of the run. Defaults to `None`.
+       run_id: The ID of the run.

    Raises:
        ValueError: If additional keyword arguments are passed.
@@ -2082,8 +2082,8 @@ class AsyncCallbackManager(BaseCallbackManager):
    Args:
        serialized: The serialized retriever.
        query: The query.
-       run_id: The ID of the run. Defaults to `None`.
-       parent_run_id: The ID of the parent run. Defaults to `None`.
+       run_id: The ID of the run.
+       parent_run_id: The ID of the parent run.
        **kwargs: Additional keyword arguments.

    Returns:
@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
    """Initialize callback handler.

    Args:
-       color: The color to use for the text. Defaults to `None`.
+       color: The color to use for the text.
    """
    self.color = color

@@ -61,7 +61,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):

    Args:
        action: The agent action.
-       color: The color to use for the text. Defaults to `None`.
+       color: The color to use for the text.
        **kwargs: Additional keyword arguments.
    """
    print_text(action.log, color=color or self.color)
@@ -79,9 +79,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):

    Args:
        output: The output to print.
-       color: The color to use for the text. Defaults to `None`.
-       observation_prefix: The observation prefix. Defaults to `None`.
-       llm_prefix: The LLM prefix. Defaults to `None`.
+       color: The color to use for the text.
+       observation_prefix: The observation prefix.
+       llm_prefix: The LLM prefix.
        **kwargs: Additional keyword arguments.
    """
    output = str(output)
@@ -103,7 +103,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):

    Args:
        text: The text to print.
-       color: The color to use for the text. Defaults to `None`.
+       color: The color to use for the text.
        end: The end character to use. Defaults to "".
        **kwargs: Additional keyword arguments.
    """
@@ -117,7 +117,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):

    Args:
        finish: The agent finish.
-       color: The color to use for the text. Defaults to `None`.
+       color: The color to use for the text.
        **kwargs: Additional keyword arguments.
    """
    print_text(finish.log, color=color or self.color, end="\n")
@@ -60,15 +60,15 @@ class LangSmithLoader(BaseLoader):
    """Create a LangSmith loader.

    Args:
-       dataset_id: The ID of the dataset to filter by. Defaults to `None`.
-       dataset_name: The name of the dataset to filter by. Defaults to `None`.
+       dataset_id: The ID of the dataset to filter by.
+       dataset_name: The name of the dataset to filter by.
        content_key: The inputs key to set as Document page content. `'.'` characters
            are interpreted as nested keys. E.g. `content_key="first.second"` will
            result in
            `Document(page_content=format_content(example.inputs["first"]["second"]))`
        format_content: Function for converting the content extracted from the example
            inputs into a string. Defaults to JSON-encoding the contents.
-       example_ids: The IDs of the examples to filter by. Defaults to `None`.
+       example_ids: The IDs of the examples to filter by.
        as_of: The dataset version tag OR
            timestamp to retrieve the examples as of.
            Response examples will only be those that were present at the time
@@ -79,7 +79,7 @@ class LangSmithLoader(BaseLoader):
        inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
        offset: The offset to start from. Defaults to 0.
        limit: The maximum number of examples to return.
-       metadata: Metadata to filter by. Defaults to `None`.
+       metadata: Metadata to filter by.
        filter: A structured filter string to apply to the examples.
        client: LangSmith Client. If not provided will be initialized from below args.
        client_kwargs: Keyword args to pass to LangSmith client init. Should only be
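A sketch of the loader constructed with the arguments documented above; the dataset name is illustrative and the nested `content_key` follows the docstring's own example:

```python
# Hedged sketch, assuming LangSmithLoader is exported from
# langchain_core.document_loaders and LangSmith credentials are configured.
from langchain_core.document_loaders import LangSmithLoader

loader = LangSmithLoader(
    dataset_name="my-dataset",    # filter examples by dataset name
    content_key="first.second",   # nested inputs key, per the docstring
    limit=10,
)
docs = loader.load()
```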
@@ -33,9 +33,9 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
    Args:
        error: The error that's being re-raised or an error message.
        observation: String explanation of error which can be passed to a
-           model to try and remediate the issue. Defaults to `None`.
+           model to try and remediate the issue.
        llm_output: String model output which is error-ing.
-           Defaults to `None`.
        send_to_llm: Whether to send the observation and llm_output back to an Agent
            after an OutputParserException has been raised.
            This gives the underlying model driving the agent the context that the
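A sketch of raising the exception with the documented arguments; the message strings are illustrative:

```python
from langchain_core.exceptions import OutputParserException

raw_output = "not-valid-json"  # stand-in for the model output that failed to parse
raise OutputParserException(
    "Failed to parse model output as JSON.",
    observation="Reformat your answer as a JSON object.",
    llm_output=raw_output,
    send_to_llm=True,  # hand observation + llm_output back to the agent's model
)
```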
@@ -278,10 +278,10 @@ class InMemoryRecordManager(RecordManager):
    Args:
        keys: A list of record keys to upsert.
        group_ids: A list of group IDs corresponding to the keys.
-           Defaults to `None`.
        time_at_least: Optional timestamp. Implementation can use this
            to optionally verify that the timestamp IS at least this time
-           in the system that stores. Defaults to `None`.
+           in the system that stores.
            E.g., use to validate that the time in the postgres database
            is equal to or larger than the given timestamp, if not
            raise an error.
@@ -315,10 +315,10 @@ class InMemoryRecordManager(RecordManager):
    Args:
        keys: A list of record keys to upsert.
        group_ids: A list of group IDs corresponding to the keys.
-           Defaults to `None`.
        time_at_least: Optional timestamp. Implementation can use this
            to optionally verify that the timestamp IS at least this time
-           in the system that stores. Defaults to `None`.
+           in the system that stores.
            E.g., use to validate that the time in the postgres database
            is equal to or larger than the given timestamp, if not
            raise an error.
@@ -361,13 +361,13 @@ class InMemoryRecordManager(RecordManager):

    Args:
        before: Filter to list records updated before this time.
-           Defaults to `None`.
        after: Filter to list records updated after this time.
-           Defaults to `None`.
        group_ids: Filter to list records with specific group IDs.
-           Defaults to `None`.
        limit: optional limit on the number of records to return.
-           Defaults to `None`.

    Returns:
        A list of keys for the matching records.
@@ -397,13 +397,13 @@ class InMemoryRecordManager(RecordManager):

    Args:
        before: Filter to list records updated before this time.
-           Defaults to `None`.
        after: Filter to list records updated after this time.
-           Defaults to `None`.
        group_ids: Filter to list records with specific group IDs.
-           Defaults to `None`.
        limit: optional limit on the number of records to return.
-           Defaults to `None`.

    Returns:
        A list of keys for the matching records.
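A sketch of the record manager API whose docstrings are cleaned up above; the namespace and keys are illustrative:

```python
# Hedged sketch, assuming InMemoryRecordManager is exported from
# langchain_core.indexing.
from langchain_core.indexing import InMemoryRecordManager

manager = InMemoryRecordManager(namespace="demo")
manager.create_schema()
manager.update(["doc-1", "doc-2"], group_ids=["g1", "g1"])  # upsert keys
print(manager.list_keys(group_ids=["g1"]))  # keys filtered by group ID
```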
@@ -35,7 +35,7 @@ def is_openai_data_block(
        different type, this function will return False.

    Returns:
-       True if the block is a valid OpenAI data block and matches the filter_
+       `True` if the block is a valid OpenAI data block and matches the filter_
        (if provided).

    """
@@ -123,7 +123,6 @@ class BaseLanguageModel(
        * If instance of `BaseCache`, will use the provided cache.

-       Caching is not currently supported for streaming methods of models.
    """
    verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
    """Whether to print out response text."""
@@ -146,7 +145,7 @@ class BaseLanguageModel(
    def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
        """If verbose is `None`, set it.

-       This allows users to pass in None as verbose to access the global setting.
+       This allows users to pass in `None` as verbose to access the global setting.

        Args:
            verbose: The verbosity setting to use.
@@ -186,12 +185,12 @@ class BaseLanguageModel(
        1. Take advantage of batched calls,
        2. Need more output from the model than just the top generated value,
        3. Are building chains that are agnostic to the underlying language model
-          type (e.g., pure text completion models vs chat models).
+           type (e.g., pure text completion models vs chat models).

        Args:
-           prompts: List of PromptValues. A PromptValue is an object that can be
-               converted to match the format of any language model (string for pure
-               text generation models and BaseMessages for chat models).
+           prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+               can be converted to match the format of any language model (string for
+               pure text generation models and `BaseMessage` objects for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional
@@ -200,8 +199,8 @@ class BaseLanguageModel(
                to the model provider API call.

        Returns:
-           An LLMResult, which contains a list of candidate Generations for each input
-               prompt and additional model provider-specific output.
+           An `LLMResult`, which contains a list of candidate `Generation` objects for
+               each input prompt and additional model provider-specific output.

        """
@@ -223,12 +222,12 @@ class BaseLanguageModel(
        1. Take advantage of batched calls,
        2. Need more output from the model than just the top generated value,
        3. Are building chains that are agnostic to the underlying language model
-          type (e.g., pure text completion models vs chat models).
+           type (e.g., pure text completion models vs chat models).

        Args:
-           prompts: List of PromptValues. A PromptValue is an object that can be
-               converted to match the format of any language model (string for pure
-               text generation models and BaseMessages for chat models).
+           prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+               can be converted to match the format of any language model (string for
+               pure text generation models and `BaseMessage` objects for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional
@@ -237,8 +236,8 @@ class BaseLanguageModel(
                to the model provider API call.

        Returns:
-           An `LLMResult`, which contains a list of candidate Generations for each
-               input prompt and additional model provider-specific output.
+           An `LLMResult`, which contains a list of candidate `Generation` objects for
+               each input prompt and additional model provider-specific output.

        """
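A sketch of the `generate_prompt` contract documented above (assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set; the model name is illustrative):

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")
# A PromptValue can be converted to whatever format the model expects.
prompt_value = ChatPromptTemplate.from_template("Say {word}").format_prompt(word="hi")
result = model.generate_prompt([prompt_value])  # -> LLMResult
print(result.generations[0][0].text)  # one list of candidate Generations per prompt
```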
@@ -240,78 +240,54 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di


 class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
-   """Base class for chat models.
+   r"""Base class for chat models.

    Key imperative methods:
        Methods that actually call the underlying model.

-       +---------------------+-----------+-----------+-------------+
-       | Method | Input | Output | Description |
-       +=====================+===========+===========+=============+
-       | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
-       +---------------------+-----------+-----------+-------------+
-       | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
-       +---------------------+-----------+-----------+-------------+
-       | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
-       +---------------------+-----------+-----------+-------------+
-       | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
-       +---------------------+-----------+-----------+-------------+
-       | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
-       +---------------------+-----------+-----------+-------------+
-       | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
-       +---------------------+-----------+-----------+-------------+
-       | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
-       +---------------------+-----------+-----------+-------------+
-       | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
-       +---------------------+-----------+-----------+-------------+
-       | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
-       +---------------------+-----------+-----------+-------------+
-       This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.
+       This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.
+
+       | Method                 | Input                                                          | Output                                                      | Description                                                                        |
+       | ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
+       | `invoke`               | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage`                                               | A single chat model call.                                                          |
+       | `ainvoke`              | `'''`                                                        | `BaseMessage`                                               | Defaults to running `invoke` in an async executor.                                 |
+       | `stream`               | `'''`                                                        | `Iterator[BaseMessageChunk]`                                | Defaults to yielding output of `invoke`.                                           |
+       | `astream`              | `'''`                                                        | `AsyncIterator[BaseMessageChunk]`                           | Defaults to yielding output of `ainvoke`.                                          |
+       | `astream_events`       | `'''`                                                        | `AsyncIterator[StreamEvent]`                                | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`.   |
+       | `batch`                | `list[''']`                                                  | `list[BaseMessage]`                                         | Defaults to running `invoke` in concurrent threads.                                |
+       | `abatch`               | `list[''']`                                                  | `list[BaseMessage]`                                         | Defaults to running `ainvoke` in concurrent threads.                               |
+       | `batch_as_completed`   | `list[''']`                                                  | `Iterator[tuple[int, Union[BaseMessage, Exception]]]`       | Defaults to running `invoke` in concurrent threads.                                |
+       | `abatch_as_completed`  | `list[''']`                                                  | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]`  | Defaults to running `ainvoke` in concurrent threads.                               |

    Key declarative methods:
-       Methods for creating another Runnable using the ChatModel.
-
-       +-----------------------------+-------------------------------------------------------------------------------------------+
-       | Method | Description |
-       +=============================+===========================================================================================+
-       | `bind_tools` | Create ChatModel that can call tools. |
-       +-----------------------------+-------------------------------------------------------------------------------------------+
-       | `with_structured_output` | Create wrapper that structures model output using schema. |
-       +-----------------------------+-------------------------------------------------------------------------------------------+
-       | `with_retry` | Create wrapper that retries model calls on failure. |
-       +-----------------------------+-------------------------------------------------------------------------------------------+
-       | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
-       +-----------------------------+-------------------------------------------------------------------------------------------+
-       | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
-       +-----------------------------+-------------------------------------------------------------------------------------------+
-       | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
-       +-----------------------------+-------------------------------------------------------------------------------------------+
+       Methods for creating another `Runnable` using the chat model.
+
+       This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.
+
+       | Method                       | Description                                                                                  |
+       | ---------------------------- | -------------------------------------------------------------------------------------------- |
+       | `bind_tools`                 | Create chat model that can call tools.                                                        |
+       | `with_structured_output`     | Create wrapper that structures model output using schema.                                     |
+       | `with_retry`                 | Create wrapper that retries model calls on failure.                                           |
+       | `with_fallbacks`             | Create wrapper that falls back to other models on failure.                                    |
+       | `configurable_fields`        | Specify init args of the model that can be configured at runtime via the `RunnableConfig`.    |
+       | `configurable_alternatives`  | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`.       |

    Creating custom chat model:
        Custom chat model implementations should inherit from this class.
        Please reference the table below for information about which
        methods and properties are required or optional for implementations.

-       +----------------------------------+--------------------------------------------------------------------+-------------------+
        | Method/Property                  | Description                                                        | Required/Optional |
-       +==================================+====================================================================+===================+
+       | -------------------------------- | ------------------------------------------------------------------ | ----------------- |
        | `_generate`                      | Use to generate a chat result from a prompt                        | Required          |
-       +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_llm_type` (property)           | Used to uniquely identify the type of the model. Used for logging. | Required          |
-       +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_identifying_params` (property) | Represent model parameterization for tracing purposes.             | Optional          |
-       +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_stream`                        | Use to implement streaming                                         | Optional          |
-       +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_agenerate`                     | Use to implement a native async method                             | Optional          |
-       +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_astream`                       | Use to implement async version of `_stream`                        | Optional          |
-       +----------------------------------+--------------------------------------------------------------------+-------------------+

-   Follow the guide for more information on how to implement a custom Chat Model:
+   Follow the guide for more information on how to implement a custom chat model:
    [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

    """  # noqa: E501
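A brief sketch of the imperative methods summarized in the table above (assumes `langchain-openai` and a configured `OPENAI_API_KEY`; the model name is illustrative):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")

msg = model.invoke("Hello")             # single call -> BaseMessage
for chunk in model.stream("Hello"):     # streaming -> BaseMessageChunk per token
    print(chunk.text, end="")
results = model.batch(["Hi", "Bye"])    # concurrent invokes -> list[BaseMessage]
```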
@@ -327,9 +303,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

    - If `True`, will always bypass streaming case.
    - If `'tool_calling'`, will bypass streaming case only when the model is called
-       with a `tools` keyword argument. In other words, LangChain will automatically
-       switch to non-streaming behavior (`invoke`) only when the tools argument is
-       provided. This offers the best of both worlds.
+     with a `tools` keyword argument. In other words, LangChain will automatically
+     switch to non-streaming behavior (`invoke`) only when the tools argument is
+     provided. This offers the best of both worlds.
    - If `False` (Default), will always use streaming case if available.

    The main reason for this flag is that code might be written using `stream` and
@@ -349,7 +325,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
    Supported values:

    - `'v0'`: provider-specific format in content (can lazily-parse with
-       `.content_blocks`)
+     `.content_blocks`)
    - `'v1'`: standardized format in content (consistent with `.content_blocks`)

    Partner packages (e.g., `langchain-openai`) can also use this field to roll out
@@ -1579,10 +1555,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                justification: str


-           llm = ChatModel(model="model-name", temperature=0)
-           structured_llm = llm.with_structured_output(AnswerWithJustification)
+           model = ChatModel(model="model-name", temperature=0)
+           structured_model = model.with_structured_output(AnswerWithJustification)

-           structured_llm.invoke(
+           structured_model.invoke(
                "What weighs more a pound of bricks or a pound of feathers"
            )

@@ -1604,12 +1580,12 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                justification: str


-           llm = ChatModel(model="model-name", temperature=0)
-           structured_llm = llm.with_structured_output(
+           model = ChatModel(model="model-name", temperature=0)
+           structured_model = model.with_structured_output(
                AnswerWithJustification, include_raw=True
            )

-           structured_llm.invoke(
+           structured_model.invoke(
                "What weighs more a pound of bricks or a pound of feathers"
            )
            # -> {
@@ -1633,10 +1609,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):


            dict_schema = convert_to_openai_tool(AnswerWithJustification)
-           llm = ChatModel(model="model-name", temperature=0)
-           structured_llm = llm.with_structured_output(dict_schema)
+           model = ChatModel(model="model-name", temperature=0)
+           structured_model = model.with_structured_output(dict_schema)

-           structured_llm.invoke(
+           structured_model.invoke(
                "What weighs more a pound of bricks or a pound of feathers"
            )
            # -> {
@@ -63,14 +63,14 @@ class Reviver:
    Args:
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
-           is True. Defaults to `None`.
+           is True.
        valid_namespaces: A list of additional namespaces (modules)
-           to allow to be deserialized. Defaults to `None`.
+           to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
-           Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.
    """
@@ -200,14 +200,14 @@ def loads(
        text: The string to load.
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
-           is True. Defaults to `None`.
+           is True.
        valid_namespaces: A list of additional namespaces (modules)
-           to allow to be deserialized. Defaults to `None`.
+           to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
-           Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.
@@ -245,14 +245,14 @@ def load(
        obj: The object to load.
        secrets_map: A map of secrets to load. If a secret is not found in
            the map, it will be loaded from the environment if `secrets_from_env`
-           is True. Defaults to `None`.
+           is True.
        valid_namespaces: A list of additional namespaces (modules)
-           to allow to be deserialized. Defaults to `None`.
+           to allow to be deserialized.
        secrets_from_env: Whether to load secrets from the environment.
            Defaults to `True`.
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
-           Defaults to `None`.
        ignore_unserializable_fields: Whether to ignore unserializable fields.
            Defaults to `False`.
@@ -25,9 +25,9 @@ class BaseSerialized(TypedDict):

id: list[str]
"""The unique identifier of the object."""
name: NotRequired[str]
"""The name of the object. Optional."""
"""The name of the object."""
graph: NotRequired[dict[str, Any]]
"""The graph of the object. Optional."""
"""The graph of the object."""


class SerializedConstructor(BaseSerialized):
@@ -52,7 +52,7 @@ class SerializedNotImplemented(BaseSerialized):

type: Literal["not_implemented"]
"""The type of the object. Must be `'not_implemented'`."""
repr: str | None
"""The representation of the object. Optional."""
"""The representation of the object."""


def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -61,7 +61,7 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:

Args:
value: The value.
key: The key.
model: The pydantic model.
model: The Pydantic model.

Returns:
Whether the value is different from the default.
@@ -93,18 +93,18 @@ class Serializable(BaseModel, ABC):

It relies on the following methods and properties:

- `is_lc_serializable`: Is this class serializable?
By design, even if a class inherits from Serializable, it is not serializable by
default. This is to prevent accidental serialization of objects that should not
be serialized.
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.
- `get_lc_namespace`: Get the namespace of the langchain object.
During deserialization, this namespace is used to identify
the correct class to instantiate.
Please see the `Reviver` class in `langchain_core.load.load` for more details.
During deserialization an additional mapping is used to handle
classes that have moved or been renamed across package versions.
During deserialization an additional mapping is used to handle classes that
have moved or been renamed across package versions.
- `lc_secrets`: A map of constructor argument names to secret ids.
- `lc_attributes`: List of additional attribute names that should be included
as part of the serialized representation.
as part of the serialized representation.
"""

# Remove default BaseModel init docstring.
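A hedged sketch of the contract described above; the class, fields, and namespace are hypothetical:

```python
# Hedged sketch; MyClient, its fields, and the namespace are hypothetical.
from langchain_core.load.serializable import Serializable


class MyClient(Serializable):
    endpoint: str
    api_key: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Opt in explicitly; inheriting from Serializable is not enough.
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        # Used during deserialization to locate the class.
        return ["my_package", "clients"]

    @property
    def lc_secrets(self) -> dict[str, str]:
        # Serialize the key as a secret id, never as the raw value.
        return {"api_key": "MY_API_KEY"}
```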
@@ -116,12 +116,12 @@ class Serializable(BaseModel, ABC):

def is_lc_serializable(cls) -> bool:
"""Is this class serializable?

By design, even if a class inherits from Serializable, it is not serializable by
default. This is to prevent accidental serialization of objects that should not
be serialized.
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.

Returns:
Whether the class is serializable. Default is False.
Whether the class is serializable. Default is `False`.
"""
return False

@@ -133,7 +133,7 @@ class Serializable(BaseModel, ABC):

namespace is ["langchain", "llms", "openai"]

Returns:
The namespace as a list of strings.
The namespace.
"""
return cls.__module__.split(".")

@@ -141,8 +141,7 @@ class Serializable(BaseModel, ABC):

def lc_secrets(self) -> dict[str, str]:
"""A map of constructor argument names to secret ids.

For example,
{"openai_api_key": "OPENAI_API_KEY"}
For example, `{"openai_api_key": "OPENAI_API_KEY"}`
"""
return {}

@@ -151,6 +150,7 @@ class Serializable(BaseModel, ABC):

"""List of attribute names that should be included in the serialized kwargs.

These attributes must be accepted by the constructor.

Default is an empty dictionary.
"""
return {}
@@ -194,7 +194,7 @@ class Serializable(BaseModel, ABC):

ValueError: If the class has deprecated attributes.

Returns:
A json serializable object or a SerializedNotImplemented object.
A json serializable object or a `SerializedNotImplemented` object.
"""
if not self.is_lc_serializable():
return self.to_json_not_implemented()
@@ -269,7 +269,7 @@ class Serializable(BaseModel, ABC):

"""Serialize a "not implemented" object.

Returns:
SerializedNotImplemented.
`SerializedNotImplemented`.
"""
return to_json_not_implemented(self)

@@ -284,8 +284,8 @@ def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:

Returns:
Whether the field is useful. If the field is required, it is useful.
If the field is not required, it is useful if the value is not None.
If the field is not required and the value is None, it is useful if the
If the field is not required, it is useful if the value is not `None`.
If the field is not required and the value is `None`, it is useful if the
default value is different from the value.
"""
field = type(inst).model_fields.get(key)
@@ -344,10 +344,10 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:

"""Serialize a "not implemented" object.

Args:
obj: object to serialize.
obj: Object to serialize.

Returns:
SerializedNotImplemented
`SerializedNotImplemented`
"""
id_: list[str] = []
try:
@@ -1,39 +1,135 @@

"""Derivations of standard content blocks from Groq content."""

import warnings
import json
import re
from typing import Any

from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types

WARNED = False
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs


def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
"""Derive standard content blocks from a message with Groq content."""
global WARNED  # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Groq."
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block

for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {}  # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value  # type: ignore[typeddict-item]

return standard_block


def _parse_code_json(s: str) -> dict:
"""Extract Python code from Groq built-in tool content.

Extracts the value of the 'code' field from a string of the form:
{"code": some_arbitrary_text_with_unescaped_quotes}

As Groq may not escape quotes in the executed tools, e.g.:
```
'{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
```
"""  # noqa: E501
m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
if not m:
msg = (
"Could not extract Python code from Groq tool arguments. "
"Expected a JSON object with a 'code' field."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
raise ValueError(msg)
return {"code": m.group(1)}


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
"""Derive standard content blocks from a message chunk with Groq content."""
global WARNED  # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Groq."
def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
"""Convert groq message content to v1 format."""
content_blocks: list[types.ContentBlock] = []

if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
content_blocks.append(reasoning_block)

if executed_tools := message.additional_kwargs.get("executed_tools"):
for idx, executed_tool in enumerate(executed_tools):
args: dict[str, Any] | None = None
if arguments := executed_tool.get("arguments"):
try:
args = json.loads(arguments)
except json.JSONDecodeError:
if executed_tool.get("type") == "python":
try:
args = _parse_code_json(arguments)
except ValueError:
continue
elif (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
# GPT-OSS
args = {"code": arguments}
else:
continue
if isinstance(args, dict):
name = ""
if executed_tool.get("type") == "search":
name = "web_search"
elif executed_tool.get("type") == "python" or (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
name = "code_interpreter"
server_tool_call: types.ServerToolCall = {
"type": "server_tool_call",
"name": name,
"id": str(idx),
"args": args,
}
content_blocks.append(server_tool_call)
if tool_output := executed_tool.get("output"):
tool_result: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": str(idx),
"output": tool_output,
"status": "success",
}
known_fields = {"type", "arguments", "index", "output"}
_populate_extras(tool_result, executed_tool, known_fields)
content_blocks.append(tool_result)

if isinstance(message.content, str) and message.content:
content_blocks.append({"type": "text", "text": message.content})

for tool_call in message.tool_calls:
content_blocks.append(  # noqa: PERF401
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError

return content_blocks


def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with groq content."""
return _convert_to_v1_from_groq(message)


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with groq content."""
return _convert_to_v1_from_groq(message)


def _register_groq_translator() -> None:
"""Register the Groq translator with the central registry.
"""Register the groq translator with the central registry.

Run automatically when the module is imported.
"""
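A quick, self-contained check of the fallback regex added in this hunk, using only the standard library; the input string mirrors the docstring's example:

```python
# Hedged demo of the regex fallback: the unescaped inner quotes break
# json.loads, but the fullmatch pattern still recovers the code field.
import json
import re

raw = '{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'

try:
    json.loads(raw)
except json.JSONDecodeError:
    m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', raw, flags=re.DOTALL)
    assert m is not None
    print(m.group(1))
    # import math; print("The square root of 101 is: "); print(math.sqrt(101))
```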
@@ -877,7 +877,7 @@ def is_data_content_block(block: dict) -> bool:

block: The content block to check.

Returns:
True if the content block is a data content block, False otherwise.
`True` if the content block is a data content block, `False` otherwise.

"""
if block.get("type") not in _get_data_content_block_types():

@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.

Raises:
OutputParserException: If the output is not valid JSON.
`OutputParserException`: If the output is not valid JSON.
"""
generation = result[0]
if not isinstance(generation, ChatGeneration):
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):


class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse an output as the Json object."""
"""Parse an output as the JSON object."""

strict: bool = False
"""Whether to allow non-JSON-compliant strings.
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.

Raises:
OutputParserException: If the output is not valid JSON.
`OutputParserException`: If the output is not valid JSON.
"""
if len(result) != 1:
msg = f"Expected exactly one result, but got {len(result)}"
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):


class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
"""Parse an output as the element of the Json object."""
"""Parse an output as the element of the JSON object."""

key_name: str
"""The name of the key to return."""
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.
@@ -177,16 +177,15 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):


class PydanticOutputFunctionsParser(OutputFunctionsParser):
"""Parse an output as a pydantic object.
"""Parse an output as a Pydantic object.

This parser is used to parse the output of a ChatModel that uses
OpenAI function format to invoke functions.
This parser is used to parse the output of a chat model that uses OpenAI function
format to invoke functions.

The parser extracts the function call invocation and matches
them to the pydantic schema provided.
The parser extracts the function call invocation and matches them to the Pydantic
schema provided.

An exception will be raised if the function call does not match
the provided schema.
An exception will be raised if the function call does not match the provided schema.

Example:
```python
@@ -221,7 +220,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

"""

pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
"""The pydantic schema to parse the output with.
"""The Pydantic schema to parse the output with.

If multiple schemas are provided, then the function name will be used to
determine which schema to use.
@@ -230,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

@model_validator(mode="before")
@classmethod
def validate_schema(cls, values: dict) -> Any:
"""Validate the pydantic schema.
"""Validate the Pydantic schema.

Args:
values: The values to validate.
@@ -239,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

The validated values.

Raises:
ValueError: If the schema is not a pydantic schema.
`ValueError`: If the schema is not a Pydantic schema.
"""
schema = values["pydantic_schema"]
if "args_only" not in values:
@@ -262,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Raises:
ValueError: If the pydantic schema is not valid.
`ValueError`: If the Pydantic schema is not valid.

Returns:
The parsed JSON object.
@@ -288,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

elif issubclass(pydantic_schema, BaseModelV1):
pydantic_args = pydantic_schema.parse_raw(args)
else:
msg = f"Unsupported pydantic schema: {pydantic_schema}"
msg = f"Unsupported Pydantic schema: {pydantic_schema}"
raise ValueError(msg)
return pydantic_args


class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
"""Parse an output as an attribute of a pydantic object."""
"""Parse an output as an attribute of a Pydantic object."""

attr_name: str
"""The name of the attribute to return."""
@@ -305,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects. Default is False.
partial: Whether to parse partial JSON objects.

Returns:
The parsed JSON object.

@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (


class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
"""Parse an output using a Pydantic model."""

pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
"""The pydantic model to parse."""
"""The Pydantic model to parse."""

def _parse_obj(self, obj: dict) -> TBaseModel:
try:
@@ -45,21 +45,20 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):

def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> TBaseModel | None:
"""Parse the result of an LLM call to a pydantic object.
"""Parse the result of an LLM call to a Pydantic object.

Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to `False`.

Raises:
OutputParserException: If the result is not valid JSON
or does not conform to the pydantic model.
`OutputParserException`: If the result is not valid JSON
or does not conform to the Pydantic model.

Returns:
The parsed pydantic object.
The parsed Pydantic object.
"""
try:
json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):

raise

def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
"""Parse the output of an LLM call to a Pydantic object.

Args:
text: The output of the LLM call.

Returns:
The parsed pydantic object.
The parsed Pydantic object.
"""
return super().parse(text)

@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):

@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
"""Return the Pydantic model."""
return self.pydantic_object
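A hedged sketch of `PydanticOutputParser` on its own, without a model call; the `Joke` schema is hypothetical:

```python
# Hedged sketch; the Joke schema and the raw string are illustrative only.
from pydantic import BaseModel

from langchain_core.output_parsers import PydanticOutputParser


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputParser(pydantic_object=Joke)
raw = '{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}'
joke = parser.parse(raw)
assert isinstance(joke, Joke)

# The format instructions are what you inject into a prompt to steer the model.
print(parser.get_format_instructions())
```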
@@ -97,7 +97,7 @@ class LLMResult(BaseModel):

other: Another `LLMResult` object to compare against.

Returns:
True if the generations and `llm_output` are equal, False otherwise.
`True` if the generations and `llm_output` are equal, `False` otherwise.
"""
if not isinstance(other, LLMResult):
return NotImplemented

@@ -135,7 +135,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

n_messages: PositiveInt | None = None
"""Maximum number of messages to include. If `None`, then will include all.
Defaults to `None`."""
"""

def __init__(
self, variable_name: str, *, optional: bool = False, **kwargs: Any
@@ -241,7 +241,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):

`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
Defaults to `None`.

**kwargs: keyword arguments to pass to the constructor.

Returns:
@@ -414,7 +414,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):

template_format: format of the template.
Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
partial_variables: A dictionary of variables that can be used to partially
fill in the template.
Defaults to `None`.

**kwargs: keyword arguments to pass to the constructor.

Returns:
@@ -139,7 +139,7 @@ def load_prompt(path: str | Path, encoding: str | None = None) -> BasePromptTemp

Args:
path: Path to the prompt file.
encoding: Encoding of the file. Defaults to `None`.
encoding: Encoding of the file.

Returns:
A PromptTemplate object.

@@ -281,7 +281,7 @@ class PromptTemplate(StringPromptTemplate):

fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`. Defaults to `None`.
`"foo {variable2}"`.
**kwargs: Any other arguments to pass to the prompt template.

Returns:
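A small sketch of the `partial_variables` behavior the docstring describes, using the same variable names as the example text:

```python
# Sketch of partial_variables: variable1 is pre-filled, variable2 at format time.
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "{variable1} {variable2}",
    partial_variables={"variable1": "foo"},
)
print(prompt.format(variable2="bar"))  # -> "foo bar"
```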
@@ -4,7 +4,7 @@ from __future__ import annotations

import warnings
from abc import ABC
from collections.abc import Callable
from collections.abc import Callable, Sequence
from string import Formatter
from typing import Any, Literal

@@ -149,9 +149,7 @@ def mustache_template_vars(

Defs = dict[str, "Defs"]


def mustache_schema(
template: str,
) -> type[BaseModel]:
def mustache_schema(template: str) -> type[BaseModel]:
"""Get the variables from a mustache template.

Args:
@@ -175,6 +173,11 @@ def mustache_schema(

fields[prefix] = False
elif type_ in {"variable", "no escape"}:
fields[prefix + tuple(key.split("."))] = True

for fkey, fval in fields.items():
fields[fkey] = fval and not any(
is_subsequence(fkey, k) for k in fields if k != fkey
)
defs: Defs = {}  # None means leaf node
while fields:
field, is_leaf = fields.popitem()
@@ -327,3 +330,12 @@ class StringPromptTemplate(BasePromptTemplate, ABC):

def pretty_print(self) -> None:
"""Print a pretty representation of the prompt."""
print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201


def is_subsequence(child: Sequence, parent: Sequence) -> bool:
"""Return True if child is subsequence of parent."""
if len(child) == 0 or len(parent) == 0:
return False
if len(parent) < len(child):
return False
return all(child[i] == parent[i] for i in range(len(child)))
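A hedged sketch of `mustache_schema`, assuming it continues to live in `langchain_core.prompts.string`; dotted keys become nested fields in the generated model:

```python
# Hedged sketch; the module path and printed key order are assumptions.
from langchain_core.prompts.string import mustache_schema

schema = mustache_schema("Hello {{user.name}}, welcome to {{site}}")
print(schema.model_json_schema()["properties"].keys())
# e.g. dict_keys(['user', 'site']) -- 'user' carries a nested 'name' field
```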
@@ -144,7 +144,7 @@ class StructuredPrompt(ChatPromptTemplate):

Args:
others: The language model to pipe the structured prompt to.
name: The name of the pipeline. Defaults to `None`.
name: The name of the pipeline.

Returns:
A RunnableSequence object.

@@ -1,30 +0,0 @@

"""Pydantic v1 compatibility shim."""

from importlib import metadata

from pydantic.v1 import *  # noqa: F403

from langchain_core._api.deprecation import warn_deprecated

try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0

warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
@@ -1,23 +0,0 @@

"""Pydantic v1 compatibility shim."""

from pydantic.v1.dataclasses import *  # noqa: F403

from langchain_core._api import warn_deprecated

warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
@@ -1,23 +0,0 @@

"""Pydantic v1 compatibility shim."""

from pydantic.v1.main import *  # noqa: F403

from langchain_core._api import warn_deprecated

warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
@@ -44,7 +44,7 @@ class BaseRateLimiter(abc.ABC):

the attempt. Defaults to `True`.

Returns:
True if the tokens were successfully acquired, False otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""

@abc.abstractmethod
@@ -63,7 +63,7 @@ class BaseRateLimiter(abc.ABC):

the attempt. Defaults to `True`.

Returns:
True if the tokens were successfully acquired, False otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""


@@ -210,7 +210,7 @@ class InMemoryRateLimiter(BaseRateLimiter):

the attempt. Defaults to `True`.

Returns:
True if the tokens were successfully acquired, False otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""
if not blocking:
return self._consume()
@@ -234,7 +234,7 @@ class InMemoryRateLimiter(BaseRateLimiter):

the attempt. Defaults to `True`.

Returns:
True if the tokens were successfully acquired, False otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""
if not blocking:
return self._consume()
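A hedged sketch of wiring `InMemoryRateLimiter` into a chat model; `langchain_anthropic` and the model name are assumptions:

```python
# Hedged sketch; ChatAnthropic and the model id are assumptions.
from langchain_anthropic import ChatAnthropic
from langchain_core.rate_limiters import InMemoryRateLimiter

rate_limiter = InMemoryRateLimiter(
    requests_per_second=0.1,  # one request every 10 seconds
    check_every_n_seconds=0.1,  # poll the token bucket every 100 ms
    max_bucket_size=10,  # cap the burst size
)
model = ChatAnthropic(model="claude-3-5-sonnet-20240620", rate_limiter=rate_limiter)
# Each invoke() now blocks in acquire() until a token is available.
```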
@@ -7,7 +7,6 @@ the backbone of a retriever, but there are other types of retrievers as well.

from __future__ import annotations

import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import TYPE_CHECKING, Any
@@ -15,8 +14,6 @@ from typing import TYPE_CHECKING, Any

from pydantic import ConfigDict
from typing_extensions import Self, TypedDict, override

from langchain_core._api import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager
from langchain_core.documents import Document
from langchain_core.runnables import (
@@ -121,14 +118,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

_new_arg_supported: bool = False
_expects_other_args: bool = False
tags: list[str] | None = None
"""Optional list of tags associated with the retriever. Defaults to `None`.
"""Optional list of tags associated with the retriever.
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to e.g. identify a specific instance of a retriever with its
use case.
"""
metadata: dict[str, Any] | None = None
"""Optional metadata associated with the retriever. Defaults to `None`.
"""Optional metadata associated with the retriever.
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to e.g. identify a specific instance of a retriever with its
@@ -138,35 +135,6 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

@override
def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
# Version upgrade for old retrievers that implemented the public
# methods directly.
if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:
warnings.warn(
"Retrievers must implement abstract `_get_relevant_documents` method"
" instead of `get_relevant_documents`",
DeprecationWarning,
stacklevel=4,
)
swap = cls.get_relevant_documents
cls.get_relevant_documents = (  # type: ignore[method-assign]
BaseRetriever.get_relevant_documents
)
cls._get_relevant_documents = swap  # type: ignore[method-assign]
if (
hasattr(cls, "aget_relevant_documents")
and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
):
warnings.warn(
"Retrievers must implement abstract `_aget_relevant_documents` method"
" instead of `aget_relevant_documents`",
DeprecationWarning,
stacklevel=4,
)
aswap = cls.aget_relevant_documents
cls.aget_relevant_documents = (  # type: ignore[method-assign]
BaseRetriever.aget_relevant_documents
)
cls._aget_relevant_documents = aswap  # type: ignore[method-assign]
parameters = signature(cls._get_relevant_documents).parameters
cls._new_arg_supported = parameters.get("run_manager") is not None
if (
@@ -207,7 +175,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

Args:
input: The query string.
config: Configuration for the retriever. Defaults to `None`.
config: Configuration for the retriever.
**kwargs: Additional arguments to pass to the retriever.

Returns:
@@ -268,7 +236,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

Args:
input: The query string.
config: Configuration for the retriever. Defaults to `None`.
config: Configuration for the retriever.
**kwargs: Additional arguments to pass to the retriever.

Returns:
@@ -348,91 +316,3 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

query,
run_manager=run_manager.get_sync(),
)

@deprecated(since="0.1.46", alternative="invoke", removal="1.0")
def get_relevant_documents(
self,
query: str,
*,
callbacks: Callbacks = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
run_name: str | None = None,
**kwargs: Any,
) -> list[Document]:
"""Retrieve documents relevant to a query.

Users should favor using `.invoke` or `.batch` rather than
`get_relevant_documents` directly.

Args:
query: string to find relevant documents for.
callbacks: Callback manager or list of callbacks. Defaults to `None`.
tags: Optional list of tags associated with the retriever.
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Defaults to `None`.
metadata: Optional metadata associated with the retriever.
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Defaults to `None`.
run_name: Optional name for the run. Defaults to `None`.
**kwargs: Additional arguments to pass to the retriever.

Returns:
List of relevant documents.
"""
config: RunnableConfig = {}
if callbacks:
config["callbacks"] = callbacks
if tags:
config["tags"] = tags
if metadata:
config["metadata"] = metadata
if run_name:
config["run_name"] = run_name
return self.invoke(query, config, **kwargs)

@deprecated(since="0.1.46", alternative="ainvoke", removal="1.0")
async def aget_relevant_documents(
self,
query: str,
*,
callbacks: Callbacks = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
run_name: str | None = None,
**kwargs: Any,
) -> list[Document]:
"""Asynchronously get documents relevant to a query.

Users should favor using `.ainvoke` or `.abatch` rather than
`aget_relevant_documents` directly.

Args:
query: string to find relevant documents for.
callbacks: Callback manager or list of callbacks.
tags: Optional list of tags associated with the retriever.
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Defaults to `None`.
metadata: Optional metadata associated with the retriever.
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Defaults to `None`.
run_name: Optional name for the run. Defaults to `None`.
**kwargs: Additional arguments to pass to the retriever.

Returns:
List of relevant documents.
"""
config: RunnableConfig = {}
if callbacks:
config["callbacks"] = callbacks
if tags:
config["tags"] = tags
if metadata:
config["metadata"] = metadata
if run_name:
config["run_name"] = run_name
return await self.ainvoke(query, config, **kwargs)
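A hedged sketch of the migration this removal enforces: implement `_get_relevant_documents` and call `invoke()` rather than the deprecated `get_relevant_documents()`; `ToyRetriever` is hypothetical:

```python
# Hedged sketch; ToyRetriever and its matching logic are illustrative only.
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class ToyRetriever(BaseRetriever):
    documents: list[Document]
    k: int = 2

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Naive substring match, just to exercise the interface.
        matches = [
            d for d in self.documents if query.lower() in d.page_content.lower()
        ]
        return matches[: self.k]


retriever = ToyRetriever(documents=[Document(page_content="LangChain retrievers")])
print(retriever.invoke("retrievers", config={"tags": ["demo"]}))
```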
@@ -304,7 +304,7 @@ class Runnable(ABC, Generic[Input, Output]):

TypeError: If the input type cannot be inferred.
"""
# First loop through all parent classes and if any of them is
# a pydantic model, we will pick up the generic parameterization
# a Pydantic model, we will pick up the generic parameterization
# from that model via the __pydantic_generic_metadata__ attribute.
for base in self.__class__.mro():
if hasattr(base, "__pydantic_generic_metadata__"):
@@ -312,7 +312,7 @@ class Runnable(ABC, Generic[Input, Output]):

if "args" in metadata and len(metadata["args"]) == 2:
return metadata["args"][0]

# If we didn't find a pydantic model in the parent classes,
# If we didn't find a Pydantic model in the parent classes,
# then loop through __orig_bases__. This corresponds to
# Runnables that are not pydantic models.
for cls in self.__class__.__orig_bases__:  # type: ignore[attr-defined]
@@ -390,7 +390,7 @@ class Runnable(ABC, Generic[Input, Output]):

self.get_name("Input"),
root=root_type,
# create model needs access to appropriate type annotations to be
# able to construct the pydantic model.
# able to construct the Pydantic model.
# When we create the model, we pass information about the namespace
# where the model is being created, so the type annotations can
# be resolved correctly as well.
@@ -433,7 +433,7 @@ class Runnable(ABC, Generic[Input, Output]):

def output_schema(self) -> type[BaseModel]:
"""Output schema.

The type of output this `Runnable` produces specified as a pydantic model.
The type of output this `Runnable` produces specified as a Pydantic model.
"""
return self.get_output_schema()

@@ -468,7 +468,7 @@ class Runnable(ABC, Generic[Input, Output]):

self.get_name("Output"),
root=root_type,
# create model needs access to appropriate type annotations to be
# able to construct the pydantic model.
# able to construct the Pydantic model.
# When we create the model, we pass information about the namespace
# where the model is being created, so the type annotations can
# be resolved correctly as well.
@@ -776,11 +776,11 @@ class Runnable(ABC, Generic[Input, Output]):

SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
model = FakeStreamingListLLM(responses=["foo-lish"])

chain: Runnable = prompt | llm | {"str": StrOutputParser()}
chain: Runnable = prompt | model | {"str": StrOutputParser()}

chain_with_assign = chain.assign(hello=itemgetter("str") | llm)
chain_with_assign = chain.assign(hello=itemgetter("str") | model)

print(chain_with_assign.input_schema.model_json_schema())
# {'title': 'PromptInput', 'type': 'object', 'properties':
@@ -821,7 +821,7 @@ class Runnable(ABC, Generic[Input, Output]):

The config supports standard keys like `'tags'`, `'metadata'` for
tracing purposes, `'max_concurrency'` for controlling how much work to
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details. Defaults to `None`.
for more details.

Returns:
The output of the `Runnable`.
@@ -841,7 +841,7 @@ class Runnable(ABC, Generic[Input, Output]):

The config supports standard keys like `'tags'`, `'metadata'` for
tracing purposes, `'max_concurrency'` for controlling how much work to
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details. Defaults to `None`.
for more details.

Returns:
The output of the `Runnable`.
@@ -869,7 +869,7 @@ class Runnable(ABC, Generic[Input, Output]):

standard keys like `'tags'`, `'metadata'` for
tracing purposes, `'max_concurrency'` for controlling how much work
to do in parallel, and other keys. Please refer to the
`RunnableConfig` for more details. Defaults to `None`.
`RunnableConfig` for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
@@ -936,7 +936,7 @@ class Runnable(ABC, Generic[Input, Output]):

The config supports standard keys like `'tags'`, `'metadata'` for
tracing purposes, `'max_concurrency'` for controlling how much work to
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details. Defaults to `None`.
for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
@@ -1003,7 +1003,7 @@ class Runnable(ABC, Generic[Input, Output]):

The config supports standard keys like `'tags'`, `'metadata'` for
tracing purposes, `'max_concurrency'` for controlling how much work to
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details. Defaults to `None`.
for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
@@ -1067,7 +1067,7 @@ class Runnable(ABC, Generic[Input, Output]):

The config supports standard keys like `'tags'`, `'metadata'` for
tracing purposes, `'max_concurrency'` for controlling how much work to
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details. Defaults to `None`.
for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
@@ -1120,7 +1120,7 @@ class Runnable(ABC, Generic[Input, Output]):

Args:
input: The input to the `Runnable`.
config: The config to use for the `Runnable`. Defaults to `None`.
config: The config to use for the `Runnable`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.

Yields:
@@ -1141,7 +1141,7 @@ class Runnable(ABC, Generic[Input, Output]):

Args:
input: The input to the `Runnable`.
config: The config to use for the `Runnable`. Defaults to `None`.
config: The config to use for the `Runnable`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.

Yields:
@@ -1273,22 +1273,20 @@ class Runnable(ABC, Generic[Input, Output]):

A `StreamEvent` is a dictionary with the following schema:

- `event`: **str** - Event names are of the format:
- `event`: Event names are of the format:
`on_[runnable_type]_(start|stream|end)`.
- `name`: **str** - The name of the `Runnable` that generated the event.
- `run_id`: **str** - randomly generated ID associated with the given
execution of the `Runnable` that emitted the event. A child `Runnable` that gets
invoked as part of the execution of a parent `Runnable` is assigned its own
unique ID.
- `parent_ids`: **list[str]** - The IDs of the parent runnables that generated
the event. The root `Runnable` will have an empty list. The order of the parent
IDs is from the root to the immediate parent. Only available for v2 version of
the API. The v1 version of the API will return an empty list.
- `tags`: **list[str] | None** - The tags of the `Runnable` that generated
the event.
- `metadata`: **dict[str, Any] | None** - The metadata of the `Runnable` that
generated the event.
- `data`: **dict[str, Any]**
- `name`: The name of the `Runnable` that generated the event.
- `run_id`: Randomly generated ID associated with the given execution of the
`Runnable` that emitted the event. A child `Runnable` that gets invoked as
part of the execution of a parent `Runnable` is assigned its own unique ID.
- `parent_ids`: The IDs of the parent runnables that generated the event. The
root `Runnable` will have an empty list. The order of the parent IDs is from
the root to the immediate parent. Only available for v2 version of the API.
The v1 version of the API will return an empty list.
- `tags`: The tags of the `Runnable` that generated the event.
- `metadata`: The metadata of the `Runnable` that generated the event.
- `data`: The data associated with the event. The contents of this field
depend on the type of event. See the table below for more details.

Below is a table that illustrates some events that might be emitted by various
chains. Metadata fields have been omitted from the table for brevity.
@@ -1297,39 +1295,23 @@ class Runnable(ABC, Generic[Input, Output]):

!!! note
This reference table is for the v2 version of the schema.

+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| event                    | name             | chunk                               | input                                             | output                                              |
+==========================+==================+=====================================+===================================================+=====================================================+
| `on_chat_model_start`    | [model name]     |                                     | `{"messages": [[SystemMessage, HumanMessage]]}`   |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_chat_model_stream`   | [model name]     | `AIMessageChunk(content="hello")`   |                                                   |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_chat_model_end`      | [model name]     |                                     | `{"messages": [[SystemMessage, HumanMessage]]}`   | `AIMessageChunk(content="hello world")`             |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_llm_start`           | [model name]     |                                     | `{'input': 'hello'}`                              |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_llm_stream`          | [model name]     | `'Hello' `                          |                                                   |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_llm_end`             | [model name]     |                                     | `'Hello human!'`                                  |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_chain_start`         | format_docs      |                                     |                                                   |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_chain_stream`        | format_docs      | `'hello world!, goodbye world!'`    |                                                   |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_chain_end`           | format_docs      |                                     | `[Document(...)]`                                 | `'hello world!, goodbye world!'`                    |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_tool_start`          | some_tool        |                                     | `{"x": 1, "y": "2"}`                              |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_tool_end`            | some_tool        |                                     |                                                   | `{"x": 1, "y": "2"}`                                |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_retriever_start`     | [retriever name] |                                     | `{"query": "hello"}`                              |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_retriever_end`       | [retriever name] |                                     | `{"query": "hello"}`                              | `[Document(...), ..]`                               |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_prompt_start`        | [template_name]  |                                     | `{"question": "hello"}`                           |                                                     |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| `on_prompt_end`          | [template_name]  |                                     | `{"question": "hello"}`                           | `ChatPromptValue(messages: [SystemMessage, ...])`   |
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+
| event | name | chunk | input | output |
| ---------------------- | -------------------- | ----------------------------------- | ------------------------------------------------- | --------------------------------------------------- |
| `on_chat_model_start` | `'[model name]'` | | `{"messages": [[SystemMessage, HumanMessage]]}` | |
| `on_chat_model_stream` | `'[model name]'` | `AIMessageChunk(content="hello")` | | |
| `on_chat_model_end` | `'[model name]'` | | `{"messages": [[SystemMessage, HumanMessage]]}` | `AIMessageChunk(content="hello world")` |
| `on_llm_start` | `'[model name]'` | | `{'input': 'hello'}` | |
| `on_llm_stream` | `'[model name]'` | `'Hello' ` | | |
| `on_llm_end` | `'[model name]'` | | `'Hello human!'` | |
| `on_chain_start` | `'format_docs'` | | | |
| `on_chain_stream` | `'format_docs'` | `'hello world!, goodbye world!'` | | |
| `on_chain_end` | `'format_docs'` | | `[Document(...)]` | `'hello world!, goodbye world!'` |
| `on_tool_start` | `'some_tool'` | | `{"x": 1, "y": "2"}` | |
| `on_tool_end` | `'some_tool'` | | | `{"x": 1, "y": "2"}` |
| `on_retriever_start` | `'[retriever name]'` | | `{"query": "hello"}` | |
| `on_retriever_end` | `'[retriever name]'` | | `{"query": "hello"}` | `[Document(...), ..]` |
| `on_prompt_start` | `'[template_name]'` | | `{"question": "hello"}` | |
| `on_prompt_end` | `'[template_name]'` | | `{"question": "hello"}` | `ChatPromptValue(messages: [SystemMessage, ...])` |

In addition to the standard events, users can also dispatch custom events (see example below).
@@ -1337,13 +1319,10 @@ class Runnable(ABC, Generic[Input, Output]):

A custom event has the following format:

+-----------+------+-----------------------------------------------------------------------------------------------------------+
| Attribute | Type | Description                                                                                               |
+===========+======+===========================================================================================================+
| name      | str  | A user defined name for the event.                                                                        |
+-----------+------+-----------------------------------------------------------------------------------------------------------+
| data      | Any  | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |
+-----------+------+-----------------------------------------------------------------------------------------------------------+
| Attribute | Type | Description |
| ----------- | ------ | --------------------------------------------------------------------------------------------------------- |
| `name` | `str` | A user defined name for the event. |
| `data` | `Any` | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |

Here are declarations associated with the standard events shown above:
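A hedged sketch of consuming the v2 event stream documented above; the chain, model, and topic are assumptions:

```python
# Hedged sketch; ChatOpenAI, the prompt, and the topic are illustrative only.
import asyncio

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini")
chain = (
    ChatPromptTemplate.from_template("Tell me about {topic}")
    | model
    | StrOutputParser()
)


async def main() -> None:
    # Filter for chat-model stream events and print tokens as they arrive.
    async for event in chain.astream_events({"topic": "otters"}, version="v2"):
        if event["event"] == "on_chat_model_stream":
            print(event["data"]["chunk"].content, end="")


asyncio.run(main())
```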
@@ -1526,7 +1505,7 @@ class Runnable(ABC, Generic[Input, Output]):
|
||||
|
||||
Args:
|
||||
input: An iterator of inputs to the `Runnable`.
|
||||
config: The config to use for the `Runnable`. Defaults to `None`.
|
||||
config: The config to use for the `Runnable`.
|
||||
**kwargs: Additional keyword arguments to pass to the `Runnable`.
|
||||
|
||||
Yields:
|
||||
@@ -1571,7 +1550,7 @@ class Runnable(ABC, Generic[Input, Output]):
|
||||
|
||||
Args:
|
||||
input: An async iterator of inputs to the `Runnable`.
|
||||
config: The config to use for the `Runnable`. Defaults to `None`.
|
||||
config: The config to use for the `Runnable`.
|
||||
**kwargs: Additional keyword arguments to pass to the `Runnable`.
|
||||
|
||||
Yields:
|
||||
@@ -1619,16 +1598,16 @@ class Runnable(ABC, Generic[Input, Output]):
|
||||
from langchain_ollama import ChatOllama
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
|
||||
llm = ChatOllama(model="llama3.1")
|
||||
model = ChatOllama(model="llama3.1")
|
||||
|
||||
# Without bind
|
||||
chain = llm | StrOutputParser()
|
||||
chain = model | StrOutputParser()
|
||||
|
||||
chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
|
||||
# Output is 'One two three four five.'
|
||||
|
||||
# With bind
|
||||
chain = llm.bind(stop=["three"]) | StrOutputParser()
|
||||
chain = model.bind(stop=["three"]) | StrOutputParser()
|
||||
|
||||
chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
|
||||
# Output is 'One two'
|
||||
@@ -1682,11 +1661,11 @@ class Runnable(ABC, Generic[Input, Output]):
|
||||
|
||||
Args:
|
||||
on_start: Called before the `Runnable` starts running, with the `Run`
|
||||
object. Defaults to `None`.
|
||||
object.
|
||||
on_end: Called after the `Runnable` finishes running, with the `Run`
|
||||
object. Defaults to `None`.
|
||||
object.
|
||||
on_error: Called if the `Runnable` throws an error, with the `Run`
|
||||
object. Defaults to `None`.
|
||||
object.
|
||||
|
||||
Returns:
|
||||
A new `Runnable` with the listeners bound.
|
||||
@@ -1750,11 +1729,11 @@ class Runnable(ABC, Generic[Input, Output]):
|
||||
|
||||
Args:
|
||||
on_start: Called asynchronously before the `Runnable` starts running,
|
||||
with the `Run` object. Defaults to `None`.
|
||||
with the `Run` object.
|
||||
on_end: Called asynchronously after the `Runnable` finishes running,
|
||||
with the `Run` object. Defaults to `None`.
|
||||
with the `Run` object.
|
||||
on_error: Called asynchronously if the `Runnable` throws an error,
|
||||
with the `Run` object. Defaults to `None`.
|
||||
with the `Run` object.
|
||||
|
||||
Returns:
|
||||
A new `Runnable` with the listeners bound.
|
||||
@@ -1833,8 +1812,8 @@ class Runnable(ABC, Generic[Input, Output]):
    """Bind input and output types to a `Runnable`, returning a new `Runnable`.

    Args:
-       input_type: The input type to bind to the `Runnable`. Defaults to `None`.
-       output_type: The output type to bind to the `Runnable`. Defaults to `None`.
+       input_type: The input type to bind to the `Runnable`.
+       output_type: The output type to bind to the `Runnable`.

    Returns:
        A new Runnable with the types bound.

@@ -1955,7 +1934,7 @@ class Runnable(ABC, Generic[Input, Output]):
        to fallbacks as part of the input under the specified key.
        If `None`, exceptions will not be passed to fallbacks.
        If used, the base `Runnable` and its fallbacks must accept a
-       dictionary as input. Defaults to `None`.
+       dictionary as input.

    Returns:
        A new `Runnable` that will try the original `Runnable`, and then each
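The `exception_key` behavior is the subtle part of the hunk above; a minimal sketch of how the upstream error reaches the fallback (illustrative function names; assuming `langchain_core` is installed):

```python
from langchain_core.runnables import RunnableLambda


def primary(inputs: dict) -> str:
    raise ValueError("primary failed")


def backup(inputs: dict) -> str:
    # Because exception_key is set, the error raised by `primary`
    # arrives here under inputs["exception"].
    return f"recovered after: {inputs['exception']}"


chain = RunnableLambda(primary).with_fallbacks(
    [RunnableLambda(backup)], exception_key="exception"
)
print(chain.invoke({"question": "hi"}))  # recovered after: primary failed
```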
@@ -2462,10 +2441,10 @@ class Runnable(ABC, Generic[Input, Output]):
        pass `arg_types` to just specify the required arguments and their types.

    Args:
-       args_schema: The schema for the tool. Defaults to `None`.
-       name: The name of the tool. Defaults to `None`.
-       description: The description of the tool. Defaults to `None`.
-       arg_types: A dictionary of argument names to types. Defaults to `None`.
+       args_schema: The schema for the tool.
+       name: The name of the tool.
+       description: The description of the tool.
+       arg_types: A dictionary of argument names to types.

    Returns:
        A `BaseTool` instance.
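For context on how these `as_tool` arguments combine, a minimal sketch (hypothetical tool name and types; assuming `langchain_core` is installed):

```python
from langchain_core.runnables import RunnableLambda

# A plain Runnable that expects a dict input.
runnable = RunnableLambda(lambda args: str(args["a"] * max(args["b"])))

# arg_types stands in for a full args_schema: only names and types are given.
as_tool = runnable.as_tool(
    name="scale_max",
    description="Multiply 'a' by the largest value in 'b'.",
    arg_types={"a": int, "b": list[int]},
)
print(as_tool.invoke({"a": 3, "b": [1, 2]}))  # "6"
```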
@@ -2888,10 +2867,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):

    Args:
        steps: The steps to include in the sequence.
-       name: The name of the `Runnable`. Defaults to `None`.
-       first: The first `Runnable` in the sequence. Defaults to `None`.
-       middle: The middle `Runnable` objects in the sequence. Defaults to `None`.
-       last: The last Runnable in the sequence. Defaults to `None`.
+       name: The name of the `Runnable`.
+       first: The first `Runnable` in the sequence.
+       middle: The middle `Runnable` objects in the sequence.
+       last: The last Runnable in the sequence.

    Raises:
        ValueError: If the sequence has less than 2 steps.

@@ -2960,7 +2939,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
    """Get the input schema of the `Runnable`.

    Args:
-       config: The config to use. Defaults to `None`.
+       config: The config to use.

    Returns:
        The input schema of the `Runnable`.

@@ -2975,7 +2954,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
    """Get the output schema of the `Runnable`.

    Args:
-       config: The config to use. Defaults to `None`.
+       config: The config to use.

    Returns:
        The output schema of the `Runnable`.

@@ -3002,7 +2981,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
    """Get the graph representation of the `Runnable`.

    Args:
-       config: The config to use. Defaults to `None`.
+       config: The config to use.

    Returns:
        The graph representation of the `Runnable`.

@@ -3629,7 +3608,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
    """Create a `RunnableParallel`.

    Args:
-       steps__: The steps to include. Defaults to `None`.
+       steps__: The steps to include.
        **kwargs: Additional steps to include.

    """
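As a reminder of how the `steps__`/`**kwargs` construction above plays out, a minimal sketch (assuming `langchain_core` is installed):

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

# Steps can be passed as keyword arguments (or as a single dict via steps__).
parallel = RunnableParallel(
    doubled=RunnableLambda(lambda x: x * 2),
    squared=RunnableLambda(lambda x: x**2),
)
print(parallel.invoke(3))  # {'doubled': 6, 'squared': 9}
```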
@@ -3664,8 +3643,8 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
    """Get the name of the `Runnable`.

    Args:
-       suffix: The suffix to use. Defaults to `None`.
-       name: The name to use. Defaults to `None`.
+       suffix: The suffix to use.
+       name: The name to use.

    Returns:
        The name of the `Runnable`.

@@ -3689,7 +3668,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
    """Get the input schema of the `Runnable`.

    Args:
-       config: The config to use. Defaults to `None`.
+       config: The config to use.

    Returns:
        The input schema of the `Runnable`.

@@ -3720,7 +3699,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
    """Get the output schema of the `Runnable`.

    Args:
-       config: The config to use. Defaults to `None`.
+       config: The config to use.

    Returns:
        The output schema of the `Runnable`.

@@ -3747,7 +3726,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
    """Get the graph representation of the `Runnable`.

    Args:
-       config: The config to use. Defaults to `None`.
+       config: The config to use.

    Returns:
        The graph representation of the `Runnable`.

@@ -4157,8 +4136,8 @@ class RunnableGenerator(Runnable[Input, Output]):

    Args:
        transform: The transform function.
-       atransform: The async transform function. Defaults to `None`.
-       name: The name of the `Runnable`. Defaults to `None`.
+       atransform: The async transform function.
+       name: The name of the `Runnable`.

    Raises:
        TypeError: If the transform is not a generator function.
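A minimal sketch of the `transform` argument the hunk above documents — a generator function over an input stream (assuming `langchain_core` is installed):

```python
from collections.abc import Iterator

from langchain_core.runnables import RunnableGenerator


def upper(chunks: Iterator[str]) -> Iterator[str]:
    # A transform receives the input stream and yields the output stream.
    for chunk in chunks:
        yield chunk.upper()


runnable = RunnableGenerator(upper)
print(list(runnable.stream("abc")))  # ['ABC']; invoke() would return 'ABC'
```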
@@ -4435,8 +4414,8 @@ class RunnableLambda(Runnable[Input, Output]):
    Args:
        func: Either sync or async callable
        afunc: An async callable that takes an input and returns an output.
-           Defaults to `None`.
-       name: The name of the `Runnable`. Defaults to `None`.
+       name: The name of the `Runnable`.

    Raises:
        TypeError: If the `func` is not a callable type.
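A minimal sketch of the `func`/`afunc` pairing described above (assuming `langchain_core` is installed):

```python
import asyncio

from langchain_core.runnables import RunnableLambda


def add_one(x: int) -> int:
    return x + 1


async def add_one_async(x: int) -> int:
    return x + 1


# afunc serves ainvoke/astream; func serves the sync entry points.
runnable = RunnableLambda(add_one, afunc=add_one_async, name="add_one")
print(runnable.invoke(1))                # 2
print(asyncio.run(runnable.ainvoke(1)))  # 2
```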
@@ -4493,10 +4472,10 @@ class RunnableLambda(Runnable[Input, Output]):

    @override
    def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
-       """The pydantic schema for the input to this `Runnable`.
+       """The Pydantic schema for the input to this `Runnable`.

        Args:
-           config: The config to use. Defaults to `None`.
+           config: The config to use.

        Returns:
            The input schema for this `Runnable`.

@@ -4830,7 +4809,7 @@ class RunnableLambda(Runnable[Input, Output]):

    Args:
        input: The input to this `Runnable`.
-       config: The config to use. Defaults to `None`.
+       config: The config to use.
        **kwargs: Additional keyword arguments.

    Returns:

@@ -4861,7 +4840,7 @@ class RunnableLambda(Runnable[Input, Output]):

    Args:
        input: The input to this `Runnable`.
-       config: The config to use. Defaults to `None`.
+       config: The config to use.
        **kwargs: Additional keyword arguments.

    Returns:

@@ -5127,7 +5106,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
            None,
        ),
        # create model needs access to appropriate type annotations to be
-       # able to construct the pydantic model.
+       # able to construct the Pydantic model.
        # When we create the model, we pass information about the namespace
        # where the model is being created, so the type annotations can
        # be resolved correctly as well.

@@ -5150,7 +5129,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
        self.get_name("Output"),
        root=list[schema],  # type: ignore[valid-type]
        # create model needs access to appropriate type annotations to be
-       # able to construct the pydantic model.
+       # able to construct the Pydantic model.
        # When we create the model, we pass information about the namespace
        # where the model is being created, so the type annotations can
        # be resolved correctly as well.
@@ -5303,11 +5282,11 @@ class RunnableEach(RunnableEachBase[Input, Output]):

    Args:
        on_start: Called before the `Runnable` starts running, with the `Run`
-           object. Defaults to `None`.
+           object.
        on_end: Called after the `Runnable` finishes running, with the `Run`
-           object. Defaults to `None`.
+           object.
        on_error: Called if the `Runnable` throws an error, with the `Run`
-           object. Defaults to `None`.
+           object.

    Returns:
        A new `Runnable` with the listeners bound.

@@ -5336,11 +5315,11 @@ class RunnableEach(RunnableEachBase[Input, Output]):

    Args:
        on_start: Called asynchronously before the `Runnable` starts running,
-           with the `Run` object. Defaults to `None`.
+           with the `Run` object.
        on_end: Called asynchronously after the `Runnable` finishes running,
-           with the `Run` object. Defaults to `None`.
+           with the `Run` object.
        on_error: Called asynchronously if the `Runnable` throws an error,
-           with the `Run` object. Defaults to `None`.
+           with the `Run` object.

    Returns:
        A new `Runnable` with the listeners bound.

@@ -5387,13 +5366,13 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
    custom_input_type: Any | None = None
    """Override the input type of the underlying `Runnable` with a custom type.

-   The type can be a pydantic model, or a type annotation (e.g., `list[str]`).
+   The type can be a Pydantic model, or a type annotation (e.g., `list[str]`).
    """
    # Union[Type[Output], BaseModel] + things like list[str]
    custom_output_type: Any | None = None
    """Override the output type of the underlying `Runnable` with a custom type.

-   The type can be a pydantic model, or a type annotation (e.g., `list[str]`).
+   The type can be a Pydantic model, or a type annotation (e.g., `list[str]`).
    """

    model_config = ConfigDict(

@@ -5420,16 +5399,16 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
        kwargs: optional kwargs to pass to the underlying `Runnable`, when running
            the underlying `Runnable` (e.g., via `invoke`, `batch`,
            `transform`, or `stream` or async variants)
-           Defaults to `None`.

        config: optional config to bind to the underlying `Runnable`.
-           Defaults to `None`.

        config_factories: optional list of config factories to apply to the
            config before binding to the underlying `Runnable`.
-           Defaults to `None`.

        custom_input_type: Specify to override the input type of the underlying
-           `Runnable` with a custom type. Defaults to `None`.
+           `Runnable` with a custom type.
        custom_output_type: Specify to override the output type of the underlying
-           `Runnable` with a custom type. Defaults to `None`.
+           `Runnable` with a custom type.
        **other_kwargs: Unpacked into the base class.
    """
    super().__init__(
@@ -5866,11 +5845,11 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re

    Args:
        on_start: Called before the `Runnable` starts running, with the `Run`
-           object. Defaults to `None`.
+           object.
        on_end: Called after the `Runnable` finishes running, with the `Run`
-           object. Defaults to `None`.
+           object.
        on_error: Called if the `Runnable` throws an error, with the `Run`
-           object. Defaults to `None`.
+           object.

    Returns:
        A new `Runnable` with the listeners bound.

@@ -6077,10 +6056,10 @@ def chain(
    @chain
    def my_func(fields):
        prompt = PromptTemplate("Hello, {name}!")
-       llm = OpenAI()
+       model = OpenAI()
        formatted = prompt.invoke(**fields)

-       for chunk in llm.stream(formatted):
+       for chunk in model.stream(formatted):
            yield chunk
    ```
    """
@@ -191,7 +191,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

    Args:
        input: The input to the Runnable.
-       config: The configuration for the Runnable. Defaults to `None`.
+       config: The configuration for the Runnable.
        **kwargs: Additional keyword arguments to pass to the Runnable.

    Returns:

@@ -301,7 +301,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

    Args:
        input: The input to the Runnable.
-       config: The configuration for the Runnable. Defaults to `None`.
+       config: The configuration for the Runnable.
        **kwargs: Additional keyword arguments to pass to the Runnable.

    Yields:

@@ -385,7 +385,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

    Args:
        input: The input to the Runnable.
-       config: The configuration for the Runnable. Defaults to `None`.
+       config: The configuration for the Runnable.
        **kwargs: Additional keyword arguments to pass to the Runnable.

    Yields:
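For orientation on the class these three hunks touch, a minimal sketch of invoking a `RunnableBranch` (assuming `langchain_core` is installed):

```python
from langchain_core.runnables import RunnableBranch, RunnableLambda

# (condition, runnable) pairs, followed by the default branch.
branch = RunnableBranch(
    (lambda x: isinstance(x, str), RunnableLambda(lambda x: x.upper())),
    (lambda x: isinstance(x, int), RunnableLambda(lambda x: x + 1)),
    RunnableLambda(lambda x: "default"),
)
print(branch.invoke("hi"))  # 'HI'
print(branch.invoke(1))     # 2
print(branch.invoke(None))  # 'default'
```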
@@ -193,7 +193,7 @@ def ensure_config(config: RunnableConfig | None = None) -> RunnableConfig:
    """Ensure that a config is a dict with all keys present.

    Args:
-       config: The config to ensure. Defaults to `None`.
+       config: The config to ensure.

    Returns:
        The ensured config.

@@ -412,7 +412,7 @@ def call_func_with_variable_args(
        func: The function to call.
        input: The input to the function.
        config: The config to pass to the function.
-       run_manager: The run manager to pass to the function. Defaults to `None`.
+       run_manager: The run manager to pass to the function.
        **kwargs: The keyword arguments to pass to the function.

    Returns:

@@ -446,7 +446,7 @@ def acall_func_with_variable_args(
        func: The function to call.
        input: The input to the function.
        config: The config to pass to the function.
-       run_manager: The run manager to pass to the function. Defaults to `None`.
+       run_manager: The run manager to pass to the function.
        **kwargs: The keyword arguments to pass to the function.

    Returns:

@@ -535,7 +535,7 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
    Args:
        fn: The function to map.
        *iterables: The iterables to map over.
-       timeout: The timeout for the map. Defaults to `None`.
+       timeout: The timeout for the map.
        chunksize: The chunksize for the map. Defaults to 1.

    Returns:
@@ -123,7 +123,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
    """Prepare the Runnable for invocation.

    Args:
-       config: The configuration to use. Defaults to `None`.
+       config: The configuration to use.

    Returns:
        The prepared Runnable and configuration.

@@ -594,7 +594,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
    Returns:
        If the attribute is anything other than a method that outputs a Runnable,
        returns getattr(self.runnable, name). If the attribute is a method that
-       does return a new Runnable (e.g. llm.bind_tools([...]) outputs a new
+       does return a new Runnable (e.g. model.bind_tools([...]) outputs a new
        RunnableBinding) then self.runnable and each of the runnables in
        self.fallbacks is replaced with getattr(x, name).

@@ -605,15 +605,15 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):

    gpt_4o = ChatOpenAI(model="gpt-4o")
    claude_3_sonnet = ChatAnthropic(model="claude-3-7-sonnet-20250219")
-   llm = gpt_4o.with_fallbacks([claude_3_sonnet])
+   model = gpt_4o.with_fallbacks([claude_3_sonnet])

-   llm.model_name
+   model.model_name
    # -> "gpt-4o"

    # .bind_tools() is called on both ChatOpenAI and ChatAnthropic
    # Equivalent to:
    # gpt_4o.bind_tools([...]).with_fallbacks([claude_3_sonnet.bind_tools([...])])
-   llm.bind_tools([...])
+   model.bind_tools([...])
    # -> RunnableWithFallbacks(
        runnable=RunnableBinding(bound=ChatOpenAI(...), kwargs={"tools": [...]}),
        fallbacks=[RunnableBinding(bound=ChatAnthropic(...), kwargs={"tools": [...]})],
@@ -52,7 +52,7 @@ def is_uuid(value: str) -> bool:
        value: The string to check.

    Returns:
-       True if the string is a valid UUID, False otherwise.
+       `True` if the string is a valid UUID, `False` otherwise.
    """
    try:
        UUID(value)

@@ -69,7 +69,7 @@ class Edge(NamedTuple):
    target: str
    """The target node id."""
    data: Stringifiable | None = None
-   """Optional data associated with the edge. Defaults to `None`."""
+   """Optional data associated with the edge."""
    conditional: bool = False
    """Whether the edge is conditional. Defaults to `False`."""

@@ -77,8 +77,8 @@ class Edge(NamedTuple):
    """Return a copy of the edge with optional new source and target nodes.

    Args:
-       source: The new source node id. Defaults to `None`.
-       target: The new target node id. Defaults to `None`.
+       source: The new source node id.
+       target: The new target node id.

    Returns:
        A copy of the edge with the new source and target nodes.

@@ -101,7 +101,7 @@ class Node(NamedTuple):
    data: type[BaseModel] | RunnableType | None
    """The data of the node."""
    metadata: dict[str, Any] | None
-   """Optional metadata for the node. Defaults to `None`."""
+   """Optional metadata for the node."""

    def copy(
        self,

@@ -112,8 +112,8 @@ class Node(NamedTuple):
    """Return a copy of the node with optional new id and name.

    Args:
-       id: The new node id. Defaults to `None`.
-       name: The new node name. Defaults to `None`.
+       id: The new node id.
+       name: The new node name.

    Returns:
        A copy of the node with the new id and name.

@@ -132,7 +132,7 @@ class Branch(NamedTuple):
    condition: Callable[..., str]
    """A callable that returns a string representation of the condition."""
    ends: dict[str, str] | None
-   """Optional dictionary of end node ids for the branches. Defaults to `None`."""
+   """Optional dictionary of end node ids for the branches."""


class CurveStyle(Enum):
@@ -321,8 +321,8 @@ class Graph:

    Args:
        data: The data of the node.
-       id: The id of the node. Defaults to `None`.
-       metadata: Optional metadata for the node. Defaults to `None`.
+       id: The id of the node.
+       metadata: Optional metadata for the node.

    Returns:
        The node that was added to the graph.

@@ -361,7 +361,7 @@ class Graph:
    Args:
        source: The source node of the edge.
        target: The target node of the edge.
-       data: Optional data associated with the edge. Defaults to `None`.
+       data: Optional data associated with the edge.
        conditional: Whether the edge is conditional. Defaults to `False`.

    Returns:

@@ -549,8 +549,8 @@ class Graph:

    Args:
        output_file_path: The path to save the image to. If `None`, the image
-           is not saved. Defaults to `None`.
-       fontname: The name of the font to use. Defaults to `None`.
+           is not saved.
+       fontname: The name of the font to use.
        labels: Optional labels for nodes and edges in the graph. Defaults to
            `None`.

@@ -592,7 +592,7 @@ class Graph:
            Defaults to 9.
        frontmatter_config: Mermaid frontmatter config.
            Can be used to customize theme and styles. Will be converted to YAML and
-           added to the beginning of the mermaid graph. Defaults to `None`.
+           added to the beginning of the mermaid graph.

            See more here: https://mermaid.js.org/config/configuration.html.

@@ -652,7 +652,7 @@ class Graph:
        wrap_label_n_words: The number of words to wrap the node labels at.
            Defaults to 9.
        output_file_path: The path to save the image to. If `None`, the image
-           is not saved. Defaults to `None`.
+           is not saved.
        draw_method: The method to use to draw the graph.
            Defaults to MermaidDrawMethod.API.
        background_color: The color of the background. Defaults to "white".

@@ -663,7 +663,7 @@ class Graph:
            Defaults to 1.0.
        frontmatter_config: Mermaid frontmatter config.
            Can be used to customize theme and styles. Will be converted to YAML and
-           added to the beginning of the mermaid graph. Defaults to `None`.
+           added to the beginning of the mermaid graph.

            See more here: https://mermaid.js.org/config/configuration.html.

@@ -679,7 +679,7 @@ class Graph:
            }
            ```
        base_url: The base URL of the Mermaid server for rendering via API.
-           Defaults to `None`.

    Returns:
        The PNG image as bytes.
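Since several of these hunks touch the Mermaid drawing options, a minimal sketch of the call path (assuming `langchain_core` is installed; the PNG route via `MermaidDrawMethod.API` needs network access):

```python
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
graph = chain.get_graph()

# Mermaid source as text; frontmatter/theme options map to the args above.
print(graph.draw_mermaid())

# Rendering to PNG uses the Mermaid.ink API by default.
png_bytes = graph.draw_mermaid_png(output_file_path="chain.png")
```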
@@ -58,15 +58,15 @@ def draw_mermaid(
    Args:
        nodes: List of node ids.
        edges: List of edges, object with a source, target and data.
-       first_node: Id of the first node. Defaults to `None`.
-       last_node: Id of the last node. Defaults to `None`.
+       first_node: Id of the first node.
+       last_node: Id of the last node.
        with_styles: Whether to include styles in the graph. Defaults to `True`.
        curve_style: Curve style for the edges. Defaults to CurveStyle.LINEAR.
        node_styles: Node colors for different types. Defaults to NodeStyles().
        wrap_label_n_words: Words to wrap the edge labels. Defaults to 9.
        frontmatter_config: Mermaid frontmatter config.
            Can be used to customize theme and styles. Will be converted to YAML and
-           added to the beginning of the mermaid graph. Defaults to `None`.
+           added to the beginning of the mermaid graph.

            See more here: https://mermaid.js.org/config/configuration.html.

@@ -286,13 +286,13 @@ def draw_mermaid_png(

    Args:
        mermaid_syntax: Mermaid graph syntax.
-       output_file_path: Path to save the PNG image. Defaults to `None`.
+       output_file_path: Path to save the PNG image.
        draw_method: Method to draw the graph. Defaults to MermaidDrawMethod.API.
        background_color: Background color of the image. Defaults to "white".
        padding: Padding around the image. Defaults to 10.
        max_retries: Maximum number of retries (MermaidDrawMethod.API). Defaults to 1.
        retry_delay: Delay between retries (MermaidDrawMethod.API). Defaults to 1.0.
-       base_url: Base URL for the Mermaid.ink API. Defaults to `None`.
+       base_url: Base URL for the Mermaid.ink API.

    Returns:
        PNG image bytes.

@@ -45,7 +45,7 @@ class PngDrawer:
            }
        }
        The keys are the original labels, and the values are the new labels.
-       Defaults to `None`.

    """
    self.fontname = fontname or "arial"
    self.labels = labels or LabelsDict(nodes={}, edges={})

@@ -104,7 +104,7 @@ class PngDrawer:
        viz: The graphviz object.
        source: The source node.
        target: The target node.
-       label: The label for the edge. Defaults to `None`.
+       label: The label for the edge.
        conditional: Whether the edge is conditional. Defaults to `False`.
    """
    viz.add_edge(
@@ -1,4 +1,4 @@
-"""Module contains typedefs that are used with Runnables."""
+"""Module contains typedefs that are used with `Runnable` objects."""

from __future__ import annotations

@@ -14,43 +14,43 @@ class EventData(TypedDict, total=False):
    """Data associated with a streaming event."""

    input: Any
-   """The input passed to the Runnable that generated the event.
+   """The input passed to the `Runnable` that generated the event.

-   Inputs will sometimes be available at the *START* of the Runnable, and
-   sometimes at the *END* of the Runnable.
+   Inputs will sometimes be available at the *START* of the `Runnable`, and
+   sometimes at the *END* of the `Runnable`.

-   If a Runnable is able to stream its inputs, then its input by definition
-   won't be known until the *END* of the Runnable when it has finished streaming
+   If a `Runnable` is able to stream its inputs, then its input by definition
+   won't be known until the *END* of the `Runnable` when it has finished streaming
    its inputs.
    """
    error: NotRequired[BaseException]
-   """The error that occurred during the execution of the Runnable.
+   """The error that occurred during the execution of the `Runnable`.

-   This field is only available if the Runnable raised an exception.
+   This field is only available if the `Runnable` raised an exception.

    !!! version-added "Added in version 1.0.0"
    """
    output: Any
-   """The output of the Runnable that generated the event.
+   """The output of the `Runnable` that generated the event.

-   Outputs will only be available at the *END* of the Runnable.
+   Outputs will only be available at the *END* of the `Runnable`.

-   For most Runnables, this field can be inferred from the `chunk` field,
-   though there might be some exceptions for special cased Runnables (e.g., like
+   For most `Runnable` objects, this field can be inferred from the `chunk` field,
+   though there might be some exceptions for a special-cased `Runnable` (e.g., like
    chat models), which may return more information.
    """
    chunk: Any
    """A streaming chunk from the output that generated the event.

    chunks support addition in general, and adding them up should result
-   in the output of the Runnable that generated the event.
+   in the output of the `Runnable` that generated the event.
    """


class BaseStreamEvent(TypedDict):
    """Streaming event.

-   Schema of a streaming event which is produced from the astream_events method.
+   Schema of a streaming event which is produced from the `astream_events` method.

    Example:
        ```python
@@ -94,45 +94,45 @@ class BaseStreamEvent(TypedDict):
    """

    event: str
-   """Event names are of the format: on_[runnable_type]_(start|stream|end).
+   """Event names are of the format: `on_[runnable_type]_(start|stream|end)`.

    Runnable types are one of:

    - **llm** - used by non chat models
    - **chat_model** - used by chat models
-   - **prompt** -- e.g., ChatPromptTemplate
-   - **tool** -- from tools defined via @tool decorator or inheriting
-       from Tool/BaseTool
-   - **chain** - most Runnables are of this type
+   - **prompt** -- e.g., `ChatPromptTemplate`
+   - **tool** -- from tools defined via `@tool` decorator or inheriting
+       from `Tool`/`BaseTool`
+   - **chain** - most `Runnable` objects are of this type

    Further, the events are categorized as one of:

-   - **start** - when the Runnable starts
-   - **stream** - when the Runnable is streaming
-   - **end* - when the Runnable ends
+   - **start** - when the `Runnable` starts
+   - **stream** - when the `Runnable` is streaming
+   - **end** - when the `Runnable` ends

    start, stream and end are associated with slightly different `data` payload.

    Please see the documentation for `EventData` for more details.
    """
    run_id: str
-   """An randomly generated ID to keep track of the execution of the given Runnable.
+   """A randomly generated ID to keep track of the execution of the given `Runnable`.

-   Each child Runnable that gets invoked as part of the execution of a parent Runnable
-   is assigned its own unique ID.
+   Each child `Runnable` that gets invoked as part of the execution of a parent
+   `Runnable` is assigned its own unique ID.
    """
    tags: NotRequired[list[str]]
-   """Tags associated with the Runnable that generated this event.
+   """Tags associated with the `Runnable` that generated this event.

-   Tags are always inherited from parent Runnables.
+   Tags are always inherited from parent `Runnable` objects.

-   Tags can either be bound to a Runnable using `.with_config({"tags": ["hello"]})`
+   Tags can either be bound to a `Runnable` using `.with_config({"tags": ["hello"]})`
    or passed at run time using `.astream_events(..., {"tags": ["hello"]})`.
    """
    metadata: NotRequired[dict[str, Any]]
-   """Metadata associated with the Runnable that generated this event.
+   """Metadata associated with the `Runnable` that generated this event.

-   Metadata can either be bound to a Runnable using
+   Metadata can either be bound to a `Runnable` using

    `.with_config({"metadata": { "foo": "bar" }})`

@@ -146,8 +146,8 @@ class BaseStreamEvent(TypedDict):

    Root Events will have an empty list.

-   For example, if a Runnable A calls Runnable B, then the event generated by Runnable
-   B will have Runnable A's ID in the parent_ids field.
+   For example, if a `Runnable` A calls `Runnable` B, then the event generated by
+   `Runnable` B will have `Runnable` A's ID in the `parent_ids` field.

    The order of the parent IDs is from the root parent to the immediate parent.
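A minimal sketch of consuming the events this schema describes (assuming `langchain_core` is installed; older releases may require an explicit `version="v2"` argument to `astream_events`):

```python
import asyncio

from langchain_core.runnables import RunnableLambda


async def main() -> None:
    chain = RunnableLambda(lambda x: x + 1, name="add_one")
    async for event in chain.astream_events(1):
        # Event names follow on_[runnable_type]_(start|stream|end).
        print(event["event"], event["name"], event["run_id"])


asyncio.run(main())
```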
@@ -164,7 +164,7 @@ class StandardStreamEvent(BaseStreamEvent):
    The contents of the event data depend on the event type.
    """
    name: str
-   """The name of the Runnable that generated the event."""
+   """The name of the `Runnable` that generated the event."""


class CustomStreamEvent(BaseStreamEvent):

@@ -80,7 +80,7 @@ def accepts_run_manager(callable: Callable[..., Any]) -> bool: # noqa: A002
        callable: The callable to check.

    Returns:
-       True if the callable accepts a run_manager argument, False otherwise.
+       `True` if the callable accepts a run_manager argument, `False` otherwise.
    """
    try:
        return signature(callable).parameters.get("run_manager") is not None

@@ -95,7 +95,7 @@ def accepts_config(callable: Callable[..., Any]) -> bool: # noqa: A002
        callable: The callable to check.

    Returns:
-       True if the callable accepts a config argument, False otherwise.
+       `True` if the callable accepts a config argument, `False` otherwise.
    """
    try:
        return signature(callable).parameters.get("config") is not None

@@ -110,7 +110,7 @@ def accepts_context(callable: Callable[..., Any]) -> bool: # noqa: A002
        callable: The callable to check.

    Returns:
-       True if the callable accepts a context argument, False otherwise.
+       `True` if the callable accepts a context argument, `False` otherwise.
    """
    try:
        return signature(callable).parameters.get("context") is not None

@@ -123,7 +123,7 @@ def asyncio_accepts_context() -> bool:
    """Cache the result of checking if asyncio.create_task accepts a `context` arg.

    Returns:
-       True if `asyncio.create_task` accepts a context argument, False otherwise.
+       `True` if `asyncio.create_task` accepts a context argument, `False` otherwise.
    """
    return accepts_context(asyncio.create_task)
@@ -552,11 +552,11 @@ class ConfigurableField(NamedTuple):
    id: str
    """The unique identifier of the field."""
    name: str | None = None
-   """The name of the field. Defaults to `None`."""
+   """The name of the field."""
    description: str | None = None
-   """The description of the field. Defaults to `None`."""
+   """The description of the field."""
    annotation: Any | None = None
-   """The annotation of the field. Defaults to `None`."""
+   """The annotation of the field."""
    is_shared: bool = False
    """Whether the field is shared. Defaults to `False`."""

@@ -575,9 +575,9 @@ class ConfigurableFieldSingleOption(NamedTuple):
    default: str
    """The default value for the field."""
    name: str | None = None
-   """The name of the field. Defaults to `None`."""
+   """The name of the field."""
    description: str | None = None
-   """The description of the field. Defaults to `None`."""
+   """The description of the field."""
    is_shared: bool = False
    """Whether the field is shared. Defaults to `False`."""

@@ -596,9 +596,9 @@ class ConfigurableFieldMultiOption(NamedTuple):
    default: Sequence[str]
    """The default values for the field."""
    name: str | None = None
-   """The name of the field. Defaults to `None`."""
+   """The name of the field."""
    description: str | None = None
-   """The description of the field. Defaults to `None`."""
+   """The description of the field."""
    is_shared: bool = False
    """Whether the field is shared. Defaults to `False`."""

@@ -620,15 +620,15 @@ class ConfigurableFieldSpec(NamedTuple):
    annotation: Any
    """The annotation of the field."""
    name: str | None = None
-   """The name of the field. Defaults to `None`."""
+   """The name of the field."""
    description: str | None = None
-   """The description of the field. Defaults to `None`."""
+   """The description of the field."""
    default: Any = None
-   """The default value for the field. Defaults to `None`."""
+   """The default value for the field."""
    is_shared: bool = False
    """Whether the field is shared. Defaults to `False`."""
    dependencies: list[str] | None = None
-   """The dependencies of the field. Defaults to `None`."""
+   """The dependencies of the field."""


def get_unique_config_specs(
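A minimal sketch of how a `ConfigurableField` is typically used (illustrative model and field id; assuming `langchain-openai` is installed alongside `langchain_core`):

```python
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI

model = ChatOpenAI(temperature=0).configurable_fields(
    temperature=ConfigurableField(
        id="llm_temperature",
        name="LLM Temperature",
        description="Sampling temperature for the model.",
    )
)

# Override the field at call time via the `configurable` config key.
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a number")
```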
@@ -727,7 +727,7 @@ def is_async_generator(
        func: The function to check.

    Returns:
-       True if the function is an async generator, False otherwise.
+       `True` if the function is an async generator, `False` otherwise.
    """
    return inspect.isasyncgenfunction(func) or (
        hasattr(func, "__call__")  # noqa: B004

@@ -744,7 +744,7 @@ def is_async_callable(
        func: The function to check.

    Returns:
-       True if the function is async, False otherwise.
+       `True` if the function is async, `False` otherwise.
    """
    return asyncio.iscoroutinefunction(func) or (
        hasattr(func, "__call__")  # noqa: B004

@@ -209,7 +209,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
    """Get an iterator over keys that match the given prefix.

    Args:
-       prefix: The prefix to match. Defaults to `None`.
+       prefix: The prefix to match.

    Yields:
        The keys that match the given prefix.

@@ -225,7 +225,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
    """Async get an async iterator over keys that match the given prefix.

    Args:
-       prefix: The prefix to match. Defaults to `None`.
+       prefix: The prefix to match.

    Yields:
        The keys that match the given prefix.
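A minimal sketch of the prefix matching described here (assuming `langchain_core` is installed):

```python
from langchain_core.stores import InMemoryStore

store = InMemoryStore()
store.mset([("user:1", "alice"), ("user:2", "bob"), ("doc:1", "notes")])

print(list(store.yield_keys(prefix="user:")))  # ['user:1', 'user:2']
print(store.mget(["user:1", "doc:1"]))         # ['alice', 'notes']
```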
@@ -92,7 +92,7 @@ def _is_annotated_type(typ: type[Any]) -> bool:
        typ: The type to check.

    Returns:
-       True if the type is an Annotated type, False otherwise.
+       `True` if the type is an Annotated type, `False` otherwise.
    """
    return get_origin(typ) is typing.Annotated

@@ -226,7 +226,7 @@ def _is_pydantic_annotation(annotation: Any, pydantic_version: str = "v2") -> bo
        pydantic_version: The Pydantic version to check against ("v1" or "v2").

    Returns:
-       True if the annotation is a Pydantic model, False otherwise.
+       `True` if the annotation is a Pydantic model, `False` otherwise.
    """
    base_model_class = BaseModelV1 if pydantic_version == "v1" else BaseModel
    try:

@@ -245,7 +245,7 @@ def _function_annotations_are_pydantic_v1(
        func: The function being checked.

    Returns:
-       True if all Pydantic annotations are from V1, False otherwise.
+       `True` if all Pydantic annotations are from V1, `False` otherwise.

    Raises:
        NotImplementedError: If the function contains mixed V1 and V2 annotations.
@@ -285,17 +285,17 @@ def create_schema_from_function(
    error_on_invalid_docstring: bool = False,
    include_injected: bool = True,
) -> type[BaseModel]:
-   """Create a pydantic schema from a function's signature.
+   """Create a Pydantic schema from a function's signature.

    Args:
-       model_name: Name to assign to the generated pydantic schema.
+       model_name: Name to assign to the generated Pydantic schema.
        func: Function to generate the schema from.
        filter_args: Optional list of arguments to exclude from the schema.
-           Defaults to FILTERED_ARGS.
+           Defaults to `FILTERED_ARGS`.
        parse_docstring: Whether to parse the function's docstring for descriptions
            for each argument. Defaults to `False`.
        error_on_invalid_docstring: if `parse_docstring` is provided, configure
-           whether to raise ValueError on invalid Google Style docstrings.
+           whether to raise `ValueError` on invalid Google Style docstrings.
            Defaults to `False`.
        include_injected: Whether to include injected arguments in the schema.
            Defaults to `True`, since we want to include them in the schema

@@ -312,7 +312,7 @@ def create_schema_from_function(
    # https://docs.pydantic.dev/latest/usage/validation_decorator/
    with warnings.catch_warnings():
        # We are using deprecated functionality here.
-       # This code should be re-written to simply construct a pydantic model
+       # This code should be re-written to simply construct a Pydantic model
        # using inspect.signature and create_model.
        warnings.simplefilter("ignore", category=PydanticDeprecationWarning)
        validated = validate_arguments(func, config=_SchemaConfig)  # type: ignore[operator]
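To make the docstring-parsing options concrete, a minimal sketch. Note that `create_schema_from_function` is primarily an internal helper, so treat the `langchain_core.tools.base` import path as an assumption:

```python
from langchain_core.tools.base import create_schema_from_function


def multiply(a: int, b: int = 2) -> int:
    """Multiply a by b.

    Args:
        a: The first factor.
        b: The second factor.
    """
    return a * b


# parse_docstring=True lifts the Args: descriptions into the schema fields.
schema = create_schema_from_function("Multiply", multiply, parse_docstring=True)
print(schema.model_json_schema()["properties"]["a"])  # includes the description
```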
@@ -460,13 +460,13 @@ class ChildTool(BaseTool):
    """Callbacks to be called during tool execution."""

    tags: list[str] | None = None
-   """Optional list of tags associated with the tool. Defaults to `None`.
+   """Optional list of tags associated with the tool.

    These tags will be associated with each call to this tool,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a tool with its use case.
    """
    metadata: dict[str, Any] | None = None
-   """Optional metadata associated with the tool. Defaults to `None`.
+   """Optional metadata associated with the tool.

    This metadata will be associated with each call to this tool,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a tool with its use case.

@@ -517,7 +517,7 @@ class ChildTool(BaseTool):
    """Check if the tool accepts only a single input argument.

    Returns:
-       True if the tool has only one input argument, False otherwise.
+       `True` if the tool has only one input argument, `False` otherwise.
    """
    keys = {k for k in self.args if k != "kwargs"}
    return len(keys) == 1

@@ -767,16 +767,16 @@ class ChildTool(BaseTool):

    Args:
        tool_input: The input to the tool.
-       verbose: Whether to log the tool's progress. Defaults to `None`.
+       verbose: Whether to log the tool's progress.
        start_color: The color to use when starting the tool. Defaults to 'green'.
        color: The color to use when ending the tool. Defaults to 'green'.
-       callbacks: Callbacks to be called during tool execution. Defaults to `None`.
-       tags: Optional list of tags associated with the tool. Defaults to `None`.
-       metadata: Optional metadata associated with the tool. Defaults to `None`.
-       run_name: The name of the run. Defaults to `None`.
-       run_id: The id of the run. Defaults to `None`.
-       config: The configuration for the tool. Defaults to `None`.
-       tool_call_id: The id of the tool call. Defaults to `None`.
+       callbacks: Callbacks to be called during tool execution.
+       tags: Optional list of tags associated with the tool.
+       metadata: Optional metadata associated with the tool.
+       run_name: The name of the run.
+       run_id: The id of the run.
+       config: The configuration for the tool.
+       tool_call_id: The id of the tool call.
        **kwargs: Keyword arguments to be passed to tool callbacks (event handler)

    Returns:

@@ -879,16 +879,16 @@ class ChildTool(BaseTool):

    Args:
        tool_input: The input to the tool.
-       verbose: Whether to log the tool's progress. Defaults to `None`.
+       verbose: Whether to log the tool's progress.
        start_color: The color to use when starting the tool. Defaults to 'green'.
        color: The color to use when ending the tool. Defaults to 'green'.
-       callbacks: Callbacks to be called during tool execution. Defaults to `None`.
-       tags: Optional list of tags associated with the tool. Defaults to `None`.
-       metadata: Optional metadata associated with the tool. Defaults to `None`.
-       run_name: The name of the run. Defaults to `None`.
-       run_id: The id of the run. Defaults to `None`.
-       config: The configuration for the tool. Defaults to `None`.
-       tool_call_id: The id of the tool call. Defaults to `None`.
+       callbacks: Callbacks to be called during tool execution.
+       tags: Optional list of tags associated with the tool.
+       metadata: Optional metadata associated with the tool.
+       run_name: The name of the run.
+       run_id: The id of the run.
+       config: The configuration for the tool.
+       tool_call_id: The id of the tool call.
        **kwargs: Keyword arguments to be passed to tool callbacks

    Returns:
@@ -981,7 +981,7 @@ def _is_tool_call(x: Any) -> bool:
        x: The input to check.

    Returns:
-       True if the input is a tool call, False otherwise.
+       `True` if the input is a tool call, `False` otherwise.
    """
    return isinstance(x, dict) and x.get("type") == "tool_call"

@@ -1128,7 +1128,7 @@ def _is_message_content_type(obj: Any) -> bool:
        obj: The object to check.

    Returns:
-       True if the object is valid message content, False otherwise.
+       `True` if the object is valid message content, `False` otherwise.
    """
    return isinstance(obj, str) or (
        isinstance(obj, list) and all(_is_message_content_block(e) for e in obj)

@@ -1144,7 +1144,7 @@ def _is_message_content_block(obj: Any) -> bool:
        obj: The object to check.

    Returns:
-       True if the object is a valid content block, False otherwise.
+       `True` if the object is a valid content block, `False` otherwise.
    """
    if isinstance(obj, str):
        return True

@@ -1248,7 +1248,7 @@ def _is_injected_arg_type(
        injected_type: The specific injected type to check for.

    Returns:
-       True if the type is an injected argument, False otherwise.
+       `True` if the type is an injected argument, `False` otherwise.
    """
    injected_type = injected_type or InjectedToolArg
    return any(

@@ -101,7 +101,7 @@ def tool(
    return_direct: Whether to return directly from the tool rather
        than continuing the agent loop. Defaults to `False`.
    args_schema: optional argument schema for user to specify.
-       Defaults to `None`.

    infer_schema: Whether to infer the schema of the arguments from
        the function's signature. This also makes the resultant tool
        accept a dictionary input to its `run()` function.
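A minimal sketch of the `@tool` decorator with an inferred schema (assuming `langchain_core` is installed):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# infer_schema=True (the default) builds an args schema from the signature,
# so the tool accepts a dictionary input.
print(multiply.invoke({"a": 2, "b": 5}))  # 10
print(multiply.args)                      # per-argument schema dict
```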
@@ -397,10 +397,10 @@ def convert_runnable_to_tool(

    Args:
        runnable: The runnable to convert.
-       args_schema: The schema for the tool's input arguments. Defaults to `None`.
-       name: The name of the tool. Defaults to `None`.
-       description: The description of the tool. Defaults to `None`.
-       arg_types: The types of the arguments. Defaults to `None`.
+       args_schema: The schema for the tool's input arguments.
+       name: The name of the tool.
+       description: The description of the tool.
+       arg_types: The types of the arguments.

    Returns:
        The tool.

@@ -81,7 +81,7 @@ def create_retriever_tool(
        so should be unique and somewhat descriptive.
    description: The description for the tool. This will be passed to the language
        model, so should be descriptive.
-   document_prompt: The prompt to use for the document. Defaults to `None`.
+   document_prompt: The prompt to use for the document.
    document_separator: The separator to use between documents. Defaults to "\n\n".
    response_format: The tool response format. If "content" then the output of
        the tool is interpreted as the contents of a ToolMessage. If

@@ -69,7 +69,7 @@ class Tool(BaseTool):
    def _to_args_and_kwargs(
        self, tool_input: str | dict, tool_call_id: str | None
    ) -> tuple[tuple, dict]:
-       """Convert tool input to pydantic model.
+       """Convert tool input to Pydantic model.

        Args:
            tool_input: The input to the tool.

@@ -79,8 +79,7 @@ class Tool(BaseTool):
        ToolException: If the tool input is invalid.

    Returns:
-       the pydantic model args and kwargs.
-
+       The Pydantic model args and kwargs.
    """
    args, kwargs = super()._to_args_and_kwargs(tool_input, tool_call_id)
    # For backwards compatibility. The tool must be run with a single input

@@ -178,8 +177,8 @@ class Tool(BaseTool):
    name: The name of the tool.
    description: The description of the tool.
    return_direct: Whether to return the output directly. Defaults to `False`.
-   args_schema: The schema of the tool's input arguments. Defaults to `None`.
-   coroutine: The asynchronous version of the function. Defaults to `None`.
+   args_schema: The schema of the tool's input arguments.
+   coroutine: The asynchronous version of the function.
    **kwargs: Additional arguments to pass to the tool.

    Returns:

@@ -150,7 +150,7 @@ class StructuredTool(BaseTool):
        Defaults to the function docstring.
    return_direct: Whether to return the result directly or as a callback.
        Defaults to `False`.
-   args_schema: The schema of the tool's input arguments. Defaults to `None`.
+   args_schema: The schema of the tool's input arguments.
    infer_schema: Whether to infer the schema from the function's signature.
        Defaults to `True`.
    response_format: The tool response format. If "content" then the output of
@@ -67,9 +67,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
        serialized: The serialized model.
        messages: The messages to start the chat with.
        run_id: The run ID.
-       tags: The tags for the run. Defaults to `None`.
-       parent_run_id: The parent run ID. Defaults to `None`.
-       metadata: The metadata for the run. Defaults to `None`.
+       tags: The tags for the run.
+       parent_run_id: The parent run ID.
+       metadata: The metadata for the run.
        name: The name of the run.
        **kwargs: Additional arguments.

@@ -108,9 +108,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
        serialized: The serialized model.
        prompts: The prompts to start the LLM with.
        run_id: The run ID.
-       tags: The tags for the run. Defaults to `None`.
-       parent_run_id: The parent run ID. Defaults to `None`.
-       metadata: The metadata for the run. Defaults to `None`.
+       tags: The tags for the run.
+       parent_run_id: The parent run ID.
+       metadata: The metadata for the run.
        name: The name of the run.
        **kwargs: Additional arguments.

@@ -145,9 +145,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):

    Args:
        token: The token.
-       chunk: The chunk. Defaults to `None`.
+       chunk: The chunk.
        run_id: The run ID.
-       parent_run_id: The parent run ID. Defaults to `None`.
+       parent_run_id: The parent run ID.
        **kwargs: Additional arguments.

    Returns:

@@ -255,10 +255,10 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
        serialized: The serialized chain.
        inputs: The inputs for the chain.
        run_id: The run ID.
-       tags: The tags for the run. Defaults to `None`.
-       parent_run_id: The parent run ID. Defaults to `None`.
-       metadata: The metadata for the run. Defaults to `None`.
-       run_type: The type of the run. Defaults to `None`.
+       tags: The tags for the run.
+       parent_run_id: The parent run ID.
+       metadata: The metadata for the run.
+       run_type: The type of the run.
        name: The name of the run.
        **kwargs: Additional arguments.

@@ -294,7 +294,7 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
    Args:
        outputs: The outputs for the chain.
        run_id: The run ID.
-       inputs: The inputs for the chain. Defaults to `None`.
+       inputs: The inputs for the chain.
        **kwargs: Additional arguments.

    Returns:

@@ -322,7 +322,7 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):

    Args:
        error: The error.
-       inputs: The inputs for the chain. Defaults to `None`.
+       inputs: The inputs for the chain.
        run_id: The run ID.
        **kwargs: Additional arguments.

@@ -357,9 +357,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
        serialized: The serialized tool.
        input_str: The input string.
        run_id: The run ID.
-       tags: The tags for the run. Defaults to `None`.
-       parent_run_id: The parent run ID. Defaults to `None`.
-       metadata: The metadata for the run. Defaults to `None`.
+       tags: The tags for the run.
+       parent_run_id: The parent run ID.
+       metadata: The metadata for the run.
        name: The name of the run.
        inputs: The inputs for the tool.
        **kwargs: Additional arguments.

@@ -446,9 +446,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
        serialized: The serialized retriever.
        query: The query.
        run_id: The run ID.
-       parent_run_id: The parent run ID. Defaults to `None`.
-       tags: The tags for the run. Defaults to `None`.
-       metadata: The metadata for the run. Defaults to `None`.
+       parent_run_id: The parent run ID.
+       tags: The tags for the run.
+       metadata: The metadata for the run.
        name: The name of the run.
        **kwargs: Additional arguments.
@@ -48,9 +48,9 @@ def tracing_v2_enabled(

    Args:
        project_name: The name of the project. Defaults to `'default'`.
-       example_id: The ID of the example. Defaults to `None`.
-       tags: The tags to add to the run. Defaults to `None`.
-       client: The client of the langsmith. Defaults to `None`.
+       example_id: The ID of the example.
+       tags: The tags to add to the run.
+       client: The LangSmith client.

    Yields:
        The LangChain tracer.
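A minimal sketch of the context manager in use (assuming `langchain_core` is installed and LangSmith credentials are configured in the environment):

```python
from langchain_core.runnables import RunnableLambda
from langchain_core.tracers.context import tracing_v2_enabled

chain = RunnableLambda(lambda x: x + 1)

# Runs inside the block are traced to the named LangSmith project.
with tracing_v2_enabled(project_name="my-project", tags=["demo"]) as tracer:
    chain.invoke(1)
```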
@@ -134,10 +134,10 @@ class LangChainTracer(BaseTracer):
        serialized: The serialized model.
        messages: The messages.
        run_id: The run ID.
-       tags: The tags. Defaults to `None`.
-       parent_run_id: The parent run ID. Defaults to `None`.
-       metadata: The metadata. Defaults to `None`.
-       name: The name. Defaults to `None`.
+       tags: The tags.
+       parent_run_id: The parent run ID.
+       metadata: The metadata.
+       name: The name.
        **kwargs: Additional keyword arguments.

    Returns:

@@ -190,7 +190,7 @@ class RunLog(RunLogPatch):
        other: The other `RunLog` to compare to.

    Returns:
-       True if the `RunLog`s are equal, False otherwise.
+       `True` if the `RunLog` objects are equal, `False` otherwise.
    """
    # First compare that the state is the same
    if not isinstance(other, RunLog):

@@ -288,7 +288,7 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
        *ops: The operations to send to the stream.

    Returns:
-       True if the patch was sent successfully, False if the stream is closed.
+       `True` if the patch was sent successfully, `False` if the stream is closed.
    """
    # We will likely want to wrap this in try / except at some point
    # to handle exceptions that might arise at run time.

@@ -368,7 +368,7 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
        run: The Run to check.

    Returns:
-       True if the run should be included, False otherwise.
+       `True` if the run should be included, `False` otherwise.
    """
    if run.id == self.root_id:
        return False
@@ -203,7 +203,7 @@ class Tee(Generic[T]):
        iterable: The iterable to split.
        n: The number of iterators to create. Defaults to 2.
        lock: The lock to synchronise access to the shared buffers.
-           Defaults to `None`.

    """
    self._iterator = iterable.__aiter__()  # before 3.10 aiter() doesn't exist
    self._buffers: list[deque[T]] = [deque() for _ in range(n)]

@@ -13,7 +13,7 @@ def env_var_is_set(env_var: str) -> bool:
        env_var: The name of the environment variable.

    Returns:
-       True if the environment variable is set, False otherwise.
+       `True` if the environment variable is set, `False` otherwise.
    """
    return env_var in os.environ and os.environ[env_var] not in {
        "",

@@ -38,7 +38,7 @@ def get_from_dict_or_env(
    env_key: The environment variable to look up if the key is not
        in the dictionary.
    default: The default value to return if the key is not in the dictionary
-       or the environment. Defaults to `None`.
+       or the environment.

    Returns:
        The dict value or the environment variable value.

@@ -64,7 +64,7 @@ def get_from_env(key: str, env_key: str, default: str | None = None) -> str:
    env_key: The environment variable to look up if the key is not
        in the dictionary.
    default: The default value to return if the key is not in the dictionary
-       or the environment. Defaults to `None`.
+       or the environment.

    Returns:
        The value of the key.
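A minimal sketch of the lookup order these helpers implement (assuming `langchain_core` is installed; `MY_API_KEY` is an illustrative variable name):

```python
import os

from langchain_core.utils import get_from_dict_or_env

os.environ["MY_API_KEY"] = "from-env"

# The dict wins when the key is present; otherwise the env var is used.
print(get_from_dict_or_env({"api_key": "from-dict"}, "api_key", "MY_API_KEY"))  # from-dict
print(get_from_dict_or_env({}, "api_key", "MY_API_KEY"))                        # from-env
```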
@@ -27,7 +27,7 @@ from pydantic.v1 import create_model as create_model_v1
 from typing_extensions import TypedDict, is_typeddict

 import langchain_core
-from langchain_core._api import beta, deprecated
+from langchain_core._api import beta
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
 from langchain_core.utils.json_schema import dereference_refs
 from langchain_core.utils.pydantic import is_basemodel_subclass

@@ -168,42 +168,6 @@ def _convert_pydantic_to_openai_function(
     )


-convert_pydantic_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_pydantic_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def convert_pydantic_to_openai_tool(
-    model: type[BaseModel],
-    *,
-    name: str | None = None,
-    description: str | None = None,
-) -> ToolDescription:
-    """Converts a Pydantic model to a function description for the OpenAI API.
-
-    Args:
-        model: The Pydantic model to convert.
-        name: The name of the function. If not provided, the title of the schema will be
-            used.
-        description: The description of the function. If not provided, the description
-            of the schema will be used.
-
-    Returns:
-        The tool description.
-    """
-    function = _convert_pydantic_to_openai_function(
-        model, name=name, description=description
-    )
-    return {"type": "function", "function": function}
-
-
 def _get_python_function_name(function: Callable) -> str:
     """Get the name of a Python function."""
     return function.__name__

@@ -240,13 +204,6 @@ def _convert_python_function_to_openai_function(
     )


-convert_python_function_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_python_function_to_openai_function)
-
-
 def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription:
     visited: dict = {}

@@ -368,31 +325,6 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
     }


-format_tool_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_format_tool_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
-    """Format tool into the OpenAI function API.
-
-    Args:
-        tool: The tool to format.
-
-    Returns:
-        The tool description.
-    """
-    function = _format_tool_to_openai_function(tool)
-    return {"type": "function", "function": function}
-
-
 def convert_to_openai_function(
     function: dict[str, Any] | type | Callable | BaseTool,
     *,
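For context, a minimal sketch of the replacement API that the removed deprecation shims pointed to, `convert_to_openai_function` / `convert_to_openai_tool` (the `Multiply` model is hypothetical):

```python
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import (
    convert_to_openai_function,
    convert_to_openai_tool,
)


class Multiply(BaseModel):
    """Multiply two integers."""

    a: int = Field(..., description="First factor")
    b: int = Field(..., description="Second factor")


function = convert_to_openai_function(Multiply)
# -> {"name": "Multiply", "description": "...", "parameters": {...}}
tool = convert_to_openai_tool(Multiply)
# -> {"type": "function", "function": {...}}
```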
@@ -665,7 +597,7 @@ def tool_example_to_messages(
         tool_calls: Tool calls represented as Pydantic BaseModels
         tool_outputs: Tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
-            will be inserted. Defaults to `None`.
+            will be inserted.
         ai_response: If provided, content for a final `AIMessage`.

     Returns:

@@ -713,7 +645,7 @@ def tool_example_to_messages(
                 "type": "function",
                 "function": {
                     # The name of the function right now corresponds to the name
-                    # of the pydantic model. This is implicit in the API right now,
+                    # of the Pydantic model. This is implicit in the API right now,
                     # and will be improved over time.
                     "name": tool_call.__class__.__name__,
                     "arguments": tool_call.model_dump_json(),
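A minimal sketch of `tool_example_to_messages`, whose docstring the hunks above touch (the `Person` model and values are hypothetical; the signature is assumed from the docstring):

```python
from pydantic import BaseModel

from langchain_core.utils.function_calling import tool_example_to_messages


class Person(BaseModel):
    name: str
    age: int


messages = tool_example_to_messages(
    "Alice is 30 years old.",
    [Person(name="Alice", age=30)],
    ai_response="Recorded Alice.",
)
# -> [HumanMessage, AIMessage(tool_calls=[...]), ToolMessage, AIMessage]
```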
@@ -65,9 +65,9 @@ def print_text(

     Args:
         text: The text to print.
-        color: The color to use. Defaults to `None`.
+        color: The color to use.
         end: The end character to use. Defaults to "".
-        file: The file to write to. Defaults to `None`.
+        file: The file to write to.
     """
     text_to_print = get_colored_text(text, color) if color else text
     print(text_to_print, end=end, file=file)

@@ -7,6 +7,6 @@ def is_interactive_env() -> bool:
     """Determine if running within IPython or Jupyter.

     Returns:
-        True if running in an interactive environment, False otherwise.
+        True if running in an interactive environment, `False` otherwise.
     """
     return hasattr(sys, "ps2")

@@ -139,7 +139,7 @@ class Tee(Generic[T]):
         iterable: The iterable to split.
         n: The number of iterators to create. Defaults to 2.
         lock: The lock to synchronise access to the shared buffers.
-            Defaults to `None`.

     """
     self._iterator = iter(iterable)
     self._buffers: list[deque[T]] = [deque() for _ in range(n)]
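A minimal sketch of the sync `Tee` described above (assuming it is importable from `langchain_core.utils.iter` and that the child iterators can be unpacked directly):

```python
from langchain_core.utils.iter import Tee

left, right = Tee(iter(range(3)), 2)  # two independent child iterators
assert list(left) == [0, 1, 2]
assert list(right) == [0, 1, 2]  # buffered items are replayed for each child
```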
@@ -78,7 +78,7 @@ def is_pydantic_v1_subclass(cls: type) -> bool:
     """Check if the given class is Pydantic v1-like.

     Returns:
-        True if the given class is a subclass of Pydantic `BaseModel` 1.x.
+        `True` if the given class is a subclass of Pydantic `BaseModel` 1.x.
     """
     return issubclass(cls, BaseModelV1)

@@ -87,7 +87,7 @@ def is_pydantic_v2_subclass(cls: type) -> bool:
     """Check if the given class is Pydantic v2-like.

     Returns:
-        True if the given class is a subclass of Pydantic BaseModel 2.x.
+        `True` if the given class is a subclass of Pydantic BaseModel 2.x.
     """
     return issubclass(cls, BaseModel)

@@ -101,7 +101,7 @@ def is_basemodel_subclass(cls: type) -> bool:
     * pydantic.v1.BaseModel in Pydantic 2.x

     Returns:
-        True if the given class is a subclass of Pydantic `BaseModel`.
+        `True` if the given class is a subclass of Pydantic `BaseModel`.
     """
     # Before we can use issubclass on the cls we need to check if it is a class
     if not inspect.isclass(cls) or isinstance(cls, GenericAlias):

@@ -119,7 +119,7 @@ def is_basemodel_instance(obj: Any) -> bool:
     * pydantic.v1.BaseModel in Pydantic 2.x

     Returns:
-        True if the given class is an instance of Pydantic `BaseModel`.
+        `True` if the given class is an instance of Pydantic `BaseModel`.
     """
     return isinstance(obj, (BaseModel, BaseModelV1))
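A minimal sketch of the four helpers above under Pydantic 2.x (model names are hypothetical):

```python
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1

from langchain_core.utils.pydantic import (
    is_basemodel_instance,
    is_basemodel_subclass,
    is_pydantic_v1_subclass,
)


class V2Model(BaseModel):
    x: int


class V1Model(BaseModelV1):
    x: int


assert is_basemodel_subclass(V2Model) and is_basemodel_subclass(V1Model)
assert is_pydantic_v1_subclass(V1Model)
assert is_basemodel_instance(V2Model(x=1))
```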
@@ -206,7 +206,7 @@ def _create_subset_model_v1(
     descriptions: dict | None = None,
     fn_description: str | None = None,
 ) -> type[BaseModel]:
-    """Create a pydantic model with only a subset of model's fields."""
+    """Create a Pydantic model with only a subset of model's fields."""
     fields = {}

     for field_name in field_names:

@@ -235,7 +235,7 @@ def _create_subset_model_v2(
     descriptions: dict | None = None,
     fn_description: str | None = None,
 ) -> type[BaseModel]:
-    """Create a pydantic model with a subset of the model fields."""
+    """Create a Pydantic model with a subset of the model fields."""
     descriptions_ = descriptions or {}
     fields = {}
     for field_name in field_names:

@@ -438,9 +438,9 @@ def create_model(
     /,
     **field_definitions: Any,
 ) -> type[BaseModel]:
-    """Create a pydantic model with the given field definitions.
+    """Create a Pydantic model with the given field definitions.

-    Please use create_model_v2 instead of this function.
+    Please use `create_model_v2` instead of this function.

     Args:
         model_name: The name of the model.

@@ -511,7 +511,7 @@ def create_model_v2(
     field_definitions: dict[str, Any] | None = None,
     root: Any | None = None,
 ) -> type[BaseModel]:
-    """Create a pydantic model with the given field definitions.
+    """Create a Pydantic model with the given field definitions.

     Attention:
         Please do not use outside of langchain packages. This API

@@ -522,7 +522,7 @@ def create_model_v2(
         module_name: The name of the module where the model is defined.
             This is used by Pydantic to resolve any forward references.
         field_definitions: The field definitions for the model.
-        root: Type for a root model (RootModel)
+        root: Type for a root model (`RootModel`)

     Returns:
         The created model.
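A minimal sketch of `create_model_v2` (flagged in its own docstring as internal to langchain packages; the `Point` model and the pydantic-style `(type, default)` field tuples are assumptions):

```python
from langchain_core.utils.pydantic import create_model_v2

# Hypothetical model: one required int field and one field with a default.
Point = create_model_v2(
    "Point",
    field_definitions={"x": (int, ...), "y": (int, 0)},
)
p = Point(x=1)
assert (p.x, p.y) == (1, 0)
```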
@@ -123,8 +123,8 @@ def guard_import(

     Args:
         module_name: The name of the module to import.
-        pip_name: The name of the module to install with pip. Defaults to `None`.
-        package: The package to import the module from. Defaults to `None`.
+        pip_name: The name of the module to install with pip.
+        package: The package to import the module from.

     Returns:
         The imported module.

@@ -155,11 +155,11 @@ def check_package_version(

     Args:
         package: The name of the package.
-        lt_version: The version must be less than this. Defaults to `None`.
-        lte_version: The version must be less than or equal to this. Defaults to `None`.
-        gt_version: The version must be greater than this. Defaults to `None`.
+        lt_version: The version must be less than this.
+        lte_version: The version must be less than or equal to this.
+        gt_version: The version must be greater than this.
         gte_version: The version must be greater than or equal to this.
-            Defaults to `None`.

     Raises:
         ValueError: If the package version does not meet the requirements.
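A minimal sketch of `guard_import` and `check_package_version` (import path assumed to be `langchain_core.utils.utils`, where the hunks above place them):

```python
from langchain_core.utils.utils import check_package_version, guard_import

math = guard_import("math")  # returns the module, or raises ImportError with a pip hint
check_package_version("packaging", gte_version="20.0")  # raises ValueError if too old
```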
@@ -57,7 +57,7 @@ def _content_blocks_equal_ignore_id(
         expected: Expected content to compare against (string or list of blocks).

     Returns:
-        True if content matches (excluding `id` fields), False otherwise.
+        True if content matches (excluding `id` fields), `False` otherwise.

     """
     if isinstance(actual, str) or isinstance(expected, str):

@@ -486,8 +486,10 @@ def test_provider_warns() -> None:
     # implemented.
     # This test should be removed when all major providers support content block
     # standardization.
-    message = AIMessage("Hello.", response_metadata={"model_provider": "groq"})
-    with pytest.warns(match="not yet fully supported for Groq"):
+    message = AIMessage(
+        "Hello.", response_metadata={"model_provider": "google_vertexai"}
+    )
+    with pytest.warns(match="not yet fully supported for Google VertexAI"):
         content_blocks = message.content_blocks

     assert content_blocks == [{"type": "text", "text": "Hello."}]

libs/core/tests/unit_tests/prompts/test_string.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+import pytest
+from packaging import version
+
+from langchain_core.prompts.string import mustache_schema
+from langchain_core.utils.pydantic import PYDANTIC_VERSION
+
+PYDANTIC_VERSION_AT_LEAST_29 = version.parse("2.9") <= PYDANTIC_VERSION
+
+
+@pytest.mark.skipif(
+    not PYDANTIC_VERSION_AT_LEAST_29,
+    reason=(
+        "Only test with most recent version of pydantic. "
+        "Pydantic introduced small fixes to generated JSONSchema on minor versions."
+    ),
+)
+def test_mustache_schema_parent_child() -> None:
+    template = "{{x.y}} {{x}}"
+    expected = {
+        "$defs": {
+            "x": {
+                "properties": {"y": {"default": None, "title": "Y", "type": "string"}},
+                "title": "x",
+                "type": "object",
+            }
+        },
+        "properties": {"x": {"$ref": "#/$defs/x", "default": None}},
+        "title": "PromptInput",
+        "type": "object",
+    }
+    actual = mustache_schema(template).model_json_schema()
+    assert expected == actual
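For context, a minimal sketch of the mustache template format the new test exercises, where `{{x.y}}` performs a dotted lookup into nested input (assuming `PromptTemplate.from_template` accepts `template_format="mustache"`):

```python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("{{x.y}}", template_format="mustache")
assert prompt.invoke({"x": {"y": "hello"}}).to_string() == "hello"
```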
@@ -1934,11 +1934,10 @@ def test_args_schema_as_pydantic(pydantic_model: Any) -> None:


 def test_args_schema_explicitly_typed() -> None:
-    """This should test that one can type the args schema as a pydantic model.
-
-    Please note that this will test using pydantic 2 even though BaseTool
-    is a pydantic 1 model!
+    """This should test that one can type the args schema as a Pydantic model.
+
+    Please note that this will test using pydantic 2 even though `BaseTool`
+    is a Pydantic 1 model!
     """

     class Foo(BaseModel):

@@ -1981,7 +1980,7 @@ def test_args_schema_explicitly_typed() -> None:

 @pytest.mark.parametrize("pydantic_model", TEST_MODELS)
 def test_structured_tool_with_different_pydantic_versions(pydantic_model: Any) -> None:
-    """This should test that one can type the args schema as a pydantic model."""
+    """This should test that one can type the args schema as a Pydantic model."""

     def foo(a: int, b: str) -> str:
         """Hahaha."""

@@ -7,9 +7,9 @@ from langchain_core.tracers.context import collect_runs


 def test_collect_runs() -> None:
-    llm = FakeListLLM(responses=["hello"])
+    model = FakeListLLM(responses=["hello"])
     with collect_runs() as cb:
-        llm.invoke("hi")
+        model.invoke("hi")
         assert cb.traced_runs
         assert len(cb.traced_runs) == 1
         assert isinstance(cb.traced_runs[0].id, uuid.UUID)
@@ -124,7 +124,7 @@ class BaseSingleActionAgent(BaseModel):
             along with observations.

         Returns:
-            AgentFinish: Agent finish object.
+            Agent finish object.

         Raises:
             ValueError: If `early_stopping_method` is not supported.

@@ -155,7 +155,7 @@ class BaseSingleActionAgent(BaseModel):
             kwargs: Additional arguments.

         Returns:
-            BaseSingleActionAgent: Agent object.
+            Agent object.
         """
         raise NotImplementedError

@@ -169,7 +169,7 @@ class BaseSingleActionAgent(BaseModel):
         """Return dictionary representation of agent.

         Returns:
-            Dict: Dictionary representation of agent.
+            Dictionary representation of agent.
         """
         _dict = super().model_dump()
         try:

@@ -233,7 +233,7 @@ class BaseMultiActionAgent(BaseModel):
         """Get allowed tools.

         Returns:
-            list[str] | None: Allowed tools.
+            Allowed tools.
         """
         return None

@@ -297,7 +297,7 @@ class BaseMultiActionAgent(BaseModel):
             along with observations.

         Returns:
-            AgentFinish: Agent finish object.
+            Agent finish object.

         Raises:
             ValueError: If `early_stopping_method` is not supported.

@@ -388,8 +388,7 @@ class MultiActionAgentOutputParser(
             text: Text to parse.

         Returns:
-            Union[List[AgentAction], AgentFinish]:
-                List of agent actions or agent finish.
+            List of agent actions or agent finish.
         """

@@ -812,7 +811,7 @@ class Agent(BaseSingleActionAgent):
             **kwargs: User inputs.

         Returns:
-            Dict[str, Any]: Full inputs for the LLMChain.
+            Full inputs for the LLMChain.
         """
         thoughts = self._construct_scratchpad(intermediate_steps)
         new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}

@@ -834,7 +833,7 @@ class Agent(BaseSingleActionAgent):
             values: Values to validate.

         Returns:
-            Dict: Validated values.
+            Validated values.

         Raises:
             ValueError: If `agent_scratchpad` is not in prompt.input_variables

@@ -875,7 +874,7 @@ class Agent(BaseSingleActionAgent):
             tools: Tools to use.

         Returns:
-            BasePromptTemplate: Prompt template.
+            Prompt template.
         """

     @classmethod

@@ -910,7 +909,7 @@ class Agent(BaseSingleActionAgent):
             kwargs: Additional arguments.

         Returns:
-            Agent: Agent object.
+            Agent object.
         """
         cls._validate_tools(tools)
         llm_chain = LLMChain(

@@ -942,7 +941,7 @@ class Agent(BaseSingleActionAgent):
             **kwargs: User inputs.

         Returns:
-            AgentFinish: Agent finish object.
+            Agent finish object.

         Raises:
             ValueError: If `early_stopping_method` is not in ['force', 'generate'].

@@ -1082,7 +1081,7 @@ class AgentExecutor(Chain):
             kwargs: Additional arguments.

         Returns:
-            AgentExecutor: Agent executor object.
+            Agent executor object.
         """
         return cls(
             agent=agent,

@@ -1099,7 +1098,7 @@ class AgentExecutor(Chain):
             values: Values to validate.

         Returns:
-            Dict: Validated values.
+            Validated values.

         Raises:
             ValueError: If allowed tools are different than provided tools.

@@ -1126,7 +1125,7 @@ class AgentExecutor(Chain):
             values: Values to validate.

         Returns:
-            Dict: Validated values.
+            Validated values.
         """
         agent = values.get("agent")
         if agent and isinstance(agent, Runnable):

@@ -1209,7 +1208,7 @@ class AgentExecutor(Chain):
             async_: Whether to run async. (Ignored)

         Returns:
-            AgentExecutorIterator: Agent executor iterator object.
+            Agent executor iterator object.
         """
         return AgentExecutorIterator(
             self,

@@ -1244,7 +1243,7 @@ class AgentExecutor(Chain):
             name: Name of tool.

         Returns:
-            BaseTool: Tool object.
+            Tool object.
         """
         return {tool.name: tool for tool in self.tools}[name]

@@ -1759,7 +1758,7 @@ class AgentExecutor(Chain):
             kwargs: Additional arguments.

         Yields:
-            AddableDict: Addable dictionary.
+            Addable dictionary.
         """
         config = ensure_config(config)
         iterator = AgentExecutorIterator(

@@ -1790,7 +1789,7 @@ class AgentExecutor(Chain):
             kwargs: Additional arguments.

         Yields:
-            AddableDict: Addable dictionary.
+            Addable dictionary.
         """
         config = ensure_config(config)
         iterator = AgentExecutorIterator(
@@ -58,7 +58,7 @@ def create_vectorstore_agent(
         from langchain_openai import ChatOpenAI, OpenAIEmbeddings
         from langgraph.prebuilt import create_react_agent

-        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+        model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

         vector_store = InMemoryVectorStore.from_texts(
             [

@@ -74,7 +74,7 @@ def create_vectorstore_agent(
             "Fetches information about pets.",
         )

-        agent = create_react_agent(llm, [tool])
+        agent = create_react_agent(model, [tool])

         for step in agent.stream(
             {"messages": [("human", "What are dogs known for?")]},

@@ -86,7 +86,7 @@ def create_vectorstore_agent(
     Args:
         llm: LLM that will be used by the agent
         toolkit: Set of tools for the agent
-        callback_manager: Object to handle the callback [ Defaults to `None`. ]
+        callback_manager: Object to handle the callback
         prefix: The prefix prompt for the agent. If not provided uses default PREFIX.
         verbose: If you want to see the content of the scratchpad.
             [ Defaults to `False` ]

@@ -156,7 +156,7 @@ def create_vectorstore_router_agent(
         from langchain_openai import ChatOpenAI, OpenAIEmbeddings
         from langgraph.prebuilt import create_react_agent

-        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+        model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

         pet_vector_store = InMemoryVectorStore.from_texts(
             [

@@ -187,7 +187,7 @@ def create_vectorstore_router_agent(
             ),
         ]

-        agent = create_react_agent(llm, tools)
+        agent = create_react_agent(model, tools)

         for step in agent.stream(
             {"messages": [("human", "Tell me about carrots.")]},

@@ -200,7 +200,7 @@ def create_vectorstore_router_agent(
         llm: LLM that will be used by the agent
         toolkit: Set of tools for the agent which have routing capability with multiple
             vector stores
-        callback_manager: Object to handle the callback [ Defaults to `None`. ]
+        callback_manager: Object to handle the callback
         prefix: The prefix prompt for the router agent.
             If not provided uses default ROUTER_PREFIX.
         verbose: If you want to see the content of the scratchpad.
@@ -91,8 +91,8 @@ class ConversationalChatAgent(Agent):
                 Defaults to the PREFIX.
             human_message: The human message to use.
                 Defaults to the SUFFIX.
-            input_variables: The input variables to use. Defaults to `None`.
-            output_parser: The output parser to use. Defaults to `None`.
+            input_variables: The input variables to use.
+            output_parser: The output parser to use.

         Returns:
             A PromptTemplate.

@@ -16,7 +16,7 @@ def format_log_to_str(
         Defaults to "Thought: ".

     Returns:
-        str: The scratchpad.
+        The scratchpad.
     """
     thoughts = ""
     for action, observation in intermediate_steps:

@@ -14,7 +14,7 @@ def format_log_to_messages(
         Defaults to "{observation}".

     Returns:
-        List[BaseMessage]: The scratchpad.
+        The scratchpad.
     """
     thoughts: list[BaseMessage] = []
     for action, observation in intermediate_steps:

@@ -38,14 +38,14 @@ def initialize_agent(
         tools: List of tools this agent has access to.
         llm: Language model to use as the agent.
         agent: Agent type to use. If `None` and agent_path is also None, will default
-            to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to `None`.
+            to AgentType.ZERO_SHOT_REACT_DESCRIPTION.
         callback_manager: CallbackManager to use. Global callback manager is used if
-            not provided. Defaults to `None`.
+            not provided.
         agent_path: Path to serialized agent to use. If `None` and agent is also None,
-            will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to `None`.
+            will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION.
         agent_kwargs: Additional keyword arguments to pass to the underlying agent.
-            Defaults to `None`.
-        tags: Tags to apply to the traced runs. Defaults to `None`.
+        tags: Tags to apply to the traced runs.
         kwargs: Additional keyword arguments passed to the agent executor.

     Returns:
@@ -98,7 +98,7 @@ class ZeroShotAgent(Agent):
             format_instructions: Instructions on how to use the tools.
                 Defaults to FORMAT_INSTRUCTIONS
             input_variables: List of input variables the final prompt will expect.
-                Defaults to `None`.

         Returns:
             A PromptTemplate with the template assembled from the pieces here.

@@ -129,13 +129,13 @@ class ZeroShotAgent(Agent):
         Args:
             llm: The LLM to use as the agent LLM.
             tools: The tools to use.
-            callback_manager: The callback manager to use. Defaults to `None`.
-            output_parser: The output parser to use. Defaults to `None`.
+            callback_manager: The callback manager to use.
+            output_parser: The output parser to use.
             prefix: The prefix to use. Defaults to PREFIX.
             suffix: The suffix to use. Defaults to SUFFIX.
             format_instructions: The format instructions to use.
                 Defaults to FORMAT_INSTRUCTIONS.
-            input_variables: The input variables to use. Defaults to `None`.
+            input_variables: The input variables to use.
             kwargs: Additional parameters to pass to the agent.
         """
         cls._validate_tools(tools)
@@ -314,7 +314,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
             run_metadata: Metadata to associate with new run.
             attachments: A list of files attached to the message, and the
                 tools they should be added to.
-            config: Runnable config. Defaults to `None`.
+            config: Runnable config.
             **kwargs: Additional arguments.

         Returns:

@@ -446,7 +446,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
             max_completion_tokens: Allow setting max_completion_tokens for this run.
             max_prompt_tokens: Allow setting max_prompt_tokens for this run.
             run_metadata: Metadata to associate with new run.
-            config: Runnable config. Defaults to `None`.
+            config: Runnable config.
             kwargs: Additional arguments.

         Returns:
@@ -106,7 +106,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         Args:
             intermediate_steps: Steps the LLM has taken to date,
                 along with observations.
-            callbacks: Callbacks to use. Defaults to `None`.
+            callbacks: Callbacks to use.
             with_functions: Whether to use functions. Defaults to `True`.
             **kwargs: User inputs.

@@ -146,7 +146,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         Args:
             intermediate_steps: Steps the LLM has taken to date,
                 along with observations.
-            callbacks: Callbacks to use. Defaults to `None`.
+            callbacks: Callbacks to use.
             **kwargs: User inputs.

         Returns:

@@ -261,8 +261,8 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         Args:
             llm: The LLM to use as the agent.
             tools: The tools to use.
-            callback_manager: The callback manager to use. Defaults to `None`.
-            extra_prompt_messages: Extra prompt messages to use. Defaults to `None`.
+            callback_manager: The callback manager to use.
+            extra_prompt_messages: Extra prompt messages to use.
             system_message: The system message to use.
                 Defaults to a default system message.
             kwargs: Additional parameters to pass to the agent.
@@ -40,7 +40,7 @@ def StreamlitCallbackHandler( # noqa: N802
         Defaults to `True`.
     thought_labeler
         An optional custom LLMThoughtLabeler instance. If unspecified, the handler
-        will use the default thought labeling logic. Defaults to `None`.
+        will use the default thought labeling logic.

     Returns:
     -------

@@ -42,7 +42,7 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
         limit_to_domains: The allowed domains.

     Returns:
-        True if the URL is in the allowed domains, False otherwise.
+        `True` if the URL is in the allowed domains, `False` otherwise.
     """
     scheme, domain = _extract_scheme_and_domain(url)
@@ -143,7 +143,7 @@ try:
                 description: Limit the number of results
             \"\"\"

-            llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+            model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
             toolkit = RequestsToolkit(
                 requests_wrapper=TextRequestsWrapper(headers={}), # no auth required
                 allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS,

@@ -152,7 +152,7 @@ try:

             api_request_chain = (
                 API_URL_PROMPT.partial(api_docs=api_spec)
-                | llm.bind_tools(tools, tool_choice="any")
+                | model.bind_tools(tools, tool_choice="any")
             )

             class ChainState(TypedDict):

@@ -169,7 +169,7 @@ try:
                 return {"messages": [response]}

             async def acall_model(state: ChainState, config: RunnableConfig):
-                response = await llm.ainvoke(state["messages"], config)
+                response = await model.ainvoke(state["messages"], config)
                 return {"messages": [response]}

             graph_builder = StateGraph(ChainState)
@@ -73,14 +73,14 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
     """

     memory: BaseMemory | None = None
-    """Optional memory object. Defaults to `None`.
+    """Optional memory object.
     Memory is a class that gets called at the start
     and at the end of every chain. At the start, memory loads variables and passes
     them along in the chain. At the end, it saves any returned variables.
     There are many different types of memory - please see memory docs
     for the full catalog."""
     callbacks: Callbacks = Field(default=None, exclude=True)
-    """Optional list of callback handlers (or callback manager). Defaults to `None`.
+    """Optional list of callback handlers (or callback manager).
     Callback handlers are called throughout the lifecycle of a call to a chain,
     starting with on_chain_start, ending with on_chain_end or on_chain_error.
     Each custom chain can optionally call additional callback methods, see Callback docs

@@ -90,13 +90,13 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
     will be printed to the console. Defaults to the global `verbose` value,
     accessible via `langchain.globals.get_verbose()`."""
     tags: list[str] | None = None
-    """Optional list of tags associated with the chain. Defaults to `None`.
+    """Optional list of tags associated with the chain.
     These tags will be associated with each call to this chain,
     and passed as arguments to the handlers defined in `callbacks`.
     You can use these to eg identify a specific instance of a chain with its use case.
     """
     metadata: dict[str, Any] | None = None
-    """Optional metadata associated with the chain. Defaults to `None`.
+    """Optional metadata associated with the chain.
     This metadata will be associated with each call to this chain,
     and passed as arguments to the handlers defined in `callbacks`.
     You can use these to eg identify a specific instance of a chain with its use case.

@@ -394,7 +394,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
             tags: List of string tags to pass to all callbacks. These will be passed in
                 addition to tags passed to the chain during construction, but only
                 these runtime tags will propagate to calls to other objects.
-            metadata: Optional metadata associated with the chain. Defaults to `None`.
+            metadata: Optional metadata associated with the chain.
             run_name: Optional name for this run of the chain.
             include_run_info: Whether to include run info in the response. Defaults
                 to False.

@@ -446,7 +446,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
             tags: List of string tags to pass to all callbacks. These will be passed in
                 addition to tags passed to the chain during construction, but only
                 these runtime tags will propagate to calls to other objects.
-            metadata: Optional metadata associated with the chain. Defaults to `None`.
+            metadata: Optional metadata associated with the chain.
             run_name: Optional name for this run of the chain.
             include_run_info: Whether to include run info in the response. Defaults
                 to False.
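A minimal sketch of how the per-run `tags` and `metadata` described above are supplied at invocation time; a `RunnableLambda` stands in for a real chain:

```python
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda text: text.upper())
result = chain.invoke(
    "hello",
    config={"tags": ["experiment-a"], "metadata": {"user_id": "u-123"}},
)
assert result == "HELLO"  # the tags/metadata flow to callbacks and tracing
```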
@@ -53,16 +53,16 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
                 input_variables=["page_content"], template="{page_content}"
             )
             document_variable_name = "context"
-            llm = OpenAI()
+            model = OpenAI()
             # The prompt here should take as an input variable the
             # `document_variable_name`
             prompt = PromptTemplate.from_template("Summarize this content: {context}")
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            llm_chain = LLMChain(llm=model, prompt=prompt)
             # We now define how to combine these summaries
             reduce_prompt = PromptTemplate.from_template(
                 "Combine these summaries: {context}"
             )
-            reduce_llm_chain = LLMChain(llm=llm, prompt=reduce_prompt)
+            reduce_llm_chain = LLMChain(llm=model, prompt=reduce_prompt)
             combine_documents_chain = StuffDocumentsChain(
                 llm_chain=reduce_llm_chain,
                 document_prompt=document_prompt,

@@ -79,7 +79,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
             # which is specifically aimed at collapsing documents BEFORE
             # the final call.
             prompt = PromptTemplate.from_template("Collapse this content: {context}")
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            llm_chain = LLMChain(llm=model, prompt=prompt)
             collapse_documents_chain = StuffDocumentsChain(
                 llm_chain=llm_chain,
                 document_prompt=document_prompt,

@@ -42,7 +42,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
             from langchain_classic.output_parsers.regex import RegexParser

             document_variable_name = "context"
-            llm = OpenAI()
+            model = OpenAI()
             # The prompt here should take as an input variable the
             # `document_variable_name`
             # The actual prompt will need to be a lot more complex, this is just

@@ -61,7 +61,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
                 input_variables=["context"],
                 output_parser=output_parser,
             )
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            llm_chain = LLMChain(llm=model, prompt=prompt)
             chain = MapRerankDocumentsChain(
                 llm_chain=llm_chain,
                 document_variable_name=document_variable_name,
@@ -171,11 +171,11 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
                 input_variables=["page_content"], template="{page_content}"
             )
             document_variable_name = "context"
-            llm = OpenAI()
+            model = OpenAI()
             # The prompt here should take as an input variable the
             # `document_variable_name`
             prompt = PromptTemplate.from_template("Summarize this content: {context}")
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            llm_chain = LLMChain(llm=model, prompt=prompt)
             combine_documents_chain = StuffDocumentsChain(
                 llm_chain=llm_chain,
                 document_prompt=document_prompt,

@@ -188,7 +188,7 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
             # which is specifically aimed at collapsing documents BEFORE
             # the final call.
             prompt = PromptTemplate.from_template("Collapse this content: {context}")
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            llm_chain = LLMChain(llm=model, prompt=prompt)
             collapse_documents_chain = StuffDocumentsChain(
                 llm_chain=llm_chain,
                 document_prompt=document_prompt,

@@ -55,11 +55,11 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
                 input_variables=["page_content"], template="{page_content}"
             )
             document_variable_name = "context"
-            llm = OpenAI()
+            model = OpenAI()
             # The prompt here should take as an input variable the
             # `document_variable_name`
             prompt = PromptTemplate.from_template("Summarize this content: {context}")
-            initial_llm_chain = LLMChain(llm=llm, prompt=prompt)
+            initial_llm_chain = LLMChain(llm=model, prompt=prompt)
             initial_response_name = "prev_response"
             # The prompt here should take as an input variable the
             # `document_variable_name` as well as `initial_response_name`

@@ -67,7 +67,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
                 "Here's your first summary: {prev_response}. "
                 "Now add to it based on the following context: {context}"
             )
-            refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine)
+            refine_llm_chain = LLMChain(llm=model, prompt=prompt_refine)
             chain = RefineDocumentsChain(
                 initial_llm_chain=initial_llm_chain,
                 refine_llm_chain=refine_llm_chain,
@@ -68,8 +68,8 @@ def create_stuff_documents_chain(
             prompt = ChatPromptTemplate.from_messages(
                 [("system", "What are everyone's favorite colors:\n\n{context}")]
             )
-            llm = ChatOpenAI(model="gpt-3.5-turbo")
-            chain = create_stuff_documents_chain(llm, prompt)
+            model = ChatOpenAI(model="gpt-3.5-turbo")
+            chain = create_stuff_documents_chain(model, prompt)

             docs = [
                 Document(page_content="Jesse loves red but not yellow"),

@@ -132,11 +132,11 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
                 input_variables=["page_content"], template="{page_content}"
             )
             document_variable_name = "context"
-            llm = OpenAI()
+            model = OpenAI()
             # The prompt here should take as an input variable the
             # `document_variable_name`
             prompt = PromptTemplate.from_template("Summarize this content: {context}")
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            llm_chain = LLMChain(llm=model, prompt=prompt)
             chain = StuffDocumentsChain(
                 llm_chain=llm_chain,
                 document_prompt=document_prompt,
@@ -58,7 +58,7 @@ class ConstitutionalChain(Chain):
             from langgraph.graph import END, START, StateGraph
             from typing_extensions import Annotated, TypedDict

-            llm = ChatOpenAI(model="gpt-4o-mini")
+            model = ChatOpenAI(model="gpt-4o-mini")

             class Critique(TypedDict):
                 """Generate a critique, if needed."""

@@ -86,9 +86,9 @@ class ConstitutionalChain(Chain):
                 "Revision Request: {revision_request}"
             )

-            chain = llm | StrOutputParser()
-            critique_chain = critique_prompt | llm.with_structured_output(Critique)
-            revision_chain = revision_prompt | llm | StrOutputParser()
+            chain = model | StrOutputParser()
+            critique_chain = critique_prompt | model.with_structured_output(Critique)
+            revision_chain = revision_prompt | model | StrOutputParser()


             class State(TypedDict):

@@ -170,16 +170,16 @@ class ConstitutionalChain(Chain):
             from langchain_classic.chains.constitutional_ai.models \
                 import ConstitutionalPrinciple

-            llm = OpenAI()
+            model = OpenAI()

             qa_prompt = PromptTemplate(
                 template="Q: {question} A:",
                 input_variables=["question"],
             )
-            qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
+            qa_chain = LLMChain(llm=model, prompt=qa_prompt)

             constitutional_chain = ConstitutionalChain.from_llm(
-                llm=llm,
+                llm=model,
                 chain=qa_chain,
                 constitutional_principles=[
                     ConstitutionalPrinciple(
@@ -47,9 +47,9 @@ class ConversationChain(LLMChain):
                 return store[session_id]


-            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+            model = ChatOpenAI(model="gpt-3.5-turbo-0125")

-            chain = RunnableWithMessageHistory(llm, get_session_history)
+            chain = RunnableWithMessageHistory(model, get_session_history)
             chain.invoke(
                 "Hi I'm Bob.",
                 config={"configurable": {"session_id": "1"}},

@@ -85,9 +85,9 @@ class ConversationChain(LLMChain):
                 return store[session_id]


-            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+            model = ChatOpenAI(model="gpt-3.5-turbo-0125")

-            chain = RunnableWithMessageHistory(llm, get_session_history)
+            chain = RunnableWithMessageHistory(model, get_session_history)
             chain.invoke(
                 "Hi I'm Bob.",
                 config={"configurable": {"session_id": "1"}},

@@ -283,7 +283,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):

             retriever = ...  # Your retriever

-            llm = ChatOpenAI()
+            model = ChatOpenAI()

             # Contextualize question
             contextualize_q_system_prompt = (

@@ -301,7 +301,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
                 ]
             )
             history_aware_retriever = create_history_aware_retriever(
-                llm, retriever, contextualize_q_prompt
+                model, retriever, contextualize_q_prompt
             )

             # Answer question

@@ -324,7 +324,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
             # Below we use create_stuff_documents_chain to feed all retrieved context
             # into the LLM. Note that we can also use StuffDocumentsChain and other
             # instances of BaseCombineDocumentsChain.
-            question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
+            question_answer_chain = create_stuff_documents_chain(model, qa_prompt)
             rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)

             # Usage:

@@ -371,8 +371,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
                 "Follow up question: {question}"
             )
             prompt = PromptTemplate.from_template(template)
-            llm = OpenAI()
-            question_generator_chain = LLMChain(llm=llm, prompt=prompt)
+            model = OpenAI()
+            question_generator_chain = LLMChain(llm=model, prompt=prompt)
             chain = ConversationalRetrievalChain(
                 combine_docs_chain=combine_docs_chain,
                 retriever=retriever,
@@ -38,10 +38,10 @@ def create_history_aware_retriever(
             from langchain_classic import hub

             rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
-            llm = ChatOpenAI()
+            model = ChatOpenAI()
             retriever = ...
             chat_retriever_chain = create_history_aware_retriever(
-                llm, retriever, rephrase_prompt
+                model, retriever, rephrase_prompt
             )

             chain.invoke({"input": "...", "chat_history": })

@@ -55,8 +55,8 @@ class LLMChain(Chain):

             prompt_template = "Tell me a {adjective} joke"
             prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template)
-            llm = OpenAI()
-            chain = prompt | llm | StrOutputParser()
+            model = OpenAI()
+            chain = prompt | model | StrOutputParser()

             chain.invoke("your adjective here")
             ```

@@ -69,7 +69,7 @@ class LLMChain(Chain):

             prompt_template = "Tell me a {adjective} joke"
             prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template)
-            llm = LLMChain(llm=OpenAI(), prompt=prompt)
+            model = LLMChain(llm=OpenAI(), prompt=prompt)
             ```
     """

@@ -80,8 +80,8 @@ class LLMCheckerChain(Chain):
             from langchain_community.llms import OpenAI
             from langchain_classic.chains import LLMCheckerChain

-            llm = OpenAI(temperature=0.7)
-            checker_chain = LLMCheckerChain.from_llm(llm)
+            model = OpenAI(temperature=0.7)
+            checker_chain = LLMCheckerChain.from_llm(model)
             ```
     """
@@ -84,9 +84,9 @@ class LLMMathChain(Chain):
                 )
             )

-            llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+            model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
             tools = [calculator]
-            llm_with_tools = llm.bind_tools(tools, tool_choice="any")
+            model_with_tools = model.bind_tools(tools, tool_choice="any")

             class ChainState(TypedDict):
                 \"\"\"LangGraph state.\"\"\"

@@ -95,11 +95,11 @@ class LLMMathChain(Chain):

             async def acall_chain(state: ChainState, config: RunnableConfig):
                 last_message = state["messages"][-1]
-                response = await llm_with_tools.ainvoke(state["messages"], config)
+                response = await model_with_tools.ainvoke(state["messages"], config)
                 return {"messages": [response]}

             async def acall_model(state: ChainState, config: RunnableConfig):
-                response = await llm.ainvoke(state["messages"], config)
+                response = await model.ainvoke(state["messages"], config)
                 return {"messages": [response]}

             graph_builder = StateGraph(ChainState)

@@ -83,8 +83,8 @@ class LLMSummarizationCheckerChain(Chain):
             from langchain_community.llms import OpenAI
             from langchain_classic.chains import LLMSummarizationCheckerChain

-            llm = OpenAI(temperature=0.0)
-            checker_chain = LLMSummarizationCheckerChain.from_llm(llm)
+            model = OpenAI(temperature=0.0)
+            checker_chain = LLMSummarizationCheckerChain.from_llm(model)
             ```
     """
@@ -84,8 +84,8 @@ class NatBotChain(Chain):
         """Load with default LLMChain."""
         msg = (
             "This method is no longer implemented. Please use from_llm."
-            "llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)"
-            "For example, NatBotChain.from_llm(llm, objective)"
+            "model = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)"
+            "For example, NatBotChain.from_llm(model, objective)"
         )
         raise NotImplementedError(msg)

@@ -107,7 +107,7 @@ def create_openai_fn_chain(
                 fav_food: str | None = Field(None, description="The dog's favorite food")


-            llm = ChatOpenAI(model="gpt-4", temperature=0)
+            model = ChatOpenAI(model="gpt-4", temperature=0)
             prompt = ChatPromptTemplate.from_messages(
                 [
                     ("system", "You are a world class algorithm for recording entities."),

@@ -115,7 +115,7 @@ def create_openai_fn_chain(
                     ("human", "Tip: Make sure to answer in the correct format"),
                 ]
             )
-            chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
+            chain = create_openai_fn_chain([RecordPerson, RecordDog], model, prompt)
             chain.run("Harry was a chubby brown beagle who loved chicken")
             # -> RecordDog(name="Harry", color="brown", fav_food="chicken")

@@ -191,7 +191,7 @@ def create_structured_output_chain(
                 color: str = Field(..., description="The dog's color")
                 fav_food: str | None = Field(None, description="The dog's favorite food")

-            llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
+            model = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
             prompt = ChatPromptTemplate.from_messages(
                 [
                     ("system", "You are a world class algorithm for extracting information in structured formats."),

@@ -199,7 +199,7 @@ def create_structured_output_chain(
                     ("human", "Tip: Make sure to answer in the correct format"),
                 ]
             )
-            chain = create_structured_output_chain(Dog, llm, prompt)
+            chain = create_structured_output_chain(Dog, model, prompt)
             chain.run("Harry was a chubby brown beagle who loved chicken")
             # -> Dog(name="Harry", color="brown", fav_food="chicken")

@@ -83,12 +83,12 @@ def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:
             from langchain_classic.chains import create_citation_fuzzy_match_runnable
             from langchain_openai import ChatOpenAI

-            llm = ChatOpenAI(model="gpt-4o-mini")
+            model = ChatOpenAI(model="gpt-4o-mini")

             context = "Alice has blue eyes. Bob has brown eyes. Charlie has green eyes."
             question = "What color are Bob's eyes?"

-            chain = create_citation_fuzzy_match_runnable(llm)
+            chain = create_citation_fuzzy_match_runnable(model)
             chain.invoke({"question": question, "context": context})
             ```
Some files were not shown because too many files have changed in this diff.