mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-06 09:10:27 +00:00)

Compare commits: `replace_ap`...`langchain-` (74 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 4d623133a5 |  |
|  | 8fbf192c2a |  |
|  | 241a382fba |  |
|  | c194ee2046 |  |
|  | 85567f1dc3 |  |
|  | 6f4978041e |  |
|  | f1fca4f46f |  |
|  | 2b899fe961 |  |
|  | 3152d25811 |  |
|  | 3b8cb3d4b6 |  |
|  | 15047ae28a |  |
|  | 888fa3a2fb |  |
|  | 90346b8a35 |  |
|  | 2d5efd7b29 |  |
|  | 1d2273597a |  |
|  | 9dd494ddcd |  |
|  | 2fa07b19f6 |  |
|  | a022e3c14d |  |
|  | e0e11423d9 |  |
|  | 34de8ec1f3 |  |
|  | 3d288fd610 |  |
|  | 055cccde28 |  |
|  | 361514d11d |  |
|  | 90b68059f5 |  |
|  | 87ad5276e4 |  |
|  | 5489df75d7 |  |
|  | c6b3f5b888 |  |
|  | 15db024811 |  |
|  | 6d73003b17 |  |
|  | 13259a109a |  |
|  | aa78be574a |  |
|  | d0dd1b30d1 |  |
|  | 0338a15192 |  |
|  | e10d99b728 |  |
|  | c9018f81ec |  |
|  | 31718492c7 |  |
|  | 2209878f48 |  |
|  | dd77dbe3ab |  |
|  | eb19e12527 |  |
|  | 551e86a517 |  |
|  | 8734c05f64 |  |
|  | 0c8cbfb7de |  |
|  | 89c3428d85 |  |
|  | 707e96c541 |  |
|  | 26e0a00c4c |  |
|  | d0f8f00e7e |  |
|  | a39132787c |  |
|  | 296994ebf0 |  |
|  | b5b31eec88 |  |
|  | 8f6851c349 |  |
|  | 0788461abd |  |
|  | 3bfd1f6d8a |  |
|  | d83c3a12bf |  |
|  | 79200cf3c2 |  |
|  | bcb6789888 |  |
|  | 89b7933ef1 |  |
|  | 4da5a8081f |  |
|  | 53e9f00804 |  |
|  | 6e25e185f6 |  |
|  | 68ceeb64f6 |  |
|  | edae976b81 |  |
|  | 9f4366bc9d |  |
|  | 99e0a60aab |  |
|  | d38729fbac |  |
|  | ff0d21cfd5 |  |
|  | 9140a7cb86 |  |
|  | 41fe18bc80 |  |
|  | 9105573cb3 |  |
|  | fff87e95d1 |  |
|  | 9beb29a34c |  |
|  | ca00f5aed9 |  |
|  | 637777b8e7 |  |
|  | 1cf851e054 |  |
|  | 961f965f0c |  |
`.github/scripts/check_diff.py` (vendored, 8 changed lines)

```diff
@@ -132,21 +132,21 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
     if job == "codspeed":
         py_versions = ["3.12"]  # 3.13 is not yet supported
     elif dir_ == "libs/core":
-        py_versions = ["3.10", "3.11", "3.12", "3.13"]
+        py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
     # custom logic for specific directories

     elif dir_ == "libs/langchain" and job == "extended-tests":
-        py_versions = ["3.10", "3.13"]
+        py_versions = ["3.10", "3.14"]
     elif dir_ == "libs/langchain_v1":
         py_versions = ["3.10", "3.13"]
-    elif dir_ in {"libs/cli"}:
+    elif dir_ in {"libs/cli", "libs/partners/chroma", "libs/partners/nomic"}:
         py_versions = ["3.10", "3.13"]

     elif dir_ == ".":
         # unable to install with 3.13 because tokenizers doesn't support 3.13 yet
         py_versions = ["3.10", "3.12"]
     else:
-        py_versions = ["3.10", "3.13"]
+        py_versions = ["3.10", "3.14"]

     return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]
```
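Condensed for reference, a minimal runnable sketch of the version-matrix logic as it stands after this change; the branch conditions and version lists come from the hunk above, while the standalone function name and the example call are illustrative:

```python
from typing import Dict, List


def get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
    """Sketch of the post-change branches: map a directory to its Python versions."""
    if job == "codspeed":
        py_versions = ["3.12"]  # 3.13 is not yet supported
    elif dir_ == "libs/core":
        py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
    elif dir_ == "libs/langchain" and job == "extended-tests":
        py_versions = ["3.10", "3.14"]
    elif dir_ == "libs/langchain_v1":
        py_versions = ["3.10", "3.13"]
    elif dir_ in {"libs/cli", "libs/partners/chroma", "libs/partners/nomic"}:
        py_versions = ["3.10", "3.13"]
    elif dir_ == ".":
        py_versions = ["3.10", "3.12"]  # tokenizers doesn't support 3.13 yet
    else:
        py_versions = ["3.10", "3.14"]
    # Each dict becomes one entry in the CI job matrix.
    return [{"working-directory": dir_, "python-version": v} for v in py_versions]


print(get_configs_for_single_dir("test", "libs/core"))
# Five entries for libs/core, one per version from 3.10 through 3.14.
```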
`.github/workflows/check_diffs.yml` (vendored, 2 changed lines)

```diff
@@ -186,7 +186,7 @@ jobs:

       # We have to use 3.12 as 3.13 is not yet supported
       - name: "📦 Install UV Package Manager"
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
         with:
          python-version: "3.12"
```
```diff
@@ -163,9 +163,11 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
 **Documentation Guidelines:**

 - Types go in function signatures, NOT in docstrings
+- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
 - Focus on "why" rather than "what" in descriptions
 - Document all parameters, return values, and exceptions
 - Keep descriptions concise but clear
 - Ensure American English spelling (e.g., "behavior", not "behaviour")

+📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
```
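As an illustration of these guidelines applied to the `send_email` signature from the hunk header, a hypothetical docstring might look like the following; the parameter semantics, the conditional-priority detail, and the raised exception are invented for the example, not taken from the repository:

```python
def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
    """Send a plain-text message to a single recipient.

    Args:
        to: Recipient address, assumed to be validated by the caller.
        msg: Message body, kept plain text so it renders in any client.
        priority: Delivery priority. Mentioned here only because alerting
            code paths set it conditionally to ``"high"``.

    Returns:
        True if the mail relay accepted the message.

    Raises:
        ConnectionError: If the mail relay is unreachable.
    """
    raise NotImplementedError  # illustrative stub only
```

Note that the types live only in the signature, the default for `priority` is documented solely because it is set conditionally, and the boolean return is documented because it is non-obvious.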
```diff
@@ -2,6 +2,7 @@

 Please see the following guides for migrating LangChain code:

+* Migrate to [LangChain v1.0](https://docs.langchain.com/oss/python/migrate/langchain-v1)
 * Migrate to [LangChain v0.3](https://python.langchain.com/docs/versions/v0_3/)
 * Migrate to [LangChain v0.2](https://python.langchain.com/docs/versions/v0_2/)
 * Migrating from [LangChain 0.0.x Chains](https://python.langchain.com/docs/versions/migrating_chains/)
```
```diff
@@ -12,13 +12,16 @@
 <p align="center">
     <a href="https://opensource.org/licenses/MIT" target="_blank">
-        <img src="https://img.shields.io/pypi/l/langchain-core?style=flat-square" alt="PyPI - License">
+        <img src="https://img.shields.io/pypi/l/langchain" alt="PyPI - License">
     </a>
-    <a href="https://pypistats.org/packages/langchain-core" target="_blank">
+    <a href="https://pypistats.org/packages/langchain" target="_blank">
         <img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads">
     </a>
+    <a href="https://pypi.org/project/langchain/#history" target="_blank">
+        <img src="https://img.shields.io/pypi/v/langchain?label=%20" alt="Version">
+    </a>
     <a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank">
-        <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square" alt="Open in Dev Containers">
+        <img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode" alt="Open in Dev Containers">
     </a>
     <a href="https://codespaces.new/langchain-ai/langchain" target="_blank">
         <img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20">
```
````diff
@@ -1,6 +1,30 @@
 # langchain-cli

-This package implements the official CLI for LangChain. Right now, it is most useful
-for getting started with LangChain Templates!
+[](https://pypi.org/project/langchain-cli/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-cli)
+[](https://twitter.com/langchainai)
+
+## Quick Install
+
+```bash
+pip install langchain-cli
+```
+
+## 🤔 What is this?
+
+This package implements the official CLI for LangChain. Right now, it is most useful for getting started with LangChain Templates!
+
+## 📖 Documentation
+
+[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md)
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
````
@@ -1,264 +1,264 @@

```json
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# Chat__ModuleName__\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
    "\n",
    "### Model features\n",
    "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
    "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
    "        \"Enter your __ModuleName__ API key: \"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
   "metadata": {},
   "source": [
    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU __package_name__"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from __module_name__ import Chat__ModuleName__\n",
    "\n",
    "model = Chat__ModuleName__(\n",
    "    model=\"model-name\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
    "    max_retries=2,\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "- TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = model.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(ai_msg.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
    "\n",
    "- TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt = ChatPromptTemplate(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
    "        ),\n",
    "        (\"human\", \"{input}\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "chain = prompt | model\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this model provider\n",
    "\n",
    "E.g. creating/using finetuned models via this provider. Delete if not relevant."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
```
@@ -1,238 +1,238 @@

```json
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "67db2992",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9597802c",
   "metadata": {},
   "source": [
    "# __ModuleName__LLM\n",
    "\n",
    "- [ ] TODO: Make sure API reference link is correct\n",
    "\n",
    "This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc51e756",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
    "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
    "        \"Enter your __ModuleName__ API key: \"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4b6e1ca6",
   "metadata": {},
   "source": [
    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "196c2b41",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "809c6577",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59c710c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU __package_name__"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0a760037",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0562a13",
   "metadata": {},
   "outputs": [],
   "source": [
    "from __module_name__ import __ModuleName__LLM\n",
    "\n",
    "model = __ModuleName__LLM(\n",
    "    model=\"model-name\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
    "    max_retries=2,\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0ee90032",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "- [ ] TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "035dea0f",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "input_text = \"__ModuleName__ is an AI company that \"\n",
    "\n",
    "completion = model.invoke(input_text)\n",
    "completion"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "add38532",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
    "\n",
    "- TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "078e9db2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import PromptTemplate\n",
    "\n",
    "prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
    "\n",
    "chain = prompt | model\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e99eef30",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this model provider\n",
    "\n",
    "E.g. creating/using finetuned models via this provider. Delete if not relevant"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e9bdfcef",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.11.1 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "vscode": {
   "interpreter": {
    "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
```
@@ -1,204 +1,204 @@

```json
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {
    "vscode": {
     "languageId": "raw"
    }
   },
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__ByteStore\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# __ModuleName__ByteStore\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
    "\n",
    "## Overview\n",
    "\n",
    "- TODO: (Optional) A short introduction to the underlying technology/API.\n",
    "\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: |\n",
    "| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ |  |  |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
    "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
    "        \"Enter your __ModuleName__ API key: \"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU __package_name__"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our byte store:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __module_name__ import __ModuleName__ByteStore\n",
    "\n",
    "kv_store = __ModuleName__ByteStore(\n",
    "    # params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Usage\n",
    "\n",
    "- TODO: Run cells so output can be seen.\n",
    "\n",
    "You can set data under keys like this using the `mset` method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "kv_store.mset(\n",
    "    [\n",
    "        [\"key1\", b\"value1\"],\n",
    "        [\"key2\", b\"value2\"],\n",
    "    ]\n",
    ")\n",
    "\n",
    "kv_store.mget(\n",
    "    [\n",
    "        \"key1\",\n",
    "        \"key2\",\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And you can delete data using the `mdelete` method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "kv_store.mdelete(\n",
    "    [\n",
    "        \"key1\",\n",
    "        \"key2\",\n",
    "    ]\n",
    ")\n",
    "\n",
    "kv_store.mget(\n",
    "    [\n",
    "        \"key1\",\n",
    "        \"key2\",\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this key-value store provider\n",
    "\n",
    "E.g. extra initialization. Delete if not relevant."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
```
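The `mset`/`mget`/`mdelete` calls in this template follow the byte-store contract from `langchain_core.stores`; a runnable sketch of the same sequence, assuming a current `langchain-core` install and using the built-in `InMemoryByteStore`:

```python
from langchain_core.stores import InMemoryByteStore

kv_store = InMemoryByteStore()

# Batch-set two keys, then read them back in one call.
kv_store.mset([("key1", b"value1"), ("key2", b"value2")])
print(kv_store.mget(["key1", "key2"]))  # [b'value1', b'value2']

# Batch-delete; missing keys come back as None rather than raising.
kv_store.mdelete(["key1", "key2"])
print(kv_store.mget(["key1", "key2"]))  # [None, None]
```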
@@ -1,271 +1,271 @@

```json
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "10238e62-3465-4973-9279-606cbb7ccf16",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a6f91f20",
   "metadata": {},
   "source": [
    "# __ModuleName__\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about underlying API, etc.\n",
    "\n",
    "## Overview\n",
    "\n",
    "### Integration details\n",
    "\n",
    "- TODO: Make sure links and features are correct\n",
    "\n",
    "| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ |  |\n",
    "\n",
    "### Tool features\n",
    "\n",
    "- TODO: Add feature table if it makes sense\n",
    "\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Add any additional deps\n",
    "\n",
    "The integration lives in the `langchain-community` package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f85b4089",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install --quiet -U langchain-community"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b15e9266",
   "metadata": {},
   "source": [
    "### Credentials\n",
    "\n",
    "- TODO: Add any credentials that are needed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
    "#     os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bc5ab717-fd27-4c59-b912-bdd099541478",
   "metadata": {},
   "source": [
    "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "- TODO: Fill in instantiation params\n",
    "\n",
    "Here we show how to instantiate an instance of the __ModuleName__ tool, with "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.tools import __ModuleName__\n",
    "\n",
    "\n",
    "tool = __ModuleName__(...)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "74147a1a",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
    "\n",
    "- TODO: Describe what the tool args are, fill them in, run cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "tool.invoke({...})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d6e73897",
   "metadata": {},
   "source": [
    "### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
    "\n",
    "We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
    "\n",
    "- TODO: Fill in tool args and run cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f90e33a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
    "model_generated_tool_call = {\n",
    "    \"args\": {...},  # TODO: FILL IN\n",
    "    \"id\": \"1\",\n",
    "    \"name\": tool.name,\n",
    "    \"type\": \"tool_call\",\n",
    "}\n",
    "tool.invoke(model_generated_tool_call)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
   "metadata": {},
   "source": [
    "## Use within an agent\n",
    "\n",
    "- TODO: Add user question and run cells\n",
    "\n",
    "We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
    "\n",
    "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
    "\n",
    "<ChatModelTabs customVarName=\"llm\" />\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | output: false\n",
    "# | echo: false\n",
    "\n",
    "# !pip install -qU langchain langchain-openai\n",
    "from langchain.chat_models import init_chat_model\n",
    "\n",
    "model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bea35fa1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langgraph.prebuilt import create_react_agent\n",
    "\n",
    "tools = [tool]\n",
    "agent = create_react_agent(model, tools)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
   "metadata": {},
   "outputs": [],
   "source": [
    "example_query = \"...\"\n",
    "\n",
    "events = agent.stream(\n",
    "    {\"messages\": [(\"user\", example_query)]},\n",
```
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ac8146c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -36,20 +36,20 @@ class Chat__ModuleName__(BaseChatModel):

    # TODO: Populate with relevant params.
    Key init args — completion params:
-       model: str
+       model:
            Name of __ModuleName__ model to use.
-       temperature: float
+       temperature:
            Sampling temperature.
-       max_tokens: int | None
+       max_tokens:
            Max number of tokens to generate.

    # TODO: Populate with relevant params.
    Key init args — client params:
-       timeout: float | None
+       timeout:
            Timeout for requests.
-       max_retries: int
+       max_retries:
            Max number of retries.
-       api_key: str | None
+       api_key:
            __ModuleName__ API key. If not passed in will be read from env var
            __MODULE_NAME___API_KEY.
@@ -37,16 +37,16 @@ class __ModuleName__VectorStore(VectorStore):

    # TODO: Populate with relevant params.
    Key init args — indexing params:
-       collection_name: str
+       collection_name:
            Name of the collection.
-       embedding_function: Embeddings
+       embedding_function:
            Embedding function to use.

    # TODO: Populate with relevant params.
    Key init args — client params:
-       client: Client | None
+       client:
            Client to use.
-       connection_args: dict | None
+       connection_args:
            Connection arguments.

    # TODO: Replace with relevant init params.
@@ -1,7 +1,14 @@
 # 🦜🍎️ LangChain Core

-[](https://opensource.org/licenses/MIT)
+[](https://pypi.org/project/langchain-core/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-core)
+[](https://twitter.com/langchainai)

 Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

+To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
+[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+
 ## Quick Install

@@ -9,16 +16,14 @@
 pip install langchain-core
 ```

-## What is it?
+## 🤔 What is this?

-LangChain Core contains the base abstractions that power the the LangChain ecosystem.
+LangChain Core contains the base abstractions that power the LangChain ecosystem.

 These abstractions are designed to be as modular and simple as possible.

 The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.

-For full documentation see the [API reference](https://reference.langchain.com/python/).
-
 ## ⛰️ Why build on top of LangChain Core?

 The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
@@ -27,12 +32,16 @@ The LangChain ecosystem is built on top of `langchain-core`. Some of the benefit
 - **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
 - **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.

+## 📖 Documentation
+
+For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).
+
 ## 📕 Releases & Versioning

-See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning Policy](https://docs.langchain.com/oss/python/versioning).
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.

 ## 💁 Contributing

 As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.

-For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing).
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
@@ -84,7 +84,7 @@ class AgentAction(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "agent"]`
@@ -112,7 +112,7 @@ class AgentActionMessageLog(AgentAction):
    if (tool, tool_input) cannot be used to fully recreate the LLM
    prediction, and you need that LLM prediction (for future agent iteration).
    Compared to `log`, this is useful when the underlying LLM is a
-   ChatModel (and therefore returns messages rather than a string)."""
+   chat model (and therefore returns messages rather than a string)."""
    # Ignoring type because we're overriding the type from AgentAction.
    # And this is the correct thing to do in this case.
    # The type literal is used for serialization purposes.
@@ -161,7 +161,7 @@ class AgentFinish(Serializable):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "agent"]`
@@ -1,18 +1,15 @@
-"""Cache classes.
+"""`caches` provides an optional caching layer for language models.

 !!! warning
-    Beta Feature!
+    This is a beta feature! Please be wary of deploying experimental code to production
+    unless you've taken appropriate precautions.

-**Cache** provides an optional caching layer for LLMs.
+A cache is useful for two reasons:

-Cache is useful for two reasons:
-
-- It can save you money by reducing the number of API calls you make to the LLM
+1. It can save you money by reducing the number of API calls you make to the LLM
    provider if you're often requesting the same completion multiple times.
-- It can speed up your application by reducing the number of API calls you make
-  to the LLM provider.
-
-Cache directly competes with Memory. See documentation for Pros and Cons.
+2. It can speed up your application by reducing the number of API calls you make to the
+   LLM provider.
 """

 from __future__ import annotations
@@ -34,8 +31,8 @@ class BaseCache(ABC):

    The cache interface consists of the following methods:

-   - lookup: Look up a value based on a prompt and llm_string.
-   - update: Update the cache based on a prompt and llm_string.
+   - lookup: Look up a value based on a prompt and `llm_string`.
+   - update: Update the cache based on a prompt and `llm_string`.
    - clear: Clear the cache.

    In addition, the cache interface provides an async version of each method.
@@ -47,14 +44,14 @@ class BaseCache(ABC):

    @abstractmethod
    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-       """Look up based on prompt and llm_string.
+       """Look up based on `prompt` and `llm_string`.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
@@ -63,27 +60,27 @@ class BaseCache(ABC):
                representation.

        Returns:
-           On a cache miss, return None. On a cache hit, return the cached value.
-           The cached value is a list of Generations (or subclasses).
+           On a cache miss, return `None`. On a cache hit, return the cached value.
+           The cached value is a list of `Generation` (or subclasses).
        """

    @abstractmethod
    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-       """Update cache based on prompt and llm_string.
+       """Update cache based on `prompt` and `llm_string`.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the lookup method.

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
                These invocation parameters are serialized into a string
                representation.
-           return_val: The value to be cached. The value is a list of Generations
+           return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
@@ -92,14 +89,14 @@ class BaseCache(ABC):
        """Clear cache that can take additional keyword arguments."""

    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-       """Async look up based on prompt and llm_string.
+       """Async look up based on `prompt` and `llm_string`.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
@@ -108,29 +105,29 @@ class BaseCache(ABC):
                representation.

        Returns:
-           On a cache miss, return None. On a cache hit, return the cached value.
-           The cached value is a list of Generations (or subclasses).
+           On a cache miss, return `None`. On a cache hit, return the cached value.
+           The cached value is a list of `Generation` (or subclasses).
        """
        return await run_in_executor(None, self.lookup, prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
-       """Async update cache based on prompt and llm_string.
+       """Async update cache based on `prompt` and `llm_string`.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the look up method.

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
                These invocation parameters are serialized into a string
                representation.
-           return_val: The value to be cached. The value is a list of Generations
+           return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        return await run_in_executor(None, self.update, prompt, llm_string, return_val)
@@ -150,10 +147,9 @@ class InMemoryCache(BaseCache):
            maxsize: The maximum number of items to store in the cache.
                If `None`, the cache has no maximum size.
                If the cache exceeds the maximum size, the oldest items are removed.
-               Default is None.

        Raises:
-           ValueError: If maxsize is less than or equal to 0.
+           ValueError: If `maxsize` is less than or equal to `0`.
        """
        self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
        if maxsize is not None and maxsize <= 0:
@@ -162,28 +158,28 @@ class InMemoryCache(BaseCache):
        self._maxsize = maxsize

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-       """Look up based on prompt and llm_string.
+       """Look up based on `prompt` and `llm_string`.

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.

        Returns:
-           On a cache miss, return None. On a cache hit, return the cached value.
+           On a cache miss, return `None`. On a cache hit, return the cached value.
        """
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-       """Update cache based on prompt and llm_string.
+       """Update cache based on `prompt` and `llm_string`.

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
-           return_val: The value to be cached. The value is a list of Generations
+           return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        if self._maxsize is not None and len(self._cache) == self._maxsize:
@@ -196,30 +192,30 @@ class InMemoryCache(BaseCache):
        self._cache = {}

    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
-       """Async look up based on prompt and llm_string.
+       """Async look up based on `prompt` and `llm_string`.

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.

        Returns:
-           On a cache miss, return None. On a cache hit, return the cached value.
+           On a cache miss, return `None`. On a cache hit, return the cached value.
        """
        return self.lookup(prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
-       """Async update cache based on prompt and llm_string.
+       """Async update cache based on `prompt` and `llm_string`.

        Args:
-           prompt: a string representation of the prompt.
-               In the case of a Chat model, the prompt is a non-trivial
+           prompt: A string representation of the prompt.
+               In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
-           return_val: The value to be cached. The value is a list of Generations
+           return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        self.update(prompt, llm_string, return_val)
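To make the lookup/update contract in the diff above concrete, here is a minimal sketch using the `InMemoryCache` it documents; the prompt and `llm_string` values are invented for illustration and are not part of the change:

```python
# Sketch of the BaseCache lookup/update contract, using InMemoryCache.
# The prompt and llm_string values below are illustrative only.
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache(maxsize=2)

prompt = "Tell me a joke"
llm_string = '{"model": "some-model", "temperature": 0}'  # serialized invocation params

assert cache.lookup(prompt, llm_string) is None  # cache miss returns None

# The cached value is a list of Generation objects, keyed by (prompt, llm_string).
cache.update(prompt, llm_string, [Generation(text="Why did the chicken cross the road?")])

cached = cache.lookup(prompt, llm_string)  # cache hit returns the cached generations
assert cached is not None and cached[0].text
```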
@@ -1001,7 +1001,7 @@ class BaseCallbackManager(CallbackManagerMixin):

        Args:
            handler: The handler to add.
-           inherit: Whether to inherit the handler. Default is True.
+           inherit: Whether to inherit the handler.
        """
        if handler not in self.handlers:
            self.handlers.append(handler)
@@ -1028,7 +1028,7 @@ class BaseCallbackManager(CallbackManagerMixin):

        Args:
            handlers: The handlers to set.
-           inherit: Whether to inherit the handlers. Default is True.
+           inherit: Whether to inherit the handlers.
        """
        self.handlers = []
        self.inheritable_handlers = []
@@ -1044,7 +1044,7 @@ class BaseCallbackManager(CallbackManagerMixin):

        Args:
            handler: The handler to set.
-           inherit: Whether to inherit the handler. Default is True.
+           inherit: Whether to inherit the handler.
        """
        self.set_handlers([handler], inherit=inherit)

@@ -1057,7 +1057,7 @@ class BaseCallbackManager(CallbackManagerMixin):

        Args:
            tags: The tags to add.
-           inherit: Whether to inherit the tags. Default is True.
+           inherit: Whether to inherit the tags.
        """
        for tag in tags:
            if tag in self.tags:
@@ -1087,7 +1087,7 @@ class BaseCallbackManager(CallbackManagerMixin):

        Args:
            metadata: The metadata to add.
-           inherit: Whether to inherit the metadata. Default is True.
+           inherit: Whether to inherit the metadata.
        """
        self.metadata.update(metadata)
        if inherit:
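As a rough sketch of the `inherit` flag these docstrings describe (the handler and tag names here are arbitrary, not from the diff):

```python
# Sketch: handlers/tags added with inherit=True are propagated to child
# callback managers; inherit=False keeps them local to this manager.
from langchain_core.callbacks import BaseCallbackManager, StdOutCallbackHandler

manager = BaseCallbackManager(handlers=[])
manager.add_handler(StdOutCallbackHandler(), inherit=True)  # inherited by children
manager.add_tags(["demo-run"], inherit=False)               # local only
```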
@@ -132,7 +132,7 @@ class FileCallbackHandler(BaseCallbackHandler):
        Args:
            text: The text to write to the file.
            color: Optional color for the text. Defaults to `self.color`.
-           end: String appended after the text. Defaults to `""`.
+           end: String appended after the text.
            file: Optional file to write to. Defaults to `self.file`.

        Raises:
@@ -239,7 +239,7 @@ class FileCallbackHandler(BaseCallbackHandler):
            text: The text to write.
            color: Color override for this specific output. If `None`, uses
                `self.color`.
-           end: String appended after the text. Defaults to `""`.
+           end: String appended after the text.
            **kwargs: Additional keyword arguments.

        """
@@ -104,7 +104,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
        Args:
            text: The text to print.
            color: The color to use for the text.
-           end: The end character to use. Defaults to "".
+           end: The end character to use.
            **kwargs: Additional keyword arguments.
        """
        print_text(text, color=color or self.color, end=end)
@@ -153,7 +153,7 @@ class BaseChatMessageHistory(ABC):

        Raises:
            NotImplementedError: If the sub-class has not implemented an efficient
-               add_messages method.
+               `add_messages` method.
        """
        if type(self).add_messages != BaseChatMessageHistory.add_messages:
            # This means that the sub-class has implemented an efficient add_messages
@@ -35,38 +35,38 @@ class BaseLoader(ABC):  # noqa: B024
    # Sub-classes should not implement this method directly. Instead, they
    # should implement the lazy load method.
    def load(self) -> list[Document]:
-       """Load data into Document objects.
+       """Load data into `Document` objects.

        Returns:
-           the documents.
+           The documents.
        """
        return list(self.lazy_load())

    async def aload(self) -> list[Document]:
-       """Load data into Document objects.
+       """Load data into `Document` objects.

        Returns:
-           the documents.
+           The documents.
        """
        return [document async for document in self.alazy_load()]

    def load_and_split(
        self, text_splitter: TextSplitter | None = None
    ) -> list[Document]:
-       """Load Documents and split into chunks. Chunks are returned as Documents.
+       """Load Documents and split into chunks. Chunks are returned as `Document`.

        Do not override this method. It should be considered to be deprecated!

        Args:
-           text_splitter: TextSplitter instance to use for splitting documents.
-               Defaults to RecursiveCharacterTextSplitter.
+           text_splitter: `TextSplitter` instance to use for splitting documents.
+               Defaults to `RecursiveCharacterTextSplitter`.

        Raises:
-           ImportError: If langchain-text-splitters is not installed
-               and no text_splitter is provided.
+           ImportError: If `langchain-text-splitters` is not installed
+               and no `text_splitter` is provided.

        Returns:
-           List of Documents.
+           List of `Document`.
        """
        if text_splitter is None:
            if not _HAS_TEXT_SPLITTERS:
@@ -86,10 +86,10 @@ class BaseLoader(ABC):  # noqa: B024
    # Attention: This method will be upgraded into an abstractmethod once it's
    # implemented in all the existing subclasses.
    def lazy_load(self) -> Iterator[Document]:
-       """A lazy loader for Documents.
+       """A lazy loader for `Document`.

        Yields:
-           the documents.
+           The `Document` objects.
        """
        if type(self).load != BaseLoader.load:
            return iter(self.load())
@@ -97,10 +97,10 @@ class BaseLoader(ABC):  # noqa: B024
        raise NotImplementedError(msg)

    async def alazy_load(self) -> AsyncIterator[Document]:
-       """A lazy loader for Documents.
+       """A lazy loader for `Document`.

        Yields:
-           the documents.
+           The `Document` objects.
        """
        iterator = await run_in_executor(None, self.lazy_load)
        done = object()
@@ -115,7 +115,7 @@ class BaseBlobParser(ABC):
    """Abstract interface for blob parsers.

    A blob parser provides a way to parse raw data stored in a blob into one
-   or more documents.
+   or more `Document` objects.

    The parser can be composed with blob loaders, making it easy to reuse
    a parser independent of how the blob was originally loaded.
@@ -128,25 +128,25 @@ class BaseBlobParser(ABC):
        Subclasses are required to implement this method.

        Args:
-           blob: Blob instance
+           blob: `Blob` instance

        Returns:
-           Generator of documents
+           Generator of `Document` objects
        """

    def parse(self, blob: Blob) -> list[Document]:
-       """Eagerly parse the blob into a document or documents.
+       """Eagerly parse the blob into a `Document` or `Document` objects.

        This is a convenience method for interactive development environment.

-       Production applications should favor the lazy_parse method instead.
+       Production applications should favor the `lazy_parse` method instead.

        Subclasses should generally not over-ride this parse method.

        Args:
-           blob: Blob instance
+           blob: `Blob` instance

        Returns:
-           List of documents
+           List of `Document` objects
        """
        return list(self.lazy_parse(blob))
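A minimal custom-loader sketch matching the `BaseLoader` docstrings above: only `lazy_load` is implemented, and `load`/`aload` then come for free. The `LineLoader` class is hypothetical, not part of the library:

```python
from collections.abc import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineLoader(BaseLoader):
    """Hypothetical loader that yields one Document per line of text."""

    def __init__(self, text: str) -> None:
        self.text = text

    def lazy_load(self) -> Iterator[Document]:
        # Yield documents one at a time instead of materializing a full list.
        for i, line in enumerate(self.text.splitlines()):
            yield Document(page_content=line, metadata={"line": i})


docs = LineLoader("first line\nsecond line").load()  # list of 2 Documents
```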
@@ -76,8 +76,8 @@ class LangSmithLoader(BaseLoader):
            splits: A list of dataset splits, which are
                divisions of your dataset such as 'train', 'test', or 'validation'.
                Returns examples only from the specified splits.
-           inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
-           offset: The offset to start from. Defaults to 0.
+           inline_s3_urls: Whether to inline S3 URLs.
+           offset: The offset to start from.
            limit: The maximum number of examples to return.
            metadata: Metadata to filter by.
            filter: A structured filter string to apply to the examples.
@@ -57,51 +57,51 @@ class Blob(BaseMedia):

    Example: Initialize a blob from in-memory data

        ```python
        from langchain_core.documents import Blob

        blob = Blob.from_data("Hello, world!")

        # Read the blob as a string
        print(blob.as_string())

        # Read the blob as bytes
        print(blob.as_bytes())

        # Read the blob as a byte stream
        with blob.as_bytes_io() as f:
            print(f.read())
        ```

    Example: Load from memory and specify mime-type and metadata

        ```python
        from langchain_core.documents import Blob

        blob = Blob.from_data(
            data="Hello, world!",
            mime_type="text/plain",
            metadata={"source": "https://example.com"},
        )
        ```

    Example: Load the blob from a file

        ```python
        from langchain_core.documents import Blob

        blob = Blob.from_path("path/to/file.txt")

        # Read the blob as a string
        print(blob.as_string())

        # Read the blob as bytes
        print(blob.as_bytes())

        # Read the blob as a byte stream
        with blob.as_bytes_io() as f:
            print(f.read())
        ```
    """

    data: bytes | str | None = None
@@ -111,7 +111,7 @@ class Blob(BaseMedia):
    encoding: str = "utf-8"
    """Encoding to use if decoding the bytes into a string.

-   Use utf-8 as default encoding, if decoding to string.
+   Use `utf-8` as default encoding, if decoding to string.
    """
    path: PathLike | None = None
    """Location where the original content was found."""
@@ -127,7 +127,7 @@ class Blob(BaseMedia):

    If a path is associated with the blob, it will default to the path location.

-   Unless explicitly set via a metadata field called "source", in which
+   Unless explicitly set via a metadata field called `"source"`, in which
    case that value will be used instead.
    """
    if self.metadata and "source" in self.metadata:
@@ -211,11 +211,11 @@ class Blob(BaseMedia):
        """Load the blob from a path like object.

        Args:
-           path: path like object to file to be read
+           path: Path-like object to file to be read
            encoding: Encoding to use if decoding the bytes into a string
-           mime_type: if provided, will be set as the mime-type of the data
+           mime_type: If provided, will be set as the mime-type of the data
            guess_type: If `True`, the mimetype will be guessed from the file extension,
                if a mime-type was not provided
            metadata: Metadata to associate with the blob

        Returns:
@@ -248,10 +248,10 @@ class Blob(BaseMedia):
        """Initialize the blob from in-memory data.

        Args:
-           data: the in-memory data associated with the blob
+           data: The in-memory data associated with the blob
            encoding: Encoding to use if decoding the bytes into a string
-           mime_type: if provided, will be set as the mime-type of the data
-           path: if provided, will be set as the source from which the data came
+           mime_type: If provided, will be set as the mime-type of the data
+           path: If provided, will be set as the source from which the data came
            metadata: Metadata to associate with the blob

        Returns:
@@ -303,7 +303,7 @@ class Document(BaseMedia):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            ["langchain", "schema", "document"]
@@ -18,7 +18,8 @@ class FakeEmbeddings(Embeddings, BaseModel):

    This embedding model creates embeddings by sampling from a normal distribution.

-   Do not use this outside of testing, as it is not a real embedding model.
+   !!! warning
+       Do not use this outside of testing, as it is not a real embedding model.

    Instantiate:
        ```python
@@ -72,7 +73,8 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
    This embedding model creates embeddings by sampling from a normal distribution
    with a seed based on the hash of the text.

-   Do not use this outside of testing, as it is not a real embedding model.
+   !!! warning
+       Do not use this outside of testing, as it is not a real embedding model.

    Instantiate:
        ```python
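A quick illustration of the test-only embedding models the new warnings refer to; the `size` value is arbitrary:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding, FakeEmbeddings

rand = FakeEmbeddings(size=8)             # random vector on every call
det = DeterministicFakeEmbedding(size=8)  # seeded by the hash of the input text

assert len(rand.embed_query("hello")) == 8
assert det.embed_query("hello") == det.embed_query("hello")  # stable across calls
```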
@@ -154,7 +154,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-           k: Number of examples to select. Default is 4.
+           k: Number of examples to select.
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            example_keys: If provided, keys to filter examples to.
@@ -198,7 +198,7 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-           k: Number of examples to select. Default is 4.
+           k: Number of examples to select.
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            example_keys: If provided, keys to filter examples to.
@@ -285,9 +285,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-           k: Number of examples to select. Default is 4.
+           k: Number of examples to select.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-               Default is 20.
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            example_keys: If provided, keys to filter examples to.
@@ -333,9 +332,8 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-           k: Number of examples to select. Default is 4.
+           k: Number of examples to select.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-               Default is 20.
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            example_keys: If provided, keys to filter examples to.
@@ -16,7 +16,7 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
    """Exception that output parsers should raise to signify a parsing error.

    This exists to differentiate parsing errors from other code or execution errors
-   that also may arise inside the output parser. OutputParserExceptions will be
+   that also may arise inside the output parser. `OutputParserException` will be
    available to catch and handle in ways to fix the parsing error, while other
    errors will be raised.
    """
@@ -28,7 +28,7 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
        llm_output: str | None = None,
        send_to_llm: bool = False,  # noqa: FBT001,FBT002
    ):
-       """Create an OutputParserException.
+       """Create an `OutputParserException`.

        Args:
            error: The error that's being re-raised or an error message.
@@ -37,11 +37,10 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
            llm_output: String model output which is error-ing.

            send_to_llm: Whether to send the observation and llm_output back to an Agent
-               after an OutputParserException has been raised.
+               after an `OutputParserException` has been raised.
                This gives the underlying model driving the agent the context that the
                previous output was improperly structured, in the hopes that it will
                update the output to the correct format.
-               Defaults to `False`.

        Raises:
            ValueError: If `send_to_llm` is True but either observation or
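For context, this is roughly how a parser is expected to use the exception; the `parse_bool` function is a made-up example, not LangChain API:

```python
from langchain_core.exceptions import OutputParserException


def parse_bool(model_output: str) -> bool:
    """Hypothetical parser: raises OutputParserException on malformed output."""
    text = model_output.strip().lower()
    if text not in {"true", "false"}:
        # Raising OutputParserException (not a bare ValueError) lets callers
        # distinguish a parsing failure from other execution errors.
        raise OutputParserException(
            f"Expected 'true' or 'false', got: {model_output!r}",
            llm_output=model_output,
        )
    return text == "true"
```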
@@ -326,8 +326,8 @@ def index(
        record_manager: Timestamped set to keep track of which documents were
            updated.
        vector_store: VectorStore or DocumentIndex to index the documents into.
-       batch_size: Batch size to use when indexing. Default is 100.
-       cleanup: How to handle clean up of documents. Default is None.
+       batch_size: Batch size to use when indexing.
+       cleanup: How to handle clean up of documents.

            - incremental: Cleans up all documents that haven't been updated AND
              that are associated with source ids that were seen during indexing.
@@ -342,15 +342,12 @@ def index(
              source ids that were seen during indexing.
            - None: Do not delete any documents.
        source_id_key: Optional key that helps identify the original source
-           of the document. Default is None.
+           of the document.
        cleanup_batch_size: Batch size to use when cleaning up documents.
-           Default is 1_000.
        force_update: Force update documents even if they are present in the
            record manager. Useful if you are re-indexing with updated embeddings.
-           Default is False.
        key_encoder: Hashing algorithm to use for hashing the document content and
-           metadata. Default is "sha1".
-           Other options include "blake2b", "sha256", and "sha512".
+           metadata. Options include "blake2b", "sha256", and "sha512".

        !!! version-added "Added in version 0.3.66"

@@ -667,8 +664,8 @@ async def aindex(
        record_manager: Timestamped set to keep track of which documents were
            updated.
        vector_store: VectorStore or DocumentIndex to index the documents into.
-       batch_size: Batch size to use when indexing. Default is 100.
-       cleanup: How to handle clean up of documents. Default is None.
+       batch_size: Batch size to use when indexing.
+       cleanup: How to handle clean up of documents.

            - incremental: Cleans up all documents that haven't been updated AND
              that are associated with source ids that were seen during indexing.
@@ -683,15 +680,12 @@ async def aindex(
              source ids that were seen during indexing.
            - None: Do not delete any documents.
        source_id_key: Optional key that helps identify the original source
-           of the document. Default is None.
+           of the document.
        cleanup_batch_size: Batch size to use when cleaning up documents.
-           Default is 1_000.
        force_update: Force update documents even if they are present in the
            record manager. Useful if you are re-indexing with updated embeddings.
-           Default is False.
        key_encoder: Hashing algorithm to use for hashing the document content and
-           metadata. Default is "sha1".
-           Other options include "blake2b", "sha256", and "sha512".
+           metadata. Options include "blake2b", "sha256", and "sha512".

        !!! version-added "Added in version 0.3.66"
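An end-to-end sketch of the `index()` arguments documented above, using in-memory stand-ins; the namespace, source names, and embedding size are arbitrary:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.indexing import InMemoryRecordManager, index
from langchain_core.vectorstores import InMemoryVectorStore

record_manager = InMemoryRecordManager(namespace="demo")
record_manager.create_schema()
vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=16))

docs = [
    Document(page_content="hello", metadata={"source": "a.txt"}),
    Document(page_content="world", metadata={"source": "b.txt"}),
]

# "incremental" cleanup deletes stale documents that share a source id with
# documents seen during this run; source_id_key names the metadata field.
result = index(docs, record_manager, vector_store, cleanup="incremental", source_id_key="source")
print(result)  # counts of added/updated/skipped/deleted documents
```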
@@ -522,10 +522,10 @@ class DocumentIndex(BaseRetriever):

        When an ID is specified and the content already exists in the vectorstore,
        the upsert method should update the content with the new data. If the content
-       does not exist, the upsert method should add the item to the vectorstore.
+       does not exist, the upsert method should add the item to the `VectorStore`.

        Args:
-           items: Sequence of documents to add to the vectorstore.
+           items: Sequence of documents to add to the `VectorStore`.
            **kwargs: Additional keyword arguments.

        Returns:
@@ -545,10 +545,10 @@ class DocumentIndex(BaseRetriever):

        When an ID is specified and the item already exists in the vectorstore,
        the upsert method should update the item with the new data. If the item
-       does not exist, the upsert method should add the item to the vectorstore.
+       does not exist, the upsert method should add the item to the `VectorStore`.

        Args:
-           items: Sequence of documents to add to the vectorstore.
+           items: Sequence of documents to add to the `VectorStore`.
            **kwargs: Additional keyword arguments.

        Returns:
@@ -1,43 +1,29 @@
 """Language models.

-**Language Model** is a type of model that can generate text or complete
-text prompts.
+LangChain has two main classes to work with language models: chat models and
+"old-fashioned" LLMs.

-LangChain has two main classes to work with language models: **Chat Models**
-and "old-fashioned" **LLMs**.
-
-**Chat Models**
+**Chat models**

 Language models that use a sequence of messages as inputs and return chat messages
-as outputs (as opposed to using plain text). These are traditionally newer models (
-older models are generally LLMs, see below). Chat models support the assignment of
+as outputs (as opposed to using plain text). Chat models support the assignment of
 distinct roles to conversation messages, helping to distinguish messages from the AI,
 users, and instructions such as system messages.

 The key abstraction for chat models is `BaseChatModel`. Implementations
-should inherit from this class. Please see LangChain how-to guides with more
-information on how to implement a custom chat model.
+should inherit from this class.

-To implement a custom Chat Model, inherit from `BaseChatModel`. See
-the following guide for more information on how to implement a custom Chat Model:
-
-https://python.langchain.com/docs/how_to/custom_chat_model/
+See existing [chat model integrations](https://docs.langchain.com/oss/python/integrations/chat).

 **LLMs**

 Language models that takes a string as input and returns a string.
-These are traditionally older models (newer models generally are Chat Models,
-see below).
+These are traditionally older models (newer models generally are chat models).

-Although the underlying models are string in, string out, the LangChain wrappers
-also allow these models to take messages as input. This gives them the same interface
-as Chat Models. When messages are passed in as input, they will be formatted into a
-string under the hood before being passed to the underlying model.
-
-To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
-Please see the following guide for more information on how to implement a custom LLM:
-
-https://python.langchain.com/docs/how_to/custom_llm/
+Although the underlying models are string in, string out, the LangChain wrappers also
+allow these models to take messages as input. This gives them the same interface as
+chat models. When messages are passed in as input, they will be formatted into a string
+under the hood before being passed to the underlying model.
 """

 from typing import TYPE_CHECKING
@@ -89,7 +89,8 @@ class ParsedDataUri(TypedDict):
def _parse_data_uri(uri: str) -> ParsedDataUri | None:
    """Parse a data URI into its components.

-   If parsing fails, return None. If either MIME type or data is missing, return None.
+   If parsing fails, return `None`. If either MIME type or data is missing, return
+   `None`.

    Example:
        ```python
@@ -96,9 +96,16 @@ def _get_token_ids_default_method(text: str) -> list[int]:


LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
+"""Input to a language model."""
+
LanguageModelOutput = BaseMessage | str
+"""Output from a language model."""
+
LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
+"""Input/output interface for a language model."""
+
LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
+"""Type variable for the output of a language model."""


def _get_verbosity() -> bool:
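A rough illustration of the input flexibility that the new `LanguageModelInput` docstring describes, using a fake test model; the responses are arbitrary:

```python
# LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation],
# so the same model accepts a plain string or a list of messages.
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage

model = FakeListChatModel(responses=["hello!", "hello again!"])

r1 = model.invoke("hi")                          # plain string input
r2 = model.invoke([HumanMessage(content="hi")])  # sequence of messages
print(r1.content, r2.content)
```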
@@ -193,14 +200,14 @@ class BaseLanguageModel(
|
||||
pure text generation models and `BaseMessage` objects for chat models).
|
||||
stop: Stop words to use when generating. Model output is cut off at the
|
||||
first occurrence of any of these substrings.
|
||||
callbacks: Callbacks to pass through. Used for executing additional
|
||||
callbacks: `Callbacks` to pass through. Used for executing additional
|
||||
functionality, such as logging or streaming, throughout generation.
|
||||
**kwargs: Arbitrary additional keyword arguments. These are usually passed
|
||||
to the model provider API call.
|
||||
|
||||
Returns:
|
||||
An `LLMResult`, which contains a list of candidate `Generation` objects for
|
||||
each input prompt and additional model provider-specific output.
|
||||
each input prompt and additional model provider-specific output.
|
||||
|
||||
"""

@@ -230,14 +237,14 @@ class BaseLanguageModel(
pure text generation models and `BaseMessage` objects for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.

Returns:
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.
each input prompt and additional model provider-specific output.

"""

@@ -262,8 +269,7 @@ class BaseLanguageModel(

Returns:
A list of ids corresponding to the tokens in the text, in order they occur
in the text.

in the text.
"""
if self.custom_get_token_ids is not None:
return self.custom_get_token_ids(text)
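The `custom_get_token_ids` hook exercised above can be illustrated with a short sketch; the whitespace tokenizer is invented for the example, where real code would plug in a provider tokenizer:

```python
from langchain_core.language_models import FakeListChatModel


def whitespace_token_ids(text: str) -> list[int]:
    # Toy encoder: one "id" per whitespace-separated token.
    return list(range(len(text.split())))


model = FakeListChatModel(
    responses=["ok"], custom_get_token_ids=whitespace_token_ids
)
print(model.get_token_ids("a pound of bricks"))   # -> [0, 1, 2, 3]
print(model.get_num_tokens("a pound of bricks"))  # -> 4
```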

@@ -264,21 +264,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.

| Method | Description |
| ---------------------------- | -------------------------------------------------------------------------------------------- |
| `bind_tools` | Create chat model that can call tools. |
| `with_structured_output` | Create wrapper that structures model output using schema. |
| `with_retry` | Create wrapper that retries model calls on failure. |
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |
| Method | Description |
| ---------------------------- | ------------------------------------------------------------------------------------------ |
| `bind_tools` | Create chat model that can call tools. |
| `with_structured_output` | Create wrapper that structures model output using schema. |
| `with_retry` | Create wrapper that retries model calls on failure. |
| `with_fallbacks` | Create wrapper that falls back to other models on failure. |
| `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
| `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |
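The declarative methods in the table compose, since each returns a new `Runnable` wrapper while the underlying model stays unchanged; a hedged sketch using the built-in fake model:

```python
from langchain_core.language_models import FakeListChatModel

primary = FakeListChatModel(responses=["hi"])
backup = FakeListChatModel(responses=["hello from the backup"])

# Retry transient failures up to twice, then fall back to another model.
robust_model = primary.with_retry(stop_after_attempt=2).with_fallbacks([backup])
print(robust_model.invoke("Say hi").content)  # -> "hi"
```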

Creating custom chat model:
Custom chat model implementations should inherit from this class.
Please reference the table below for information about which
methods and properties are required or optional for implementations.

| Method/Property | Description | Required/Optional |
| Method/Property | Description | Required |
| -------------------------------- | ------------------------------------------------------------------ | ----------------- |
| `_generate` | Use to generate a chat result from a prompt | Required |
| `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
@@ -287,9 +287,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
| `_agenerate` | Use to implement a native async method | Optional |
| `_astream` | Use to implement async version of `_stream` | Optional |

Follow the guide for more information on how to implement a custom chat model:
[Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

""" # noqa: E501

rate_limiter: BaseRateLimiter | None = Field(default=None, exclude=True)
@@ -325,11 +322,12 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
Supported values:

- `'v0'`: provider-specific format in content (can lazily-parse with
`.content_blocks`)
- `'v1'`: standardized format in content (consistent with `.content_blocks`)
`content_blocks`)
- `'v1'`: standardized format in content (consistent with `content_blocks`)

Partner packages (e.g., `langchain-openai`) can also use this field to roll out
new content formats in a backward-compatible way.
Partner packages (e.g.,
[`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
field to roll out new content formats in a backward-compatible way.

!!! version-added "Added in version 1.0"

@@ -840,13 +838,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
messages: List of list of messages.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: The tags to apply.
metadata: The metadata to apply.
@@ -856,8 +854,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
to the model provider API call.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.

"""
ls_structured_output_format = kwargs.pop(
@@ -958,13 +956,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
type (e.g., pure text completion models vs chat models).

Args:
messages: List of list of messages.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: The tags to apply.
metadata: The metadata to apply.
@@ -974,8 +972,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
to the model provider API call.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.

"""
ls_structured_output_format = kwargs.pop(
@@ -1512,17 +1510,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
for more on how to properly specify types and descriptions of
schema fields when specifying a Pydantic or `TypedDict` class.
dict and will not be validated.

See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.

include_raw:
If `False` then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If `True`
then both the raw model response (a BaseMessage) and the parsed model
then both the raw model response (a `BaseMessage`) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys `'raw'`, `'parsed'`, and `'parsing_error'`.
will be caught and returned as well.

The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.

Raises:
ValueError: If there are any unsupported `kwargs`.
@@ -1530,99 +1532,102 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
`with_structured_output()`.

Returns:
A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.

If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
an instance of `schema` (i.e., a Pydantic object).
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:

Otherwise, if `include_raw` is False then Runnable outputs a dict.
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`

If `include_raw` is True, then Runnable outputs a dict with keys:
Example: Pydantic schema (`include_raw=False`):

- `'raw'`: BaseMessage
- `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
- `'parsing_error'`: BaseException | None

Example: Pydantic schema (include_raw=False):
```python
from pydantic import BaseModel
```python
from pydantic import BaseModel

class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

answer: str
justification: str
answer: str
justification: str

model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)

structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)

# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```

Example: Pydantic schema (include_raw=True):
```python
from pydantic import BaseModel
Example: Pydantic schema (`include_raw=True`):

```python
from pydantic import BaseModel

class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

answer: str
justification: str
answer: str
justification: str

model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, include_raw=True
)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, include_raw=True
)

structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```

Example: Dict schema (include_raw=False):
```python
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
Example: `dict` schema (`include_raw=False`):

```python
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool

class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

answer: str
justification: str
answer: str
justification: str

dict_schema = convert_to_openai_tool(AnswerWithJustification)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(dict_schema)
dict_schema = convert_to_openai_tool(AnswerWithJustification)
model = ChatModel(model="model-name", temperature=0)
structured_model = model.with_structured_output(dict_schema)

structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```

!!! warning "Behavior changed in 0.2.26"
Added support for TypedDict class.
Added support for TypedDict class.

""" # noqa: E501
_ = kwargs.pop("method", None)

@@ -1,4 +1,4 @@
"""Fake ChatModel for testing purposes."""
"""Fake chat model for testing purposes."""

import asyncio
import re
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig

class FakeMessagesListChatModel(BaseChatModel):
"""Fake `ChatModel` for testing purposes."""
"""Fake chat model for testing purposes."""

responses: list[BaseMessage]
"""List of responses to **cycle** through in order."""
@@ -57,7 +57,7 @@ class FakeListChatModelError(Exception):

class FakeListChatModel(SimpleChatModel):
"""Fake ChatModel for testing purposes."""
"""Fake chat model for testing purposes."""

responses: list[str]
"""List of responses to **cycle** through in order."""

@@ -74,8 +74,8 @@ def create_base_retry_decorator(

Args:
error_types: List of error types to retry on.
max_retries: Number of retries. Default is 1.
run_manager: Callback manager for the run. Default is None.
max_retries: Number of retries.
run_manager: Callback manager for the run.

Returns:
A retry decorator.
@@ -91,13 +91,17 @@ def create_base_retry_decorator(
if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
coro = run_manager.on_retry(retry_state)
try:
loop = asyncio.get_event_loop()
if loop.is_running():
# TODO: Fix RUF006 - this task should have a reference
# and be awaited somewhere
loop.create_task(coro) # noqa: RUF006
else:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
asyncio.run(coro)
else:
if loop.is_running():
# TODO: Fix RUF006 - this task should have a reference
# and be awaited somewhere
loop.create_task(coro) # noqa: RUF006
else:
asyncio.run(coro)
except Exception as e:
_log_error_once(f"Error in on_retry: {e}")
else:
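A hedged usage sketch of the decorator documented in this hunk; the flaky function and its error type are invented:

```python
from langchain_core.language_models.llms import create_base_retry_decorator

calls = {"n": 0}


def flaky() -> str:
    calls["n"] += 1
    if calls["n"] < 3:
        raise TimeoutError("transient provider hiccup")
    return "ok"


# Retry on TimeoutError, allowing up to three attempts in total.
retry = create_base_retry_decorator(error_types=[TimeoutError], max_retries=3)
print(retry(flaky)())  # -> "ok" after two retried failures
```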
@@ -153,7 +157,7 @@ def get_prompts(
Args:
params: Dictionary of parameters.
prompts: List of prompts.
cache: Cache object. Default is None.
cache: Cache object.

Returns:
A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -189,7 +193,7 @@ async def aget_prompts(
Args:
params: Dictionary of parameters.
prompts: List of prompts.
cache: Cache object. Default is None.
cache: Cache object.

Returns:
A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -841,7 +845,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
prompts: List of string prompts.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: List of tags to associate with each prompt. If provided, the length
of the list must match the length of the prompts list.
@@ -861,8 +865,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
`run_name` (if provided) does not match the length of prompts.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.
"""
if not isinstance(prompts, list):
msg = (
@@ -1111,7 +1115,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
prompts: List of string prompts.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
callbacks: `Callbacks` to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: List of tags to associate with each prompt. If provided, the length
of the list must match the length of the prompts list.
@@ -1130,8 +1134,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
`run_name` (if provided) does not match the length of prompts.

Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
An `LLMResult`, which contains a list of candidate `Generations` for each
input prompt and additional model provider-specific output.
"""
if isinstance(metadata, list):
metadata = [

@@ -42,10 +42,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:

Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
**kwargs: Additional arguments to pass to json.dumps
pretty: Whether to pretty print the json. If `True`, the json will be
indented with 2 spaces (if no indent is provided as part of `kwargs`).
**kwargs: Additional arguments to pass to `json.dumps`

Returns:
A json string representation of the object.
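For example, the `pretty` flag documented above:

```python
from langchain_core.load import dumps
from langchain_core.messages import HumanMessage

msg = HumanMessage("hello")
print(dumps(msg, pretty=True))  # serialized JSON with a 2-space indent
```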

@@ -67,12 +67,9 @@ class Reviver:
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.

ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.
"""
self.secrets_from_env = secrets_from_env
self.secrets_map = secrets_map or {}
@@ -204,12 +201,9 @@ def loads(
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.

ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.

Returns:
Revived LangChain objects.
@@ -249,12 +243,9 @@ def load(
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
secrets_from_env: Whether to load secrets from the environment.
Defaults to `True`.
additional_import_mappings: A dictionary of additional namespace mappings
You can use this to override default mappings or add new mappings.

ignore_unserializable_fields: Whether to ignore unserializable fields.
Defaults to `False`.

Returns:
Revived LangChain objects.
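A round-trip sketch of `dumps`/`loads` as documented above:

```python
from langchain_core.load import dumps, loads
from langchain_core.messages import AIMessage

serialized = dumps(AIMessage("hi"))
revived = loads(serialized)  # re-instantiated via the Reviver described above
print(type(revived).__name__)  # -> AIMessage
```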

@@ -96,7 +96,7 @@ class Serializable(BaseModel, ABC):
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.
- `get_lc_namespace`: Get the namespace of the langchain object.
- `get_lc_namespace`: Get the namespace of the LangChain object.
During deserialization, this namespace is used to identify
the correct class to instantiate.
Please see the `Reviver` class in `langchain_core.load.load` for more details.
@@ -127,10 +127,10 @@ class Serializable(BaseModel, ABC):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is ["langchain", "llms", "openai"]
namespace is `["langchain", "llms", "openai"]`

Returns:
The namespace.
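A hedged sketch of overriding `get_lc_namespace`; the class and namespace are invented:

```python
from langchain_core.load.serializable import Serializable


class MyConfig(Serializable):
    """Example serializable object."""

    value: int = 0

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Opt in: Serializable subclasses are not serializable by default.
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        # Must match the import path used at deserialization time.
        return ["my_package", "config"]
```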

@@ -148,27 +148,26 @@ class UsageMetadata(TypedDict):
class AIMessage(BaseMessage):
"""Message from an AI.

AIMessage is returned from a chat model as a response to a prompt.
An `AIMessage` is returned from a chat model as a response to a prompt.

This message represents the output of the model and consists of both
the raw output as returned by the model together standardized fields
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.

"""

tool_calls: list[ToolCall] = []
"""If provided, tool calls associated with the message."""
"""If present, tool calls associated with the message."""
invalid_tool_calls: list[InvalidToolCall] = []
"""If provided, tool calls with parsing errors associated with the message."""
"""If present, tool calls with parsing errors associated with the message."""
usage_metadata: UsageMetadata | None = None
"""If provided, usage metadata for a message, such as token counts.
"""If present, usage metadata for a message, such as token counts.

This is a standard representation of token usage that is consistent across models.

"""

type: Literal["ai"] = "ai"
"""The type of the message (used for deserialization). Defaults to "ai"."""
"""The type of the message (used for deserialization)."""

@overload
def __init__(
@@ -191,7 +190,7 @@ class AIMessage(BaseMessage):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Initialize `AIMessage`.
"""Initialize an `AIMessage`.

Specify `content` as positional arg or `content_blocks` for typing.

@@ -217,7 +216,11 @@ class AIMessage(BaseMessage):

@property
def lc_attributes(self) -> dict:
"""Attrs to be serialized even if they are derived from other init args."""
"""Attributes to be serialized.

Includes all attributes, even if they are derived from other initialization
arguments.
"""
return {
"tool_calls": self.tool_calls,
"invalid_tool_calls": self.invalid_tool_calls,
@@ -225,7 +228,7 @@ class AIMessage(BaseMessage):

@property
def content_blocks(self) -> list[types.ContentBlock]:
"""Return content blocks of the message.
"""Return standard, typed `ContentBlock` dicts from the message.

If the message has a known model provider, use the provider-specific translator
first before falling back to best-effort parsing. For details, see the property
@@ -331,11 +334,10 @@ class AIMessage(BaseMessage):

@override
def pretty_repr(self, html: bool = False) -> str:
"""Return a pretty representation of the message.
"""Return a pretty representation of the message for display.

Args:
html: Whether to return an HTML-formatted string.
Defaults to `False`.

Returns:
A pretty representation of the message.
@@ -372,23 +374,19 @@ class AIMessage(BaseMessage):

class AIMessageChunk(AIMessage, BaseMessageChunk):
"""Message chunk from an AI."""
"""Message chunk from an AI (yielded when streaming)."""

# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment]
"""The type of the message (used for deserialization).

Defaults to `AIMessageChunk`.

"""
"""The type of the message (used for deserialization)."""

tool_call_chunks: list[ToolCallChunk] = []
"""If provided, tool call chunks associated with the message."""

chunk_position: Literal["last"] | None = None
"""Optional span represented by an aggregated AIMessageChunk.
"""Optional span represented by an aggregated `AIMessageChunk`.

If a chunk with `chunk_position="last"` is aggregated into a stream,
`tool_call_chunks` in message content will be parsed into `tool_calls`.
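The aggregation behavior referenced above can be seen directly:

```python
from langchain_core.messages import AIMessageChunk

# Chunks accumulate with `+`, mirroring how a stream is aggregated.
chunk = AIMessageChunk(content="Hello, ") + AIMessageChunk(content="world")
print(chunk.content)  # -> "Hello, world"
```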
@@ -396,7 +394,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

@property
def lc_attributes(self) -> dict:
"""Attrs to be serialized even if they are derived from other init args."""
"""Attributes to be serialized, even if they are derived from other initialization args.""" # noqa: E501
return {
"tool_calls": self.tool_calls,
"invalid_tool_calls": self.invalid_tool_calls,
@@ -404,7 +402,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

@property
def content_blocks(self) -> list[types.ContentBlock]:
"""Return content blocks of the message."""
"""Return standard, typed `ContentBlock` dicts from the message."""
if self.response_metadata.get("output_version") == "v1":
return cast("list[types.ContentBlock]", self.content)

@@ -545,12 +543,15 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
and call_id in id_to_tc
):
self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
if "extras" in block:
# mypy does not account for instance check for dict above
self.content[idx]["extras"] = block["extras"] # type: ignore[index]

return self

@model_validator(mode="after")
def init_server_tool_calls(self) -> Self:
"""Parse server_tool_call_chunks."""
"""Parse `server_tool_call_chunks`."""
if (
self.chunk_position == "last"
and self.response_metadata.get("output_version") == "v1"

@@ -92,11 +92,11 @@ class TextAccessor(str):
class BaseMessage(Serializable):
"""Base abstract message class.

Messages are the inputs and outputs of a `ChatModel`.
Messages are the inputs and outputs of a chat model.
"""

content: str | list[str | dict]
"""The string contents of the message."""
"""The contents of the message."""

additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
@@ -159,12 +159,12 @@ class BaseMessage(Serializable):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Initialize `BaseMessage`.
"""Initialize a `BaseMessage`.

Specify `content` as positional arg or `content_blocks` for typing.

Args:
content: The string contents of the message.
content: The contents of the message.
content_blocks: Typed standard content.
**kwargs: Additional arguments to pass to the parent class.
"""
@@ -184,7 +184,7 @@ class BaseMessage(Serializable):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "schema", "messages"]`
@@ -262,7 +262,7 @@ class BaseMessage(Serializable):
Can be used as both property (`message.text`) and method (`message.text()`).

!!! deprecated
As of langchain-core 1.0.0, calling `.text()` as a method is deprecated.
As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
Use `.text` as a property instead. This method will be removed in 2.0.0.

Returns:
@@ -307,7 +307,7 @@ class BaseMessage(Serializable):

Args:
html: Whether to format the message as HTML. If `True`, the message will be
formatted with HTML tags. Default is False.
formatted with HTML tags.

Returns:
A pretty representation of the message.
@@ -464,7 +464,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:

Args:
title: The title.
bold: Whether to bold the title. Default is False.
bold: Whether to bold the title.

Returns:
The title representation.

@@ -28,7 +28,7 @@ dictionary with two keys:
- `'translate_content'`: Function to translate `AIMessage` content.
- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.

When calling `.content_blocks` on an `AIMessage` or `AIMessageChunk`, if
When calling `content_blocks` on an `AIMessage` or `AIMessageChunk`, if
`model_provider` is set in `response_metadata`, the corresponding translator
functions will be used to parse the content into blocks. Otherwise, best-effort parsing
in `BaseMessage` will be used.

@@ -31,7 +31,7 @@ def _convert_to_v1_from_anthropic_input(
) -> list[types.ContentBlock]:
"""Convert Anthropic format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Anthropic format to v1 ContentBlocks.

@@ -35,7 +35,7 @@ def _convert_to_v1_from_converse_input(
) -> list[types.ContentBlock]:
"""Convert Bedrock Converse format blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Converse format to v1 ContentBlocks.

@@ -105,7 +105,7 @@ def _convert_to_v1_from_genai_input(
Called when message isn't an `AIMessage` or `model_provider` isn't set on
`response_metadata`.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be GenAI format to v1 ContentBlocks.
@@ -282,7 +282,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
standard content blocks for returning.

Args:
message: The AIMessage or AIMessageChunk to convert.
message: The `AIMessage` or `AIMessageChunk` to convert.

Returns:
List of standard content blocks derived from the message content.
@@ -453,9 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
"status": status, # type: ignore[typeddict-item]
"output": item.get("code_execution_result", ""),
}
server_tool_result_block["extras"] = {"block_type": item_type}
# Preserve original outcome in extras
if outcome is not None:
server_tool_result_block["extras"] = {"outcome": outcome}
server_tool_result_block["extras"]["outcome"] = outcome
converted_blocks.append(server_tool_result_block)
else:
# Unknown type, preserve as non-standard

@@ -1,37 +1,9 @@
"""Derivations of standard content blocks from Google (VertexAI) content."""

import warnings

from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types

WARNED = False

def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message with Google (VertexAI) content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Google "
"VertexAI."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError

def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a chunk with Google (VertexAI) content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Google "
"VertexAI."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
from langchain_core.messages.block_translators.google_genai import (
translate_content,
translate_content_chunk,
)

def _register_google_vertexai_translator() -> None:

@@ -1,39 +1,135 @@
"""Derivations of standard content blocks from Groq content."""

import warnings
import json
import re
from typing import Any

from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types

WARNED = False
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs

def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message with Groq content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Groq."
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block

for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value # type: ignore[typeddict-item]

return standard_block
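A short illustration of `_populate_extras`; the payload values below are invented:

```python
block = {
    "type": "server_tool_result",
    "tool_call_id": "0",
    "output": "42",
    "status": "success",
}
raw = {"type": "python", "arguments": "...", "output": "42", "index": 0,
       "session_id": "abc"}  # "session_id" is not a known field

# Unknown provider fields are preserved under "extras" instead of being dropped.
_populate_extras(block, raw, {"type", "arguments", "index", "output"})
assert block["extras"] == {"session_id": "abc"}
```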

def _parse_code_json(s: str) -> dict:
"""Extract Python code from Groq built-in tool content.

Extracts the value of the 'code' field from a string of the form:
{"code": some_arbitrary_text_with_unescaped_quotes}

As Groq may not escape quotes in the executed tools, e.g.:
```
'{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
```
""" # noqa: E501
m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
if not m:
msg = (
"Could not extract Python code from Groq tool arguments. "
"Expected a JSON object with a 'code' field."
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError
raise ValueError(msg)
return {"code": m.group(1)}

def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
"""Derive standard content blocks from a message chunk with Groq content."""
global WARNED # noqa: PLW0603
if not WARNED:
warning_message = (
"Content block standardization is not yet fully supported for Groq."
def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
"""Convert groq message content to v1 format."""
content_blocks: list[types.ContentBlock] = []

if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
content_blocks.append(reasoning_block)

if executed_tools := message.additional_kwargs.get("executed_tools"):
for idx, executed_tool in enumerate(executed_tools):
args: dict[str, Any] | None = None
if arguments := executed_tool.get("arguments"):
try:
args = json.loads(arguments)
except json.JSONDecodeError:
if executed_tool.get("type") == "python":
try:
args = _parse_code_json(arguments)
except ValueError:
continue
elif (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
# GPT-OSS
args = {"code": arguments}
else:
continue
if isinstance(args, dict):
name = ""
if executed_tool.get("type") == "search":
name = "web_search"
elif executed_tool.get("type") == "python" or (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
name = "code_interpreter"
server_tool_call: types.ServerToolCall = {
"type": "server_tool_call",
"name": name,
"id": str(idx),
"args": args,
}
content_blocks.append(server_tool_call)
if tool_output := executed_tool.get("output"):
tool_result: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": str(idx),
"output": tool_output,
"status": "success",
}
known_fields = {"type", "arguments", "index", "output"}
_populate_extras(tool_result, executed_tool, known_fields)
content_blocks.append(tool_result)

if isinstance(message.content, str) and message.content:
content_blocks.append({"type": "text", "text": message.content})

for tool_call in message.tool_calls:
content_blocks.append( # noqa: PERF401
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
)
warnings.warn(warning_message, stacklevel=2)
WARNED = True
raise NotImplementedError

return content_blocks

def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with groq content."""
return _convert_to_v1_from_groq(message)

def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with groq content."""
return _convert_to_v1_from_groq(message)
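A hedged sketch of the new Groq translation path; the `executed_tools` payload below is a plausible invention, not captured provider output:

```python
from langchain_core.messages import AIMessage

msg = AIMessage(
    content="The answer is 42.",
    additional_kwargs={
        "executed_tools": [
            {"type": "python", "arguments": '{"code": "print(6 * 7)"}', "output": "42"}
        ]
    },
)
# Yields a server_tool_call, a server_tool_result, and a text block, in order.
for block in translate_content(msg):
    print(block["type"])
```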

def _register_groq_translator() -> None:
"""Register the Groq translator with the central registry.
"""Register the groq translator with the central registry.

Run automatically when the module is imported.
"""

@@ -10,7 +10,7 @@ def _convert_v0_multimodal_input_to_v1(
) -> list[types.ContentBlock]:
"""Convert v0 multimodal blocks to v1 format.

During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any v0 format
blocks to v1 format.
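The `'non_standard'` wrapping convention described in these docstrings surfaces like this (a hedged sketch with an invented provider block):

```python
from langchain_core.messages import AIMessage

provider_block = {"type": "proprietary_widget", "payload": {"x": 1}}
msg = AIMessage(content=[provider_block])

# Best-effort parsing wraps the unknown block rather than dropping it.
print(msg.content_blocks)
# -> [{"type": "non_standard", "value": {"type": "proprietary_widget", ...}}]
```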
|
||||
|
||||
@@ -155,7 +155,7 @@ def _convert_to_v1_from_chat_completions_input(
|
||||
) -> list[types.ContentBlock]:
|
||||
"""Convert OpenAI Chat Completions format blocks to v1 format.
|
||||
|
||||
During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
|
||||
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
|
||||
block as a `'non_standard'` block with the original block stored in the `value`
|
||||
field. This function attempts to unpack those blocks and convert any blocks that
|
||||
might be OpenAI format to v1 ContentBlocks.
|
||||
|
||||
@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
|
||||
"""The speaker / role of the Message."""
|
||||
|
||||
type: Literal["chat"] = "chat"
|
||||
"""The type of the message (used during serialization). Defaults to "chat"."""
|
||||
"""The type of the message (used during serialization)."""
|
||||
|
||||
|
||||
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
|
||||
@@ -29,11 +29,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
|
||||
# to make sure that the chunk variant can be discriminated from the
|
||||
# non-chunk variant.
|
||||
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
|
||||
"""The type of the message (used during serialization).
|
||||
|
||||
Defaults to `'ChatMessageChunk'`.
|
||||
|
||||
"""
|
||||
"""The type of the message (used during serialization)."""
|
||||
|
||||
@override
|
||||
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
|
||||
|
||||
@@ -143,7 +143,7 @@ class Citation(TypedDict):
|
||||
not the source text. This means that the indices are relative to the model's
|
||||
response, not the original document (as specified in the `url`).
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_citation` may also be used as a factory to create a `Citation`.
|
||||
Benefits include:
|
||||
|
||||
@@ -156,7 +156,9 @@ class Citation(TypedDict):
|
||||
"""Type of the content block. Used for discrimination."""
|
||||
|
||||
id: NotRequired[str]
|
||||
"""Content block identifier. Either:
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
@@ -201,6 +203,7 @@ class NonStandardAnnotation(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -211,6 +214,7 @@ class NonStandardAnnotation(TypedDict):
|
||||
|
||||
|
||||
Annotation = Citation | NonStandardAnnotation
|
||||
"""A union of all defined `Annotation` types."""
|
||||
|
||||
|
||||
class TextContentBlock(TypedDict):
|
||||
@@ -219,7 +223,7 @@ class TextContentBlock(TypedDict):
|
||||
This typically represents the main text content of a message, such as the response
|
||||
from a language model or the text of a user message.
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_text_block` may also be used as a factory to create a
|
||||
`TextContentBlock`. Benefits include:
|
||||
|
||||
@@ -235,6 +239,7 @@ class TextContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -254,7 +259,7 @@ class TextContentBlock(TypedDict):
|
||||
|
||||
|
||||
class ToolCall(TypedDict):
|
||||
"""Represents a request to call a tool.
|
||||
"""Represents an AI's request to call a tool.
|
||||
|
||||
Example:
|
||||
```python
|
||||
@@ -264,7 +269,7 @@ class ToolCall(TypedDict):
|
||||
This represents a request to call the tool named "foo" with arguments {"a": 1}
|
||||
and an identifier of "123".
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_tool_call` may also be used as a factory to create a
|
||||
`ToolCall`. Benefits include:
|
||||
|
||||
@@ -299,7 +304,7 @@ class ToolCall(TypedDict):
|
||||
|
||||
|
||||
class ToolCallChunk(TypedDict):
|
||||
"""A chunk of a tool call (e.g., as part of a stream).
|
||||
"""A chunk of a tool call (yielded when streaming).
|
||||
|
||||
When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
|
||||
all string attributes are concatenated. Chunks are only merged if their
|
||||
@@ -381,7 +386,10 @@ class InvalidToolCall(TypedDict):
|
||||
|
||||
|
||||
class ServerToolCall(TypedDict):
|
||||
"""Tool call that is executed server-side."""
|
||||
"""Tool call that is executed server-side.
|
||||
|
||||
For example: code execution, web search, etc.
|
||||
"""
|
||||
|
||||
type: Literal["server_tool_call"]
|
||||
"""Used for discrimination."""
|
||||
@@ -403,7 +411,7 @@ class ServerToolCall(TypedDict):
|
||||
|
||||
|
||||
class ServerToolCallChunk(TypedDict):
|
||||
"""A chunk of a tool call (as part of a stream)."""
|
||||
"""A chunk of a server-side tool call (yielded when streaming)."""
|
||||
|
||||
type: Literal["server_tool_call_chunk"]
|
||||
"""Used for discrimination."""
|
||||
@@ -452,7 +460,7 @@ class ServerToolResult(TypedDict):
|
||||
class ReasoningContentBlock(TypedDict):
|
||||
"""Reasoning output from a LLM.
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_reasoning_block` may also be used as a factory to create a
|
||||
`ReasoningContentBlock`. Benefits include:
|
||||
|
||||
@@ -468,6 +476,7 @@ class ReasoningContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -494,7 +503,7 @@ class ReasoningContentBlock(TypedDict):
|
||||
class ImageContentBlock(TypedDict):
|
||||
"""Image data.
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_image_block` may also be used as a factory to create a
|
||||
`ImageContentBlock`. Benefits include:
|
||||
|
||||
@@ -510,6 +519,7 @@ class ImageContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -541,7 +551,7 @@ class ImageContentBlock(TypedDict):
|
||||
class VideoContentBlock(TypedDict):
|
||||
"""Video data.
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_video_block` may also be used as a factory to create a
|
||||
`VideoContentBlock`. Benefits include:
|
||||
|
||||
@@ -557,6 +567,7 @@ class VideoContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -588,7 +599,7 @@ class VideoContentBlock(TypedDict):
|
||||
class AudioContentBlock(TypedDict):
|
||||
"""Audio data.
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_audio_block` may also be used as a factory to create an
|
||||
`AudioContentBlock`. Benefits include:
|
||||
* Automatic ID generation (when not provided)
|
||||
@@ -603,6 +614,7 @@ class AudioContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -642,9 +654,9 @@ class PlainTextContentBlock(TypedDict):
|
||||
|
||||
!!! note
|
||||
Title and context are optional fields that may be passed to the model. See
|
||||
Anthropic [example](https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).
|
||||
Anthropic [example](https://docs.claude.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_plaintext_block` may also be used as a factory to create a
|
||||
`PlainTextContentBlock`. Benefits include:
|
||||
|
||||
@@ -660,6 +672,7 @@ class PlainTextContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -694,7 +707,7 @@ class PlainTextContentBlock(TypedDict):
|
||||
|
||||
|
||||
class FileContentBlock(TypedDict):
|
||||
"""File data that doesn't fit into other multimodal blocks.
|
||||
"""File data that doesn't fit into other multimodal block types.
|
||||
|
||||
This block is intended for files that are not images, audio, or plaintext. For
|
||||
example, it can be used for PDFs, Word documents, etc.
|
||||
@@ -703,7 +716,7 @@ class FileContentBlock(TypedDict):
|
||||
content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
|
||||
`PlainTextContentBlock`).
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_file_block` may also be used as a factory to create a
|
||||
`FileContentBlock`. Benefits include:
|
||||
|
||||
@@ -719,6 +732,7 @@ class FileContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -765,7 +779,7 @@ class NonStandardContentBlock(TypedDict):
|
||||
Has no `extras` field, as provider-specific data should be included in the
|
||||
`value` field.
|
||||
|
||||
!!! note
|
||||
!!! note "Factory function"
|
||||
`create_non_standard_block` may also be used as a factory to create a
|
||||
`NonStandardContentBlock`. Benefits include:
|
||||
|
||||
@@ -781,6 +795,7 @@ class NonStandardContentBlock(TypedDict):
|
||||
"""Content block identifier.
|
||||
|
||||
Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
|
||||
|
||||
@@ -801,6 +816,7 @@ DataContentBlock = (
|
||||
| PlainTextContentBlock
|
||||
| FileContentBlock
|
||||
)
|
||||
"""A union of all defined multimodal data `ContentBlock` types."""
|
||||
|
||||
ToolContentBlock = (
|
||||
ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
|
||||
@@ -814,6 +830,7 @@ ContentBlock = (
|
||||
| DataContentBlock
|
||||
| ToolContentBlock
|
||||
)
|
||||
"""A union of all defined `ContentBlock` types and aliases."""
|
||||
|
||||
|
||||
KNOWN_BLOCK_TYPES = {
|
||||
|
||||
@@ -19,7 +19,7 @@ class FunctionMessage(BaseMessage):
do not contain the `tool_call_id` field.

The `tool_call_id` field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
tool call response. Useful in situations where a chat model is able
to request multiple tool calls in parallel.

"""
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
"""The name of the function that was executed."""

type: Literal["function"] = "function"
"""The type of the message (used for serialization). Defaults to `'function'`."""
"""The type of the message (used for serialization)."""


class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,11 +38,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).

Defaults to `'FunctionMessageChunk'`.

"""
"""The type of the message (used for serialization)."""

@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]

@@ -7,9 +7,9 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk


class HumanMessage(BaseMessage):
"""Message from a human.
"""Message from the user.

`HumanMessage`s are messages that are passed in from a human to the model.
A `HumanMessage` is a message that is passed in from a user to the model.

Example:
```python
@@ -27,11 +27,7 @@ class HumanMessage(BaseMessage):
"""

type: Literal["human"] = "human"
"""The type of the message (used for serialization).

Defaults to `'human'`.

"""
"""The type of the message (used for serialization)."""

@overload
def __init__(
@@ -71,5 +67,4 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "HumanMessageChunk"."""
"""The type of the message (used for serialization)."""
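A quick usage sketch of the message class whose docstring changes above; the `type` literal shown in the hunk is what discriminates the class during serialization:

```python
from langchain_core.messages import HumanMessage

# A HumanMessage is a message passed in from the user to the model.
msg = HumanMessage(content="What is the capital of France?")
assert msg.type == "human"  # serialization discriminator from the hunk above
```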
@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""

type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
"""The type of the message (used for serialization)."""

def __init__(
self,

@@ -27,11 +27,7 @@ class SystemMessage(BaseMessage):
"""

type: Literal["system"] = "system"
"""The type of the message (used for serialization).

Defaults to `'system'`.

"""
"""The type of the message (used for serialization)."""

@overload
def __init__(
@@ -71,8 +67,4 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).

Defaults to `'SystemMessageChunk'`.

"""
"""The type of the message (used for serialization)."""
@@ -31,36 +31,34 @@ class ToolMessage(BaseMessage, ToolOutputMixin):

Example: A `ToolMessage` representing a result of `42` from a tool call with id

```python
from langchain_core.messages import ToolMessage
```python
from langchain_core.messages import ToolMessage

ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
```
ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
```

Example: A `ToolMessage` where only part of the tool output is sent to the model
and the full output is passed in to artifact.
and the full output is passed in to artifact.

!!! version-added "Added in version 0.2.17"
```python
from langchain_core.messages import ToolMessage

```python
from langchain_core.messages import ToolMessage
tool_output = {
"stdout": "From the graph we can see that the correlation between "
"x and y is ...",
"stderr": None,
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
}

tool_output = {
"stdout": "From the graph we can see that the correlation between "
"x and y is ...",
"stderr": None,
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
}

ToolMessage(
content=tool_output["stdout"],
artifact=tool_output,
tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
)
```
ToolMessage(
content=tool_output["stdout"],
artifact=tool_output,
tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
)
```

The `tool_call_id` field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
tool call response. Useful in situations where a chat model is able
to request multiple tool calls in parallel.

"""
@@ -69,11 +67,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
"""Tool call that this message is responding to."""

type: Literal["tool"] = "tool"
"""The type of the message (used for serialization).

Defaults to `'tool'`.

"""
"""The type of the message (used for serialization)."""

artifact: Any = None
"""Artifact of the Tool execution which is not meant to be sent to the model.
@@ -82,21 +76,15 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
a subset of the full tool output is being passed as message content but the full
output is needed in other parts of the code.

!!! version-added "Added in version 0.2.17"

"""

status: Literal["success", "error"] = "success"
"""Status of the tool invocation.

!!! version-added "Added in version 0.2.24"

"""
"""Status of the tool invocation."""

additional_kwargs: dict = Field(default_factory=dict, repr=False)
"""Currently inherited from BaseMessage, but not used."""
"""Currently inherited from `BaseMessage`, but not used."""
response_metadata: dict = Field(default_factory=dict, repr=False)
"""Currently inherited from BaseMessage, but not used."""
"""Currently inherited from `BaseMessage`, but not used."""

@model_validator(mode="before")
@classmethod
@@ -164,12 +152,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
content_blocks: list[types.ContentBlock] | None = None,
**kwargs: Any,
) -> None:
"""Initialize `ToolMessage`.
"""Initialize a `ToolMessage`.

Specify `content` as positional arg or `content_blocks` for typing.

Args:
content: The string contents of the message.
content: The contents of the message.
content_blocks: Typed standard content.
**kwargs: Additional fields.
"""
@@ -215,7 +203,7 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):


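To make the `tool_call_id` pairing concrete: a sketch of an AI turn with two parallel tool calls and the `ToolMessage` results matched back by id (names, ids, and values are illustrative):

```python
from langchain_core.messages import AIMessage, ToolMessage

ai_msg = AIMessage(
    content="",
    tool_calls=[
        {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_1"},
        {"name": "mul", "args": {"a": 3, "b": 4}, "id": "call_2"},
    ],
)

# Each result is associated with its request via `tool_call_id`.
results = [
    ToolMessage(content="3", tool_call_id="call_1"),
    ToolMessage(content="12", tool_call_id="call_2", status="success"),
]
```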
class ToolCall(TypedDict):
"""Represents a request to call a tool.
"""Represents an AI's request to call a tool.

Example:
```python
@@ -261,7 +249,7 @@ def tool_call(


class ToolCallChunk(TypedDict):
"""A chunk of a tool call (e.g., as part of a stream).
"""A chunk of a tool call (yielded when streaming).

When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
all string attributes are concatenated. Chunks are only merged if their

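A sketch of the merge behavior described in the `ToolCallChunk` docstring above, assuming the usual `AIMessageChunk.__add__` semantics (string attributes concatenate when chunk indices match):

```python
from langchain_core.messages import AIMessageChunk

left = AIMessageChunk(
    content="",
    tool_call_chunks=[{"name": "add", "args": '{"a": 1,', "id": "call_1", "index": 0}],
)
right = AIMessageChunk(
    content="",
    tool_call_chunks=[{"name": None, "args": ' "b": 2}', "id": None, "index": 0}],
)

merged = left + right  # chunks merge because their `index` values match
print(merged.tool_call_chunks[0]["args"])  # '{"a": 1, "b": 2}'
```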
@@ -86,6 +86,7 @@ AnyMessage = Annotated[
| Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")],
Field(discriminator=Discriminator(_get_type)),
]
""""A type representing any defined `Message` or `MessageChunk` type."""


def get_buffer_string(
@@ -96,9 +97,7 @@ def get_buffer_string(
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of `HumanMessage`s.
Default is `'Human'`.
ai_prefix: The prefix to prepend to contents of `AIMessage`. Default is
`'AI'`.
ai_prefix: The prefix to prepend to contents of `AIMessage`.

Returns:
A single string concatenation of all input messages.
@@ -211,6 +210,7 @@ def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
MessageLikeRepresentation = (
BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]
)
"""A type representing the various ways a message can be represented."""


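A short sketch of `get_buffer_string` with the documented default prefixes:

```python
from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

transcript = get_buffer_string(
    [HumanMessage(content="hi"), AIMessage(content="hello!")],
    human_prefix="Human",  # documented default
    ai_prefix="AI",        # documented default
)
print(transcript)
# Human: hi
# AI: hello!
```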
def _create_message_from_message_type(
@@ -227,10 +227,10 @@ def _create_message_from_message_type(
Args:
message_type: (str) the type of the message (e.g., `'human'`, `'ai'`, etc.).
content: (str) the content string.
name: (str) the name of the message. Default is None.
tool_call_id: (str) the tool call id. Default is None.
tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
id: (str) the id of the message. Default is None.
name: (str) the name of the message.
tool_call_id: (str) the tool call id.
tool_calls: (list[dict[str, Any]]) the tool calls.
id: (str) the id of the message.
additional_kwargs: (dict[str, Any]) additional keyword arguments.

Returns:
@@ -319,7 +319,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
message: a representation of a message in one of the supported formats.

Returns:
an instance of a message or a message template.
An instance of a message or a message template.

Raises:
NotImplementedError: if the message type is not supported.
@@ -425,19 +425,19 @@ def filter_messages(

Args:
messages: Sequence Message-like objects to filter.
include_names: Message names to include. Default is None.
exclude_names: Messages names to exclude. Default is None.
include_names: Message names to include.
exclude_names: Messages names to exclude.
include_types: Message types to include. Can be specified as string names
(e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
Default is None.

exclude_types: Message types to exclude. Can be specified as string names
(e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
Default is None.
include_ids: Message IDs to include. Default is None.
exclude_ids: Message IDs to exclude. Default is None.
exclude_tool_calls: Tool call IDs to exclude. Default is None.

include_ids: Message IDs to include.
exclude_ids: Message IDs to exclude.
exclude_tool_calls: Tool call IDs to exclude.
Can be one of the following:
- `True`: all `AIMessage`s with tool calls and all
`ToolMessage` objects will be excluded.
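A minimal sketch of `filter_messages` exercising the arguments documented above (names and ids are illustrative):

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    filter_messages,
)

messages = [
    SystemMessage(content="you are a helpful assistant", id="1"),
    HumanMessage(content="few-shot example", id="2", name="example_user"),
    HumanMessage(content="real input", id="3"),
    AIMessage(content="real output", id="4"),
]

# Keep system/human turns, but drop the named few-shot example message.
kept = filter_messages(
    messages,
    include_types=["system", "human"],
    exclude_names=["example_user"],
)
```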
@@ -568,7 +568,6 @@ def merge_message_runs(
Args:
messages: Sequence Message-like objects to merge.
chunk_separator: Specify the string to be inserted between message chunks.
Defaults to `'\n'`.

Returns:
list of BaseMessages with consecutive runs of message types merged into single
@@ -703,7 +702,7 @@ def trim_messages(
r"""Trim messages to be below a token count.

`trim_messages` can be used to reduce the size of a chat history to a specified
token count or specified message count.
token or message count.

In either case, if passing the trimmed chat history back into a chat model
directly, the resulting chat history should usually satisfy the following
@@ -714,8 +713,6 @@ def trim_messages(
followed by a `HumanMessage`. To achieve this, set `start_on='human'`.
In addition, generally a `ToolMessage` can only appear after an `AIMessage`
that involved a tool call.
Please see the following link for more information about messages:
https://python.langchain.com/docs/concepts/#messages
2. It includes recent messages and drops old messages in the chat history.
To achieve this set the `strategy='last'`.
3. Usually, the new chat history should include the `SystemMessage` if it
@@ -745,12 +742,10 @@ def trim_messages(
strategy: Strategy for trimming.
- `'first'`: Keep the first `<= n_count` tokens of the messages.
- `'last'`: Keep the last `<= n_count` tokens of the messages.
Default is `'last'`.
allow_partial: Whether to split a message if only part of the message can be
included. If `strategy='last'` then the last partial contents of a message
are included. If `strategy='first'` then the first partial contents of a
message are included.
Default is False.
end_on: The message type to end on. If specified then every message after the
last occurrence of this type is ignored. If `strategy='last'` then this
is done before we attempt to get the last `max_tokens`. If
@@ -759,7 +754,7 @@ def trim_messages(
`'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g.
`SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single
type or a list of types.
Default is None.

start_on: The message type to start on. Should only be specified if
`strategy='last'`. If specified then every message before
the first occurrence of this type is ignored. This is done after we trim
@@ -768,10 +763,9 @@ def trim_messages(
specified as string names (e.g. `'system'`, `'human'`, `'ai'`, ...) or
as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`,
`AIMessage`, ...). Can be a single type or a list of types.
Default is None.
include_system: Whether to keep the SystemMessage if there is one at index 0.
Should only be specified if `strategy="last"`.
Default is False.

include_system: Whether to keep the `SystemMessage` if there is one at index
`0`. Should only be specified if `strategy="last"`.
text_splitter: Function or `langchain_text_splitters.TextSplitter` for
splitting the string contents of a message. Only used if
`allow_partial=True`. If `strategy='last'` then the last split tokens
@@ -782,7 +776,7 @@ def trim_messages(
newlines.

Returns:
list of trimmed `BaseMessage`.
List of trimmed `BaseMessage`.

Raises:
ValueError: if two incompatible arguments are specified or an unrecognized
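For context on the `trim_messages` parameters documented in the hunks above, a minimal usage sketch (the token budget and messages are illustrative, not from this diff):

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)
from langchain_core.messages.utils import count_tokens_approximately

trimmed = trim_messages(
    [
        SystemMessage(content="you're a good assistant"),
        HumanMessage(content="first question"),
        AIMessage(content="first answer"),
        HumanMessage(content="latest question"),
    ],
    max_tokens=40,                          # illustrative budget
    strategy="last",                        # keep the most recent messages
    token_counter=count_tokens_approximately,
    start_on="human",                       # valid histories start on a human turn
    include_system=True,                    # keep the SystemMessage at index 0
)
```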
@@ -1683,12 +1677,12 @@ def count_tokens_approximately(
Args:
messages: List of messages to count tokens for.
chars_per_token: Number of characters per token to use for the approximation.
Default is 4 (one token corresponds to ~4 chars for common English text).
You can also specify float values for more fine-grained control.
One token corresponds to ~4 chars for common English text.
You can also specify `float` values for more fine-grained control.
[See more here](https://platform.openai.com/tokenizer).
extra_tokens_per_message: Number of extra tokens to add per message.
Default is 3 (special tokens, including beginning/end of message).
You can also specify float values for more fine-grained control.
extra_tokens_per_message: Number of extra tokens to add per message, e.g.
special tokens, including beginning/end of message.
You can also specify `float` values for more fine-grained control.
[See more here](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb).
count_name: Whether to include message names in the count.
Enabled by default.

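A quick sketch of the approximation with the documented defaults made explicit:

```python
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.messages.utils import count_tokens_approximately

approx = count_tokens_approximately(
    [HumanMessage(content="hello there"), AIMessage(content="hi!")],
    chars_per_token=4.0,           # ~4 characters per token for English text
    extra_tokens_per_message=3.0,  # per-message special-token overhead
)
print(approx)
```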
@@ -31,13 +31,13 @@ class BaseLLMOutputParser(ABC, Generic[T]):

@abstractmethod
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model Generations into a specific format.
"""Parse a list of candidate model `Generation` objects into a specific format.

Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results. Default is False.
for parsers that can parse partial results.

Returns:
Structured output.
@@ -46,17 +46,17 @@ class BaseLLMOutputParser(ABC, Generic[T]):
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model Generations into a specific format.
"""Async parse a list of candidate model `Generation` objects into a specific format.

Args:
result: A list of Generations to be parsed. The Generations are assumed
result: A list of `Generation` to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results. Default is False.
for parsers that can parse partial results.

Returns:
Structured output.
"""
""" # noqa: E501
return await run_in_executor(None, self.parse_result, result, partial=partial)


@@ -172,7 +172,7 @@ class BaseOutputParser(
This property is inferred from the first type argument of the class.

Raises:
TypeError: If the class doesn't have an inferable OutputType.
TypeError: If the class doesn't have an inferable `OutputType`.
"""
for base in self.__class__.mro():
if hasattr(base, "__pydantic_generic_metadata__"):
@@ -234,16 +234,16 @@ class BaseOutputParser(

@override
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model Generations into a specific format.
"""Parse a list of candidate model `Generation` objects into a specific format.

The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.

Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results. Default is False.
for parsers that can parse partial results.

Returns:
Structured output.
@@ -264,20 +264,20 @@ class BaseOutputParser(
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model Generations into a specific format.
"""Async parse a list of candidate model `Generation` objects into a specific format.

The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.

Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results. Default is False.
for parsers that can parse partial results.

Returns:
Structured output.
"""
""" # noqa: E501
return await run_in_executor(None, self.parse_result, result, partial=partial)

async def aparse(self, text: str) -> T:
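To ground the `parse_result` contract documented above: the base implementation parses only the first (highest-likelihood) `Generation`, so a concrete parser usually just implements `parse`. A minimal sketch with a toy format:

```python
from langchain_core.output_parsers import BaseOutputParser


class CommaListParser(BaseOutputParser[list[str]]):
    """Toy parser: split the top candidate's text on commas."""

    def parse(self, text: str) -> list[str]:
        return [part.strip() for part in text.split(",")]


parser = CommaListParser()
print(parser.parse("a, b, c"))  # ['a', 'b', 'c']
```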
@@ -299,13 +299,13 @@ class BaseOutputParser(
) -> Any:
"""Parse the output of an LLM call with the input prompt for context.

The prompt is largely provided in the event the OutputParser wants
The prompt is largely provided in the event the `OutputParser` wants
to retry or fix the output in some way, and needs information from
the prompt to do so.

Args:
completion: String output of a language model.
prompt: Input PromptValue.
prompt: Input `PromptValue`.

Returns:
Structured output.

@@ -62,7 +62,6 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Returns:
The parsed JSON object.

@@ -146,7 +146,7 @@ class CommaSeparatedListOutputParser(ListOutputParser):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "output_parsers", "list"]`

@@ -238,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
The validated values.

Raises:
`ValueError`: If the schema is not a Pydantic schema.
ValueError: If the schema is not a Pydantic schema.
"""
schema = values["pydantic_schema"]
if "args_only" not in values:
@@ -264,7 +264,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
partial: Whether to parse partial JSON objects.

Raises:
`ValueError`: If the Pydantic schema is not valid.
ValueError: If the Pydantic schema is not valid.

Returns:
The parsed JSON object.

@@ -31,10 +31,9 @@ def parse_tool_call(

Args:
raw_tool_call: The raw tool call to parse.
partial: Whether to parse partial JSON. Default is False.
partial: Whether to parse partial JSON.
strict: Whether to allow non-JSON-compliant strings.
Default is False.
return_id: Whether to return the tool call id. Default is True.
return_id: Whether to return the tool call id.

Returns:
The parsed tool call.
@@ -105,10 +104,9 @@ def parse_tool_calls(

Args:
raw_tool_calls: The raw tool calls to parse.
partial: Whether to parse partial JSON. Default is False.
partial: Whether to parse partial JSON.
strict: Whether to allow non-JSON-compliant strings.
Default is False.
return_id: Whether to return the tool call id. Default is True.
return_id: Whether to return the tool call id.

Returns:
The parsed tool calls.
@@ -165,7 +163,6 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Returns:
The parsed tool calls.
@@ -229,7 +226,6 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Raises:
OutputParserException: If the generation is not a chat generation.
@@ -313,7 +309,6 @@ class PydanticToolsParser(JsonOutputToolsParser):
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Default is False.

Returns:
The parsed Pydantic objects.

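A minimal sketch of `PydanticToolsParser`, which the last hunk above documents; the schema and chain wiring are illustrative:

```python
from pydantic import BaseModel, Field

from langchain_core.output_parsers.openai_tools import PydanticToolsParser


class Add(BaseModel):
    """Add two integers."""

    a: int = Field(description="first number")
    b: int = Field(description="second number")


parser = PydanticToolsParser(tools=[Add])
# Typically chained after a tool-calling model:
# chain = llm.bind_tools([Add]) | parser
```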
@@ -19,7 +19,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "schema", "output_parser"]`

@@ -44,7 +44,7 @@ class Generation(Serializable):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "schema", "output"]`

@@ -24,8 +24,8 @@ from langchain_core.messages import (
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.

PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
`PromptValues` can be converted to both LLM (pure text-generation) inputs and
chat model inputs.
"""

@classmethod
@@ -35,7 +35,7 @@ class PromptValue(Serializable, ABC):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

This is used to determine the namespace of the object when serializing.

@@ -62,7 +62,7 @@ class StringPromptValue(PromptValue):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

This is used to determine the namespace of the object when serializing.

@@ -99,7 +99,7 @@ class ChatPromptValue(PromptValue):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

This is used to determine the namespace of the object when serializing.

@@ -113,11 +113,11 @@ class ImageURL(TypedDict, total=False):
"""Image URL."""

detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image. Defaults to `'auto'`.
"""Specifies the detail level of the image.

Can be `'auto'`, `'low'`, or `'high'`.

This follows OpenAI's Chat Completion API's image URL format.

"""

url: str

@@ -96,7 +96,7 @@ class BasePromptTemplate(

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "schema", "prompt_template"]`

@@ -147,7 +147,6 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
optional: If `True` format_messages can be called with no arguments and will
return an empty list. If `False` then a named argument with name
`variable_name` must be passed in, even if the value is an empty list.
Defaults to `False`.]
"""
# mypy can't detect the init which is defined in the parent class
# b/c these are BaseModel classes.
@@ -195,7 +194,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -235,7 +234,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):

Args:
template: a template.
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.
partial_variables: A dictionary of variables that can be used to partially
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
@@ -330,7 +329,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -412,7 +411,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
Args:
template: a template.
template_format: format of the template.
Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
Options are: 'f-string', 'mustache', 'jinja2'.
partial_variables: A dictionary of variables that can be used too partially.

**kwargs: keyword arguments to pass to the constructor.
@@ -637,7 +636,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -750,7 +749,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -905,7 +904,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
(message type, template); e.g., ("human", "{user_input}"),
(4) 2-tuple of (message class, template), (5) a string which is
shorthand for ("human", template); e.g., "{user_input}".
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.
input_variables: A list of the names of the variables whose values are
required as inputs to the prompt.
optional_variables: A list of the names of the variables for placeholder
@@ -971,7 +970,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "prompts", "chat"]`
@@ -1128,7 +1127,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
(message type, template); e.g., ("human", "{user_input}"),
(4) 2-tuple of (message class, template), (5) a string which is
shorthand for ("human", template); e.g., "{user_input}".
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.

Returns:
a chat prompt template.
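A sketch of the `from_messages` input formats enumerated above, covering the 2-tuple, placeholder, and shorthand forms (template strings are illustrative):

```python
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant named {name}."),
        ("placeholder", "{history}"),  # 2-tuple placeholder form
        ("human", "{user_input}"),     # (message type, template) form
    ]
)
messages = prompt.invoke(
    {"name": "Bob", "history": [], "user_input": "hello"}
).to_messages()
```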
@@ -1287,7 +1286,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.
@@ -1306,7 +1305,7 @@ def _create_template_from_message_type(
Args:
message_type: str the type of the message template (e.g., "human", "ai", etc.)
template: str the template string.
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.

Returns:
a message prompt template of the appropriate type.
@@ -1383,7 +1382,7 @@ def _convert_to_message_template(

Args:
message: a representation of a message in one of the supported formats.
template_format: format of the template. Defaults to "f-string".
template_format: format of the template.

Returns:
an instance of a message or a message template.

@@ -74,7 +74,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain_core", "prompts", "dict"]`
@@ -85,7 +85,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.

@@ -46,7 +46,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "prompts", "few_shot_with_templates"]`

@@ -49,7 +49,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "prompts", "image"]`

@@ -23,7 +23,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "prompts", "chat"]`
@@ -68,7 +68,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):
"""Human-readable representation.

Args:
html: Whether to format as HTML. Defaults to `False`.
html: Whether to format as HTML.

Returns:
Human-readable representation.

@@ -66,7 +66,7 @@ class PromptTemplate(StringPromptTemplate):
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "prompts", "prompt"]`
@@ -220,7 +220,7 @@ class PromptTemplate(StringPromptTemplate):
example_separator: The separator to use in between examples. Defaults
to two new line characters.
prefix: String that should go before any examples. Generally includes
examples. Default to an empty string.
examples.

Returns:
The final prompt generated.
@@ -275,13 +275,12 @@ class PromptTemplate(StringPromptTemplate):
Args:
template: The template to load.
template_format: The format of the template. Use `jinja2` for jinja2,
`mustache` for mustache, and `f-string` for f-strings.
Defaults to `f-string`.
`mustache` for mustache, and `f-string` for f-strings.
partial_variables: A dictionary of variables that can be used to partially
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
**kwargs: Any other arguments to pass to the prompt template.

Returns:

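The `partial_variables` behavior documented in the hunk above, as a runnable sketch:

```python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "{variable1} {variable2}",
    partial_variables={"variable1": "foo"},
)
print(prompt.format(variable2="bar"))  # "foo bar"
```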
@@ -276,7 +276,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

Returns:
`["langchain", "prompts", "base"]`

@@ -63,13 +63,13 @@ class StructuredPrompt(ChatPromptTemplate):

@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
"""Get the namespace of the LangChain object.

For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is `["langchain", "llms", "openai"]`

Returns:
The namespace of the langchain object.
The namespace of the LangChain object.
"""
return cls.__module__.split(".")


@@ -1,30 +0,0 @@
"""Pydantic v1 compatibility shim."""

from importlib import metadata

from pydantic.v1 import * # noqa: F403

from langchain_core._api.deprecation import warn_deprecated

try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0

warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
@@ -1,23 +0,0 @@
"""Pydantic v1 compatibility shim."""

from pydantic.v1.dataclasses import * # noqa: F403

from langchain_core._api import warn_deprecated

warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
@@ -1,23 +0,0 @@
"""Pydantic v1 compatibility shim."""

from pydantic.v1.main import * # noqa: F403

from langchain_core._api import warn_deprecated

warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
@@ -21,11 +21,8 @@ class BaseRateLimiter(abc.ABC):
Current limitations:

- Rate limiting information is not surfaced in tracing or callbacks. This means
that the total time it takes to invoke a chat model will encompass both
the time spent waiting for tokens and the time spent making the request.


!!! version-added "Added in version 0.2.24"
that the total time it takes to invoke a chat model will encompass both
the time spent waiting for tokens and the time spent making the request.
"""

@abc.abstractmethod
@@ -33,18 +30,18 @@ class BaseRateLimiter(abc.ABC):
"""Attempt to acquire the necessary tokens for the rate limiter.

This method blocks until the required tokens are available if `blocking`
is set to True.
is set to `True`.

If `blocking` is set to False, the method will immediately return the result
If `blocking` is set to `False`, the method will immediately return the result
of the attempt to acquire the tokens.

Args:
blocking: If `True`, the method will block until the tokens are available.
If `False`, the method will return immediately with the result of
the attempt. Defaults to `True`.
the attempt.

Returns:
`True` if the tokens were successfully acquired, `False` otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""

@abc.abstractmethod
@@ -52,18 +49,18 @@ class BaseRateLimiter(abc.ABC):
"""Attempt to acquire the necessary tokens for the rate limiter.

This method blocks until the required tokens are available if `blocking`
is set to True.
is set to `True`.

If `blocking` is set to False, the method will immediately return the result
If `blocking` is set to `False`, the method will immediately return the result
of the attempt to acquire the tokens.

Args:
blocking: If `True`, the method will block until the tokens are available.
If `False`, the method will return immediately with the result of
the attempt. Defaults to `True`.
the attempt.

Returns:
`True` if the tokens were successfully acquired, `False` otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""


@@ -84,7 +81,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
not enough tokens in the bucket, the request is blocked until there are
enough tokens.

These *tokens* have NOTHING to do with LLM tokens. They are just
These tokens have nothing to do with LLM tokens. They are just
a way to keep track of how many requests can be made at a given time.

Current limitations:
@@ -109,7 +106,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
from langchain_anthropic import ChatAnthropic

model = ChatAnthropic(
model_name="claude-3-opus-20240229", rate_limiter=rate_limiter
model_name="claude-sonnet-4-5-20250929", rate_limiter=rate_limiter
)

for _ in range(5):
@@ -118,9 +115,6 @@ class InMemoryRateLimiter(BaseRateLimiter):
toc = time.time()
print(toc - tic)
```

!!! version-added "Added in version 0.2.24"

""" # noqa: E501

def __init__(
@@ -132,7 +126,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
) -> None:
"""A rate limiter based on a token bucket.

These *tokens* have NOTHING to do with LLM tokens. They are just
These tokens have nothing to do with LLM tokens. They are just
a way to keep track of how many requests can be made at a given time.

This rate limiter is designed to work in a threaded environment.
@@ -145,11 +139,11 @@ class InMemoryRateLimiter(BaseRateLimiter):
Args:
requests_per_second: The number of tokens to add per second to the bucket.
The tokens represent "credit" that can be used to make requests.
check_every_n_seconds: check whether the tokens are available
check_every_n_seconds: Check whether the tokens are available
every this many seconds. Can be a float to represent
fractions of a second.
max_bucket_size: The maximum number of tokens that can be in the bucket.
Must be at least 1. Used to prevent bursts of requests.
Must be at least `1`. Used to prevent bursts of requests.
"""
# Number of requests that we can make per second.
self.requests_per_second = requests_per_second
@@ -199,18 +193,18 @@ class InMemoryRateLimiter(BaseRateLimiter):
"""Attempt to acquire a token from the rate limiter.

This method blocks until the required tokens are available if `blocking`
is set to True.
is set to `True`.

If `blocking` is set to False, the method will immediately return the result
If `blocking` is set to `False`, the method will immediately return the result
of the attempt to acquire the tokens.

Args:
blocking: If `True`, the method will block until the tokens are available.
If `False`, the method will return immediately with the result of
the attempt. Defaults to `True`.
the attempt.

Returns:
`True` if the tokens were successfully acquired, `False` otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""
if not blocking:
return self._consume()
@@ -223,18 +217,18 @@ class InMemoryRateLimiter(BaseRateLimiter):
"""Attempt to acquire a token from the rate limiter. Async version.

This method blocks until the required tokens are available if `blocking`
is set to True.
is set to `True`.

If `blocking` is set to False, the method will immediately return the result
If `blocking` is set to `False`, the method will immediately return the result
of the attempt to acquire the tokens.

Args:
blocking: If `True`, the method will block until the tokens are available.
If `False`, the method will return immediately with the result of
the attempt. Defaults to `True`.
the attempt.

Returns:
`True` if the tokens were successfully acquired, `False` otherwise.
`True` if the tokens were successfully acquired, `False` otherwise.
"""
if not blocking:
return self._consume()

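A compact sketch of the blocking and non-blocking `acquire` semantics spelled out in these hunks (parameter values illustrative):

```python
from langchain_core.rate_limiters import InMemoryRateLimiter

limiter = InMemoryRateLimiter(
    requests_per_second=2,      # bucket refill rate ("credit" per second)
    check_every_n_seconds=0.1,  # polling interval while blocking
    max_bucket_size=10,         # must be at least 1; caps bursts
)

assert limiter.acquire(blocking=True)  # blocks until a token is available
got = limiter.acquire(blocking=False)  # returns immediately with the result
```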
@@ -70,45 +70,45 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):

Example: A retriever that returns the first 5 documents from a list of documents

```python
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
```python
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever

class SimpleRetriever(BaseRetriever):
docs: list[Document]
k: int = 5
class SimpleRetriever(BaseRetriever):
docs: list[Document]
k: int = 5

def _get_relevant_documents(self, query: str) -> list[Document]:
\"\"\"Return the first k documents from the list of documents\"\"\"
return self.docs[:self.k]
def _get_relevant_documents(self, query: str) -> list[Document]:
\"\"\"Return the first k documents from the list of documents\"\"\"
return self.docs[:self.k]

async def _aget_relevant_documents(self, query: str) -> list[Document]:
\"\"\"(Optional) async native implementation.\"\"\"
return self.docs[:self.k]
```
async def _aget_relevant_documents(self, query: str) -> list[Document]:
\"\"\"(Optional) async native implementation.\"\"\"
return self.docs[:self.k]
```

Example: A simple retriever based on a scikit-learn vectorizer

```python
from sklearn.metrics.pairwise import cosine_similarity
```python
from sklearn.metrics.pairwise import cosine_similarity


class TFIDFRetriever(BaseRetriever, BaseModel):
vectorizer: Any
docs: list[Document]
tfidf_array: Any
k: int = 4
class TFIDFRetriever(BaseRetriever, BaseModel):
vectorizer: Any
docs: list[Document]
tfidf_array: Any
k: int = 4

class Config:
arbitrary_types_allowed = True
class Config:
arbitrary_types_allowed = True

def _get_relevant_documents(self, query: str) -> list[Document]:
# Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
query_vec = self.vectorizer.transform([query])
# Op -- (n_docs,1) -- Cosine Sim with each doc
results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
```
def _get_relevant_documents(self, query: str) -> list[Document]:
# Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
query_vec = self.vectorizer.transform([query])
# Op -- (n_docs,1) -- Cosine Sim with each doc
results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
```
"""

model_config = ConfigDict(

@@ -860,7 +860,7 @@ class Runnable(ABC, Generic[Input, Output]):

The default implementation of batch works well for IO bound runnables.

Subclasses should override this method if they can batch more efficiently;
Subclasses must override this method if they can batch more efficiently;
e.g., if the underlying `Runnable` uses an API which supports a batch mode.

Args:
@@ -871,7 +871,6 @@ class Runnable(ABC, Generic[Input, Output]):
to do in parallel, and other keys. Please refer to the
`RunnableConfig` for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.

Returns:
@@ -938,7 +937,6 @@ class Runnable(ABC, Generic[Input, Output]):
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.

Yields:
@@ -994,7 +992,7 @@ class Runnable(ABC, Generic[Input, Output]):

The default implementation of `batch` works well for IO bound runnables.

Subclasses should override this method if they can batch more efficiently;
Subclasses must override this method if they can batch more efficiently;
e.g., if the underlying `Runnable` uses an API which supports a batch mode.

Args:
@@ -1005,7 +1003,6 @@ class Runnable(ABC, Generic[Input, Output]):
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.

Returns:
@@ -1069,7 +1066,6 @@ class Runnable(ABC, Generic[Input, Output]):
do in parallel, and other keys. Please refer to the `RunnableConfig`
for more details.
return_exceptions: Whether to return exceptions instead of raising them.
Defaults to `False`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.

Yields:
@@ -1116,7 +1112,7 @@ class Runnable(ABC, Generic[Input, Output]):
) -> Iterator[Output]:
"""Default implementation of `stream`, which calls `invoke`.

Subclasses should override this method if they support streaming output.
Subclasses must override this method if they support streaming output.

Args:
input: The input to the `Runnable`.
@@ -1137,7 +1133,7 @@ class Runnable(ABC, Generic[Input, Output]):
) -> AsyncIterator[Output]:
"""Default implementation of `astream`, which calls `ainvoke`.

Subclasses should override this method if they support streaming output.
Subclasses must override this method if they support streaming output.

Args:
input: The input to the `Runnable`.
@@ -1357,7 +1353,8 @@ class Runnable(ABC, Generic[Input, Output]):
).with_config({"run_name": "my_template", "tags": ["my_template"]})
```

Example:
For instance:

```python
from langchain_core.runnables import RunnableLambda

@@ -1370,8 +1367,8 @@ class Runnable(ABC, Generic[Input, Output]):

events = [event async for event in chain.astream_events("hello", version="v2")]

# will produce the following events (run_id, and parent_ids
# has been omitted for brevity):
# Will produce the following events
# (run_id, and parent_ids has been omitted for brevity):
[
{
"data": {"input": "hello"},
@@ -1426,7 +1423,7 @@ class Runnable(ABC, Generic[Input, Output]):

async for event in slow_thing.astream_events("some_input", version="v2"):
print(event)
``
```

Args:
input: The input to the `Runnable`.
@@ -1500,7 +1497,7 @@ class Runnable(ABC, Generic[Input, Output]):

Default implementation of transform, which buffers input and calls `astream`.

Subclasses should override this method if they can start producing output while
Subclasses must override this method if they can start producing output while
input is still being generated.

Args:
@@ -1545,7 +1542,7 @@ class Runnable(ABC, Generic[Input, Output]):

Default implementation of atransform, which buffers input and calls `astream`.

Subclasses should override this method if they can start producing output while
Subclasses must override this method if they can start producing output while
input is still being generated.

Args:
@@ -1816,7 +1813,7 @@ class Runnable(ABC, Generic[Input, Output]):
output_type: The output type to bind to the `Runnable`.

Returns:
A new Runnable with the types bound.
A new `Runnable` with the types bound.
"""
return RunnableBinding(
bound=self,
@@ -1837,14 +1834,13 @@ class Runnable(ABC, Generic[Input, Output]):

Args:
retry_if_exception_type: A tuple of exception types to retry on.
Defaults to (Exception,).
wait_exponential_jitter: Whether to add jitter to the wait
time between retries. Defaults to `True`.
time between retries.
stop_after_attempt: The maximum number of attempts to make before
giving up. Defaults to 3.
giving up.
exponential_jitter_params: Parameters for
`tenacity.wait_exponential_jitter`. Namely: `initial`, `max`,
`exp_base`, and `jitter` (all float values).
`exp_base`, and `jitter` (all `float` values).

Returns:
A new Runnable that retries the original Runnable on exceptions.
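A minimal sketch of the `with_retry` parameters documented above, using a deliberately flaky function (counts and exception types are illustrative):

```python
from langchain_core.runnables import RunnableLambda

count = 0


def flaky(x: int) -> int:
    global count
    count += 1
    if count < 3:
        raise ValueError("transient failure")
    return x


runnable = RunnableLambda(flaky).with_retry(
    retry_if_exception_type=(ValueError,),  # documented default is (Exception,)
    wait_exponential_jitter=True,
    stop_after_attempt=3,
)
print(runnable.invoke(2), count)  # 2 3
```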
@@ -1929,16 +1925,15 @@ class Runnable(ABC, Generic[Input, Output]):
        fallbacks: A sequence of runnables to try if the original `Runnable`
            fails.
        exceptions_to_handle: A tuple of exception types to handle.
-           Defaults to `(Exception,)`.
-       exception_key: If string is specified then handled exceptions will be passed
-           to fallbacks as part of the input under the specified key.
+       exception_key: If `string` is specified then handled exceptions will be
+           passed to fallbacks as part of the input under the specified key.
            If `None`, exceptions will not be passed to fallbacks.
            If used, the base `Runnable` and its fallbacks must accept a
            dictionary as input.

    Returns:
        A new `Runnable` that will try the original `Runnable`, and then each
-           Fallback in order, upon failures.
+       Fallback in order, upon failures.

    Example:
        ```python

@@ -1966,16 +1961,15 @@ class Runnable(ABC, Generic[Input, Output]):
        fallbacks: A sequence of runnables to try if the original `Runnable`
            fails.
        exceptions_to_handle: A tuple of exception types to handle.
-       exception_key: If string is specified then handled exceptions will be passed
-           to fallbacks as part of the input under the specified key.
+       exception_key: If `string` is specified then handled exceptions will be
+           passed to fallbacks as part of the input under the specified key.
            If `None`, exceptions will not be passed to fallbacks.
            If used, the base `Runnable` and its fallbacks must accept a
            dictionary as input.

    Returns:
        A new `Runnable` that will try the original `Runnable`, and then each
-           Fallback in order, upon failures.
-
+       Fallback in order, upon failures.
    """
    # Import locally to prevent circular import
    from langchain_core.runnables.fallbacks import (  # noqa: PLC0415
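A short sketch of the `exception_key` behavior described in these hunks: with a key set, the base runnable and its fallbacks take a dict, and the handled exception arrives under that key. The `primary`/`backup` functions are invented for illustration.

```python
from langchain_core.runnables import RunnableLambda


def primary(inputs: dict) -> str:
    raise RuntimeError("primary failed")


def backup(inputs: dict) -> str:
    # With exception_key="error", the handled exception is added to the input.
    return f"recovered from: {inputs['error']}"


chain = RunnableLambda(primary).with_fallbacks(
    [RunnableLambda(backup)],
    exceptions_to_handle=(RuntimeError,),
    exception_key="error",
)
print(chain.invoke({"question": "hi"}))
```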
@@ -2633,9 +2627,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
        which: The `ConfigurableField` instance that will be used to select the
            alternative.
        default_key: The default key to use if no alternative is selected.
-           Defaults to `'default'`.
        prefix_keys: Whether to prefix the keys with the `ConfigurableField` id.
-           Defaults to `False`.
        **kwargs: A dictionary of keys to `Runnable` instances or callables that
            return `Runnable` instances.
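For reference, a small sketch of the `configurable_alternatives` parameters above, using prompt templates so it stays self-contained; the `"joke"`/`"poem"` keys are invented for illustration.

```python
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField

prompt = PromptTemplate.from_template(
    "Tell me a joke about {topic}"
).configurable_alternatives(
    ConfigurableField(id="prompt"),
    default_key="joke",
    poem=PromptTemplate.from_template("Write a poem about {topic}"),
)

# Uses the default ("joke") alternative:
print(prompt.invoke({"topic": "bears"}).to_string())
# Selects the "poem" alternative at runtime via the configurable field id:
print(
    prompt.with_config(configurable={"prompt": "poem"})
    .invoke({"topic": "bears"})
    .to_string()
)
```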
@@ -2896,7 +2888,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -3627,7 +3619,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -5156,7 +5148,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -5479,7 +5471,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -5761,7 +5753,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
    `bind`: Bind kwargs to pass to the underlying `Runnable` when running it.

    ```python
-   # Create a Runnable binding that invokes the ChatModel with the
+   # Create a Runnable binding that invokes the chat model with the
    # additional kwarg `stop=['-']` when running it.
    from langchain_community.chat_models import ChatOpenAI
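A dependency-free variant of the `bind` example above, assuming bound kwargs are forwarded to the wrapped callable (as `bind` is documented to do for runnables generally); the `shout` function is invented for illustration.

```python
from langchain_core.runnables import RunnableLambda


def shout(text: str, suffix: str = "!") -> str:
    return text.upper() + suffix


runnable = RunnableLambda(shout)
# bind() returns a RunnableBinding that passes the kwarg on every invocation.
bound = runnable.bind(suffix="?!")
print(runnable.invoke("hello"))  # HELLO!
print(bound.invoke("hello"))     # HELLO?!
```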
@@ -146,7 +146,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -75,26 +75,26 @@ class RunnableConfig(TypedDict, total=False):
    max_concurrency: int | None
    """
    Maximum number of parallel calls to make. If not provided, defaults to
-   ThreadPoolExecutor's default.
+   `ThreadPoolExecutor`'s default.
    """

    recursion_limit: int
    """
-   Maximum number of times a call can recurse. If not provided, defaults to 25.
+   Maximum number of times a call can recurse. If not provided, defaults to `25`.
    """

    configurable: dict[str, Any]
    """
-   Runtime values for attributes previously made configurable on this Runnable,
-   or sub-Runnables, through .configurable_fields() or .configurable_alternatives().
-   Check .output_schema() for a description of the attributes that have been made
+   Runtime values for attributes previously made configurable on this `Runnable`,
+   or sub-Runnables, through `configurable_fields` or `configurable_alternatives`.
+   Check `output_schema` for a description of the attributes that have been made
    configurable.
    """

    run_id: uuid.UUID | None
    """
    Unique identifier for the tracer run for this call. If not provided, a new UUID
-   will be generated.
+   will be generated.
    """
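For reference, a minimal sketch of passing the `RunnableConfig` keys documented above; the doubler runnable is invented for illustration.

```python
from langchain_core.runnables import RunnableConfig, RunnableLambda

config: RunnableConfig = {
    "max_concurrency": 4,
    "recursion_limit": 25,
    "tags": ["demo"],
}
doubler = RunnableLambda(lambda x: x * 2)
# batch() honors max_concurrency when fanning out over the inputs.
print(doubler.batch([1, 2, 3], config=config))  # [2, 4, 6]
```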
@@ -527,8 +527,7 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
        self,
        fn: Callable[..., T],
        *iterables: Iterable[Any],
        timeout: float | None = None,
        chunksize: int = 1,
-       **kwargs: Any,
    ) -> Iterator[T]:
        """Map a function to multiple iterables.

@@ -536,7 +535,7 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
        fn: The function to map.
        *iterables: The iterables to map over.
        timeout: The timeout for the map.
-       chunksize: The chunksize for the map. Defaults to 1.
+       chunksize: The chunksize for the map.

    Returns:
        The iterator for the mapped function.

@@ -549,8 +548,7 @@ class ContextThreadPoolExecutor(ThreadPoolExecutor):
        return super().map(
            _wrapped_fn,
            *iterables,
            timeout=timeout,
            chunksize=chunksize,
-           **kwargs,
        )

@@ -72,7 +72,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -540,7 +540,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
    """The alternatives to choose from."""

    default_key: str = "default"
-   """The enum value to use for the default option. Defaults to `'default'`."""
+   """The enum value to use for the default option."""

    prefix_keys: bool
    """Whether to prefix configurable fields of each alternative with a namespace

@@ -96,7 +96,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
    Any exception that is not a subclass of these exceptions will be raised immediately.
    """
    exception_key: str | None = None
-   """If string is specified then handled exceptions will be passed to fallbacks as
+   """If `string` is specified then handled exceptions will be passed to fallbacks as
    part of the input under the specified key. If `None`, exceptions
    will not be passed to fallbacks. If used, the base Runnable and its fallbacks
    must accept a dictionary as input."""

@@ -143,7 +143,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`
@@ -71,7 +71,7 @@ class Edge(NamedTuple):
    data: Stringifiable | None = None
    """Optional data associated with the edge. """
    conditional: bool = False
-   """Whether the edge is conditional. Defaults to `False`."""
+   """Whether the edge is conditional."""

    def copy(self, *, source: str | None = None, target: str | None = None) -> Edge:
        """Return a copy of the edge with optional new source and target nodes.

@@ -157,9 +157,9 @@ class NodeStyles:
    """Schema for Hexadecimal color codes for different node types.

    Args:
-       default: The default color code. Defaults to "fill:#f2f0ff,line-height:1.2".
-       first: The color code for the first node. Defaults to "fill-opacity:0".
-       last: The color code for the last node. Defaults to "fill:#bfb6fc".
+       default: The default color code.
+       first: The color code for the first node.
+       last: The color code for the last node.
    """

    default: str = "fill:#f2f0ff,line-height:1.2"

@@ -201,9 +201,9 @@ def node_data_json(
    """Convert the data of a node to a JSON-serializable format.

    Args:
-       node: The node to convert.
-       with_schemas: Whether to include the schema of the data if
-           it is a Pydantic model. Defaults to `False`.
+       node: The `Node` to convert.
+       with_schemas: Whether to include the schema of the data if it is a Pydantic
+           model.

    Returns:
        A dictionary with the type of the data and the data itself.

@@ -267,7 +267,7 @@ class Graph:

    Args:
        with_schemas: Whether to include the schemas of the nodes if they are
-           Pydantic models. Defaults to `False`.
+           Pydantic models.

    Returns:
        A dictionary with the nodes and edges of the graph.

@@ -362,7 +362,7 @@ class Graph:
        source: The source node of the edge.
        target: The target node of the edge.
        data: Optional data associated with the edge.
-       conditional: Whether the edge is conditional. Defaults to `False`.
+       conditional: Whether the edge is conditional.

    Returns:
        The edge that was added to the graph.

@@ -391,7 +391,7 @@ class Graph:

    Args:
        graph: The graph to add.
-       prefix: The prefix to add to the node ids. Defaults to "".
+       prefix: The prefix to add to the node ids.

    Returns:
        A tuple of the first and last nodes of the subgraph.

@@ -458,7 +458,7 @@ class Graph:
    def first_node(self) -> Node | None:
        """Find the single node that is not a target of any edge.

-       If there is no such node, or there are multiple, return None.
+       If there is no such node, or there are multiple, return `None`.
        When drawing the graph, this node would be the origin.

        Returns:

@@ -470,7 +470,7 @@ class Graph:
    def last_node(self) -> Node | None:
        """Find the single node that is not a source of any edge.

-       If there is no such node, or there are multiple, return None.
+       If there is no such node, or there are multiple, return `None`.
        When drawing the graph, this node would be the destination.

        Returns:

@@ -585,11 +585,10 @@ class Graph:
    """Draw the graph as a Mermaid syntax string.

    Args:
-       with_styles: Whether to include styles in the syntax. Defaults to `True`.
-       curve_style: The style of the edges. Defaults to CurveStyle.LINEAR.
-       node_colors: The colors of the nodes. Defaults to NodeStyles().
+       with_styles: Whether to include styles in the syntax.
+       curve_style: The style of the edges.
+       node_colors: The colors of the nodes.
        wrap_label_n_words: The number of words to wrap the node labels at.
-           Defaults to 9.
        frontmatter_config: Mermaid frontmatter config.
            Can be used to customize theme and styles. Will be converted to YAML and
            added to the beginning of the mermaid graph.

@@ -647,20 +646,16 @@ class Graph:
    """Draw the graph as a PNG image using Mermaid.

    Args:
-       curve_style: The style of the edges. Defaults to CurveStyle.LINEAR.
-       node_colors: The colors of the nodes. Defaults to NodeStyles().
+       curve_style: The style of the edges.
+       node_colors: The colors of the nodes.
        wrap_label_n_words: The number of words to wrap the node labels at.
-           Defaults to 9.
        output_file_path: The path to save the image to. If `None`, the image
            is not saved.
        draw_method: The method to use to draw the graph.
-           Defaults to MermaidDrawMethod.API.
-       background_color: The color of the background. Defaults to "white".
-       padding: The padding around the graph. Defaults to 10.
-       max_retries: The maximum number of retries (MermaidDrawMethod.API).
-           Defaults to 1.
-       retry_delay: The delay between retries (MermaidDrawMethod.API).
-           Defaults to 1.0.
+       background_color: The color of the background.
+       padding: The padding around the graph.
+       max_retries: The maximum number of retries (`MermaidDrawMethod.API`).
+       retry_delay: The delay between retries (`MermaidDrawMethod.API`).
        frontmatter_config: Mermaid frontmatter config.
            Can be used to customize theme and styles. Will be converted to YAML and
            added to the beginning of the mermaid graph.
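A small sketch of the two drawing methods documented above, run against the graph of a toy chain. Note that the default `MermaidDrawMethod.API` for PNG rendering calls out to the mermaid.ink service, so the last line needs network access; the chain itself is invented for illustration.

```python
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
graph = chain.get_graph()
# Mermaid syntax is generated locally, no external service required.
print(graph.draw_mermaid())
# PNG rendering via the default draw method hits the Mermaid.ink API.
graph.draw_mermaid_png(output_file_path="chain.png")
```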
@@ -712,7 +707,7 @@ def _first_node(graph: Graph, exclude: Sequence[str] = ()) -> Node | None:
    """Find the single node that is not a target of any edge.

    Exclude nodes/sources with ids in the exclude list.
-   If there is no such node, or there are multiple, return None.
+   If there is no such node, or there are multiple, return `None`.
    When drawing the graph, this node would be the origin.
    """
    targets = {edge.target for edge in graph.edges if edge.source not in exclude}

@@ -728,7 +723,7 @@ def _last_node(graph: Graph, exclude: Sequence[str] = ()) -> Node | None:
    """Find the single node that is not a source of any edge.

    Exclude nodes/targets with ids in the exclude list.
-   If there is no such node, or there are multiple, return None.
+   If there is no such node, or there are multiple, return `None`.
    When drawing the graph, this node would be the destination.
    """
    sources = {edge.source for edge in graph.edges if edge.target not in exclude}

@@ -60,10 +60,10 @@ def draw_mermaid(
    edges: List of edges, object with a source, target and data.
    first_node: Id of the first node.
    last_node: Id of the last node.
-   with_styles: Whether to include styles in the graph. Defaults to `True`.
-   curve_style: Curve style for the edges. Defaults to CurveStyle.LINEAR.
-   node_styles: Node colors for different types. Defaults to NodeStyles().
-   wrap_label_n_words: Words to wrap the edge labels. Defaults to 9.
+   with_styles: Whether to include styles in the graph.
+   curve_style: Curve style for the edges.
+   node_styles: Node colors for different types.
+   wrap_label_n_words: Words to wrap the edge labels.
    frontmatter_config: Mermaid frontmatter config.
        Can be used to customize theme and styles. Will be converted to YAML and
        added to the beginning of the mermaid graph.

@@ -287,11 +287,11 @@ def draw_mermaid_png(
    Args:
        mermaid_syntax: Mermaid graph syntax.
        output_file_path: Path to save the PNG image.
-       draw_method: Method to draw the graph. Defaults to MermaidDrawMethod.API.
-       background_color: Background color of the image. Defaults to "white".
-       padding: Padding around the image. Defaults to 10.
-       max_retries: Maximum number of retries (MermaidDrawMethod.API). Defaults to 1.
-       retry_delay: Delay between retries (MermaidDrawMethod.API). Defaults to 1.0.
+       draw_method: Method to draw the graph.
+       background_color: Background color of the image.
+       padding: Padding around the image.
+       max_retries: Maximum number of retries (MermaidDrawMethod.API).
+       retry_delay: Delay between retries (MermaidDrawMethod.API).
        base_url: Base URL for the Mermaid.ink API.

    Returns:

@@ -105,7 +105,7 @@ class PngDrawer:
    source: The source node.
    target: The target node.
    label: The label for the edge.
-   conditional: Whether the edge is conditional. Defaults to `False`.
+   conditional: Whether the edge is conditional.
    """
    viz.add_edge(
        source,
@@ -296,9 +296,9 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef]
        ```

        input_messages_key: Must be specified if the base runnable accepts a dict
-           as input. Default is None.
+           as input.
        output_messages_key: Must be specified if the base runnable returns a dict
-           as output. Default is None.
+           as output.
        history_messages_key: Must be specified if the base runnable accepts a dict
            as input and expects a separate key for historical messages.
        history_factory_config: Configure fields that should be passed to the
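A minimal sketch of the keys documented above, with a lambda standing in for a real chat model and a plain dict as the session store; both are invented for illustration.

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory

store: dict[str, InMemoryChatMessageHistory] = {}


def get_history(session_id: str) -> InMemoryChatMessageHistory:
    return store.setdefault(session_id, InMemoryChatMessageHistory())


# Echo "model" standing in for a real chat model; it takes a dict as input,
# hence input_messages_key and history_messages_key must be specified.
echo = RunnableLambda(lambda d: f"you said: {d['question']}")
chain = RunnableWithMessageHistory(
    echo,
    get_history,
    input_messages_key="question",
    history_messages_key="history",
)
print(chain.invoke({"question": "hi"}, config={"configurable": {"session_id": "s1"}}))
```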
@@ -185,7 +185,7 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]):

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -409,7 +409,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -714,7 +714,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -126,7 +126,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): # type: ignore[no-rede

    exponential_jitter_params: ExponentialJitterParams | None = None
    """Parameters for `tenacity.wait_exponential_jitter`. Namely: `initial`,
-   `max`, `exp_base`, and `jitter` (all float values).
+   `max`, `exp_base`, and `jitter` (all `float` values).
    """

    max_attempt_number: int = 3

@@ -96,7 +96,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
-       """Get the namespace of the langchain object.
+       """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`

@@ -65,7 +65,7 @@ class BaseStreamEvent(TypedDict):

    events = [event async for event in chain.astream_events("hello")]

-   # will produce the following events
+   # Will produce the following events
    # (where some fields have been omitted for brevity):
    [
        {
@@ -5,6 +5,7 @@ from __future__ import annotations

import ast
import asyncio
import inspect
+import sys
import textwrap
from collections.abc import Callable, Mapping, Sequence
from contextvars import Context

@@ -118,14 +119,13 @@ def accepts_context(callable: Callable[..., Any]) -> bool: # noqa: A002
    return False


-@lru_cache(maxsize=1)
def asyncio_accepts_context() -> bool:
-   """Cache the result of checking if asyncio.create_task accepts a `context` arg.
+   """Check if asyncio.create_task accepts a `context` arg.

    Returns:
        True if `asyncio.create_task` accepts a context argument, `False` otherwise.
    """
-   return accepts_context(asyncio.create_task)
+   return sys.version_info >= (3, 11)


def coro_with_context(
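The hunk above swaps cached signature inspection for a plain version check, which works because `asyncio.create_task` gained its `context` keyword in Python 3.11. A self-contained sketch of what that capability enables (the context variable and coroutine are invented for illustration):

```python
import asyncio
import contextvars
import sys

var = contextvars.ContextVar("var", default="outer")


async def report() -> str:
    return var.get()


async def main() -> None:
    ctx = contextvars.copy_context()
    ctx.run(var.set, "inner")
    if sys.version_info >= (3, 11):
        # The context kwarg was added to asyncio.create_task in Python 3.11.
        task = asyncio.create_task(report(), context=ctx)
    else:
        task = asyncio.create_task(report())
    print(await task)  # "inner" on 3.11+, "outer" otherwise


asyncio.run(main())
```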
@@ -136,7 +136,7 @@ def coro_with_context(
    Args:
        coro: The coroutine to await.
        context: The context to use.
-       create_task: Whether to create a task. Defaults to `False`.
+       create_task: Whether to create a task.

    Returns:
        The coroutine with the context.

@@ -558,7 +558,7 @@ class ConfigurableField(NamedTuple):
    annotation: Any | None = None
    """The annotation of the field. """
    is_shared: bool = False
-   """Whether the field is shared. Defaults to `False`."""
+   """Whether the field is shared."""

    @override
    def __hash__(self) -> int:

@@ -579,7 +579,7 @@ class ConfigurableFieldSingleOption(NamedTuple):
    description: str | None = None
    """The description of the field. """
    is_shared: bool = False
-   """Whether the field is shared. Defaults to `False`."""
+   """Whether the field is shared."""

    @override
    def __hash__(self) -> int:

@@ -600,7 +600,7 @@ class ConfigurableFieldMultiOption(NamedTuple):
    description: str | None = None
    """The description of the field. """
    is_shared: bool = False
-   """Whether the field is shared. Defaults to `False`."""
+   """Whether the field is shared."""

    @override
    def __hash__(self) -> int:

@@ -626,7 +626,7 @@ class ConfigurableFieldSpec(NamedTuple):
    default: Any = None
    """The default value for the field. """
    is_shared: bool = False
-   """Whether the field is shared. Defaults to `False`."""
+   """Whether the field is shared."""
    dependencies: list[str] | None = None
    """The dependencies of the field. """
@@ -293,10 +293,9 @@ def create_schema_from_function(
        filter_args: Optional list of arguments to exclude from the schema.
            Defaults to `FILTERED_ARGS`.
        parse_docstring: Whether to parse the function's docstring for descriptions
-           for each argument. Defaults to `False`.
+           for each argument.
        error_on_invalid_docstring: if `parse_docstring` is provided, configure
            whether to raise `ValueError` on invalid Google Style docstrings.
            Defaults to `False`.
        include_injected: Whether to include injected arguments in the schema.
            Defaults to `True`, since we want to include them in the schema
            when *validating* tool inputs.

@@ -481,11 +480,11 @@ class ChildTool(BaseTool):
    """Handle the content of the ValidationError thrown."""

    response_format: Literal["content", "content_and_artifact"] = "content"
-   """The tool response format. Defaults to 'content'.
+   """The tool response format.

-   If "content" then the output of the tool is interpreted as the contents of a
-   ToolMessage. If "content_and_artifact" then the output is expected to be a
-   two-tuple corresponding to the (content, artifact) of a ToolMessage.
+   If `"content"` then the output of the tool is interpreted as the contents of a
+   ToolMessage. If `"content_and_artifact"` then the output is expected to be a
+   two-tuple corresponding to the (content, artifact) of a `ToolMessage`.
    """

    def __init__(self, **kwargs: Any) -> None:
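For context on `response_format` above: with `"content_and_artifact"`, the tool returns a two-tuple, and invoking it with a tool call yields a `ToolMessage` carrying both parts. The `top_rows` tool and its data are invented for illustration.

```python
from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def top_rows(query: str) -> tuple[str, list[dict]]:
    """Return a summary for the model and the raw rows as an artifact."""
    rows = [{"id": 1, "score": 0.9}, {"id": 2, "score": 0.7}]
    return f"Found {len(rows)} rows for {query!r}", rows


# Invoking with a ToolCall dict produces a ToolMessage.
msg = top_rows.invoke(
    {"name": "top_rows", "args": {"query": "demo"}, "id": "call_1", "type": "tool_call"}
)
print(msg.content)   # the summary string
print(msg.artifact)  # the raw rows
```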
@@ -616,7 +615,7 @@ class ChildTool(BaseTool):
        The parsed and validated input.

    Raises:
-       ValueError: If string input is provided with JSON schema `args_schema`.
+       ValueError: If `string` input is provided with JSON schema `args_schema`.
        ValueError: If InjectedToolCallId is required but `tool_call_id` is not
            provided.
        TypeError: If args_schema is not a Pydantic `BaseModel` or dict.

@@ -768,8 +767,8 @@ class ChildTool(BaseTool):
    Args:
        tool_input: The input to the tool.
        verbose: Whether to log the tool's progress.
-       start_color: The color to use when starting the tool. Defaults to 'green'.
-       color: The color to use when ending the tool. Defaults to 'green'.
+       start_color: The color to use when starting the tool.
+       color: The color to use when ending the tool.
        callbacks: Callbacks to be called during tool execution.
        tags: Optional list of tags associated with the tool.
        metadata: Optional metadata associated with the tool.

@@ -880,8 +879,8 @@ class ChildTool(BaseTool):
    Args:
        tool_input: The input to the tool.
        verbose: Whether to log the tool's progress.
-       start_color: The color to use when starting the tool. Defaults to 'green'.
-       color: The color to use when ending the tool. Defaults to 'green'.
+       start_color: The color to use when starting the tool.
+       color: The color to use when ending the tool.
        callbacks: Callbacks to be called during tool execution.
        tags: Optional list of tags associated with the tool.
        metadata: Optional metadata associated with the tool.
@@ -1211,6 +1210,26 @@ class InjectedToolArg:
    """


+class _DirectlyInjectedToolArg:
+    """Annotation for tool arguments that are injected at runtime.
+
+    Injected via direct type annotation, rather than annotated metadata.
+
+    For example, ToolRuntime is a directly injected argument.
+    Note the direct annotation rather than the verbose alternative:
+    Annotated[ToolRuntime, InjectedRuntime]
+
+    ```python
+    from langchain_core.tools import tool, ToolRuntime
+
+
+    @tool
+    def foo(x: int, runtime: ToolRuntime) -> str:
+        # use runtime.state, runtime.context, runtime.store, etc.
+        ...
+    ```
+    """
+
+
class InjectedToolCallId(InjectedToolArg):
    """Annotation for injecting the tool call ID.

@@ -1238,6 +1257,24 @@ class InjectedToolCallId(InjectedToolArg):
    """


+def _is_directly_injected_arg_type(type_: Any) -> bool:
+    """Check if a type annotation indicates a directly injected argument.
+
+    This is currently only used for ToolRuntime.
+    Checks if either the annotation itself is a subclass of _DirectlyInjectedToolArg
+    or the origin of the annotation is a subclass of _DirectlyInjectedToolArg.
+
+    Ex: ToolRuntime or ToolRuntime[ContextT, StateT] would both return True.
+    """
+    return (
+        isinstance(type_, type) and issubclass(type_, _DirectlyInjectedToolArg)
+    ) or (
+        (origin := get_origin(type_)) is not None
+        and isinstance(origin, type)
+        and issubclass(origin, _DirectlyInjectedToolArg)
+    )
+
+
def _is_injected_arg_type(
    type_: type | TypeVar, injected_type: type[InjectedToolArg] | None = None
) -> bool:

@@ -1250,7 +1287,15 @@ def _is_injected_arg_type(
    Returns:
        `True` if the type is an injected argument, `False` otherwise.
    """
-   injected_type = injected_type or InjectedToolArg
+   if injected_type is None:
+       # if no injected type is specified,
+       # check if the type is a directly injected argument
+       if _is_directly_injected_arg_type(type_):
+           return True
+       injected_type = InjectedToolArg
+
+   # if the type is an Annotated type, check if annotated metadata
+   # is an intance or subclass of the injected type
    return any(
        isinstance(arg, injected_type)
        or (isinstance(arg, type) and issubclass(arg, injected_type))
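The subclass-or-origin check added above handles both a bare class and a parameterized generic. A standalone sketch of that logic with an invented `Marker` class standing in for `_DirectlyInjectedToolArg`:

```python
from typing import Any, Generic, TypeVar, get_origin

T = TypeVar("T")


class Marker(Generic[T]):
    pass


def is_marker(type_: Any) -> bool:
    # Mirrors the pattern above: match the class itself, or the origin of a
    # parameterized form like Marker[int].
    return (isinstance(type_, type) and issubclass(type_, Marker)) or (
        (origin := get_origin(type_)) is not None
        and isinstance(origin, type)
        and issubclass(origin, Marker)
    )


print(is_marker(Marker))       # True: bare class
print(is_marker(Marker[int]))  # True: get_origin(Marker[int]) is Marker
print(is_marker(int))          # False
```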
@@ -81,7 +81,7 @@ def tool(
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
) -> BaseTool | Callable[[Callable | Runnable], BaseTool]:
-   """Make tools out of functions, can be used with or without arguments.
+   """Make tools out of Python functions, can be used with or without arguments.

    Args:
        name_or_callable: Optional name of the tool or the callable to be

@@ -93,30 +93,26 @@ def tool(

            - `description` argument
              (used even if docstring and/or `args_schema` are provided)
-           - tool function docstring
+           - Tool function docstring
              (used even if `args_schema` is provided)
            - `args_schema` description
              (used only if `description` / docstring are not provided)
        *args: Extra positional arguments. Must be empty.
        return_direct: Whether to return directly from the tool rather
-           than continuing the agent loop. Defaults to `False`.
-       args_schema: optional argument schema for user to specify.
+           than continuing the agent loop.
+       args_schema: Optional argument schema for user to specify.

        infer_schema: Whether to infer the schema of the arguments from
            the function's signature. This also makes the resultant tool
            accept a dictionary input to its `run()` function.
-           Defaults to `True`.
-       response_format: The tool response format. If "content" then the output of
-           the tool is interpreted as the contents of a ToolMessage. If
-           "content_and_artifact" then the output is expected to be a two-tuple
-           corresponding to the (content, artifact) of a ToolMessage.
-           Defaults to "content".
+       response_format: The tool response format. If `"content"` then the output of
+           the tool is interpreted as the contents of a `ToolMessage`. If
+           `"content_and_artifact"` then the output is expected to be a two-tuple
+           corresponding to the `(content, artifact)` of a `ToolMessage`.
        parse_docstring: if `infer_schema` and `parse_docstring`, will attempt to
            parse parameter descriptions from Google Style function docstrings.
-           Defaults to `False`.
        error_on_invalid_docstring: if `parse_docstring` is provided, configure
-           whether to raise ValueError on invalid Google Style docstrings.
-           Defaults to `True`.
+           whether to raise `ValueError` on invalid Google Style docstrings.

    Raises:
        ValueError: If too many positional arguments are provided.

@@ -124,8 +120,8 @@ def tool(
        ValueError: If the first argument is not a string or callable with
            a `__name__` attribute.
        ValueError: If the function does not have a docstring and description
-           is not provided and `infer_schema` is False.
-       ValueError: If `parse_docstring` is True and the function has an invalid
+           is not provided and `infer_schema` is `False`.
+       ValueError: If `parse_docstring` is `True` and the function has an invalid
            Google-style docstring and `error_on_invalid_docstring` is True.
        ValueError: If a Runnable is provided that does not have an object schema.

@@ -133,7 +129,7 @@ def tool(
        The tool.

    Requires:
-       - Function must be of type (str) -> str
+       - Function must be of type `(str) -> str`
        - Function must have a docstring

    Examples:

@@ -197,7 +193,7 @@ def tool(
    Note that parsing by default will raise `ValueError` if the docstring
    is considered invalid. A docstring is considered invalid if it contains
    arguments not in the function signature, or is unable to be parsed into
-   a summary and "Args:" blocks. Examples below:
+   a summary and `"Args:"` blocks. Examples below:

    ```python
    # No args section
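As a compact companion to the `@tool` hunks above: a valid Google-style docstring with `parse_docstring=True`, whose parameter descriptions flow into the tool's argument schema. The `multiply` function is invented for illustration.

```python
from langchain_core.tools import tool


@tool(parse_docstring=True)
def multiply(a: int, b: int) -> int:
    """Multiply two integers.

    Args:
        a: The first factor.
        b: The second factor.
    """
    return a * b


# Descriptions from the docstring end up in the inferred schema.
print(multiply.args)
print(multiply.invoke({"a": 6, "b": 7}))  # 42
```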
@@ -82,12 +82,12 @@ def create_retriever_tool(
    description: The description for the tool. This will be passed to the language
        model, so should be descriptive.
    document_prompt: The prompt to use for the document.
-   document_separator: The separator to use between documents. Defaults to "\n\n".
-   response_format: The tool response format. If "content" then the output of
-       the tool is interpreted as the contents of a ToolMessage. If
-       "content_and_artifact" then the output is expected to be a two-tuple
-       corresponding to the (content, artifact) of a ToolMessage (artifact
-       being a list of documents in this case). Defaults to "content".
+   document_separator: The separator to use between documents.
+   response_format: The tool response format. If `"content"` then the output of
+       the tool is interpreted as the contents of a `ToolMessage`. If
+       `"content_and_artifact"` then the output is expected to be a two-tuple
+       corresponding to the `(content, artifact)` of a `ToolMessage` (artifact
+       being a list of documents in this case).

    Returns:
        Tool class to pass to an agent.
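A minimal sketch of `create_retriever_tool` with the `"content_and_artifact"` format described above, using an in-memory vector store with fake embeddings so it stays self-contained; the store contents and tool name are invented for illustration.

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.tools import create_retriever_tool
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore(DeterministicFakeEmbedding(size=32))
store.add_texts(["LangChain ships a core package.", "Runnables compose with |."])

retriever_tool = create_retriever_tool(
    store.as_retriever(search_kwargs={"k": 1}),
    "docs_search",
    "Search the project notes for relevant passages.",
    response_format="content_and_artifact",
)
msg = retriever_tool.invoke(
    {"name": "docs_search", "args": {"query": "runnables"}, "id": "c1", "type": "tool_call"}
)
print(msg.content)   # joined page content
print(msg.artifact)  # the retrieved Document objects
```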
@@ -176,7 +176,7 @@ class Tool(BaseTool):
    func: The function to create the tool from.
    name: The name of the tool.
    description: The description of the tool.
-   return_direct: Whether to return the output directly. Defaults to `False`.
+   return_direct: Whether to return the output directly.
    args_schema: The schema of the tool's input arguments.
    coroutine: The asynchronous version of the function.
    **kwargs: Additional arguments to pass to the tool.

@@ -149,21 +149,16 @@ class StructuredTool(BaseTool):
    description: The description of the tool.
        Defaults to the function docstring.
    return_direct: Whether to return the result directly or as a callback.
-       Defaults to `False`.
    args_schema: The schema of the tool's input arguments.
    infer_schema: Whether to infer the schema from the function's signature.
-       Defaults to `True`.
-   response_format: The tool response format. If "content" then the output of
-       the tool is interpreted as the contents of a ToolMessage. If
-       "content_and_artifact" then the output is expected to be a two-tuple
-       corresponding to the (content, artifact) of a ToolMessage.
-       Defaults to "content".
+   response_format: The tool response format. If `"content"` then the output of
+       the tool is interpreted as the contents of a `ToolMessage`. If
+       `"content_and_artifact"` then the output is expected to be a two-tuple
+       corresponding to the `(content, artifact)` of a `ToolMessage`.
    parse_docstring: if `infer_schema` and `parse_docstring`, will attempt
        to parse parameter descriptions from Google Style function docstrings.
-       Defaults to `False`.
    error_on_invalid_docstring: if `parse_docstring` is provided, configure
-       whether to raise ValueError on invalid Google Style docstrings.
-       Defaults to `False`.
+       whether to raise `ValueError` on invalid Google Style docstrings.
    **kwargs: Additional arguments to pass to the tool

    Returns:
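For reference, a minimal sketch of `StructuredTool.from_function` with the parameters documented above; the `power` function is invented for illustration.

```python
from langchain_core.tools import StructuredTool


def power(base: float, exponent: int = 2) -> float:
    """Raise base to the given exponent."""
    return base**exponent


structured = StructuredTool.from_function(
    func=power,
    name="power",
    parse_docstring=False,
)
print(structured.args)                                  # schema inferred from the signature
print(structured.invoke({"base": 3.0, "exponent": 3}))  # 27.0
```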
@@ -128,7 +128,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
            exclude_tags=exclude_tags,
        )

-       loop = asyncio.get_event_loop()
+       try:
+           loop = asyncio.get_event_loop()
+       except RuntimeError:
+           loop = asyncio.new_event_loop()
        memory_stream = _MemoryStream[StreamEvent](loop)
        self.send_stream = memory_stream.get_send_stream()
        self.receive_stream = memory_stream.get_receive_stream()

@@ -264,7 +264,10 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
        self.exclude_types = exclude_types
        self.exclude_tags = exclude_tags

-       loop = asyncio.get_event_loop()
+       try:
+           loop = asyncio.get_event_loop()
+       except RuntimeError:
+           loop = asyncio.new_event_loop()
        memory_stream = _MemoryStream[RunLogPatch](loop)
        self.lock = threading.Lock()
        self.send_stream = memory_stream.get_send_stream()
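Context for the two hunks above: on newer Pythons, `asyncio.get_event_loop()` no longer implicitly creates a loop when none is set for the current thread (deprecated since 3.12, raising `RuntimeError` on 3.14), so the code now falls back to `asyncio.new_event_loop()`. A standalone sketch of the pattern; the helper name is invented for illustration.

```python
import asyncio


def get_or_create_loop() -> asyncio.AbstractEventLoop:
    try:
        # On Python 3.14, get_event_loop() raises RuntimeError when no event
        # loop is set for the current thread instead of creating one.
        return asyncio.get_event_loop()
    except RuntimeError:
        return asyncio.new_event_loop()


loop = get_or_create_loop()
print(loop.run_until_complete(asyncio.sleep(0, result="ok")))
```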
@@ -5,7 +5,7 @@ channel. The writer and reader can be in the same event loop or in different eve
loops. When they're in different event loops, they will also be in different
threads.

-This is useful in situations when there's a mix of synchronous and asynchronous
+Useful in situations when there's a mix of synchronous and asynchronous
used in the code.
"""

@@ -24,7 +24,7 @@ class RootListenersTracer(BaseTracer):
    """Tracer that calls listeners on run start, end, and error."""

    log_missing_parent = False
-   """Whether to log a warning if the parent is missing. Default is False."""
+   """Whether to log a warning if the parent is missing."""

    def __init__(
        self,

@@ -79,7 +79,7 @@ class AsyncRootListenersTracer(AsyncBaseTracer):
    """Async Tracer that calls listeners on run start, end, and error."""

    log_missing_parent = False
-   """Whether to log a warning if the parent is missing. Default is False."""
+   """Whether to log a warning if the parent is missing."""

    def __init__(
        self,
Some files were not shown because too many files have changed in this diff.