mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-18 04:25:22 +00:00

Compare commits: langchain- ... langchain- (147 commits)
Commits in this range (SHA1):

95c3e5f85f, 18b8c8628a, 152c8cac33, cd07521170, 170cc8aec3, fbfed65fb1, 3d26807b92,
2d968213d7, 9aba9e3e33, 4fda7bf4f2, d9eff44400, 2df8ac402a, e5541d1da7, 8ba4f77734,
6dd621d636, 74947ec894, fea6b99b16, 37cfc00310, 53293dace8, 12d65f17ff, 58b6c72375,
5eabe90494, 50186da0a1, 45ed5f3f51, 444c2a3d9f, 8a877120c3, bf3aefce93, 63284ffebf,
d948783a4c, 16617dd239, 45351d1bc6, 28456c2c33, 3c1d77dd64, 9a66c43146, b51a1eba4d,
b4d5f3181b, 6b98140b38, 98c0b093bb, ed5914ff61, 709664a079, 16b55b0704, c3bcfad66d,
b859765752, e7e41eaabe, 14a9c7c44e, fc93bed8c4, 403142eaba, 1f81277b9b, 36813d2f00,
b7d08bf764, 58360a1e53, ef53ccf54b, 4633b4cf2b, 4f2e3bd7fd, 6548052f9e, 8d82160a8a,
d8a1f1114d, b0ef5e778a, aed64daabb, 25ba733218, 3b0437c05b, 24b5c27bb1, 80f8fe1793,
eb096675a8, 7a5d042bd2, 90f4d8842f, a042e804b4, 4cf523949a, 98b64f3ae3, 1bc0ea5496,
ded53297e0, fb6108c8f5, 72d4a8eeed, a983465694, 5448e16fe6, 4be5537837, 35439cf3bd,
0923136851, 8e1aeb8ad5, 54adcd9e82, fc79b372cb, 3587c60396, 96bd0b0844, d07885f8b7,
d4359d3de6, c0e3c3a350, bd39b2ccdf, 2316635add, d8a101074f, 9799437bc2, e98a4fd19a,
f54cbf8ff5, b0b302ec6b, 6a59f76f2b, e6207ad4f3, c6da9533ac, 7a5e1bcf99, 332ffed393,
a43515ca65, aab9cb666f, 6499897c87, 711b8f1e52, 25d1c1c9bb, 0e72ed39a0, f4ffef98a2,
6b97418836, 1418d3af00, e8bdf245eb, 4470d3b4a0, 0614a53d9c, 9c76739425, 68a90e2252,
8ed2ba9301, c98bd8505f, b2f58d37db, d85e46321a, f2e75f9500, 30bca57aae, 8da35fba7f,
8530bbac2d, 8cd6ed3e1e, 5ae982145e, dd00aac7ad, 242eeb537f, da4fef8131, b6c8b6f944,
d3624eaba1, 61ebe7991c, 0812723789, 875230d5bc, 8b3c5f93f5, c3caec5aaf, 0180716a95,
b1e7b40b6a, 9a39f92aba, e6b7a1769b, cdc8e2d0c2, d02380c504, 67b6f6c82a, d8f89a5e9b,
5285336cb1, 2d3f4e1a16, 169f525cfb, 2656bfe941, e5046cbd72, 1b555021f7, 0ad8de5eb7

.github/ISSUE_TEMPLATE/documentation.yml (vendored, 9 changes)
@@ -26,6 +26,13 @@ body:
         [LangChain Github Discussions](https://github.com/langchain-ai/langchain/discussions),
         [LangChain Github Issues](https://github.com/langchain-ai/langchain/issues?q=is%3Aissue),
         [LangChain ChatBot](https://chat.langchain.com/)
+  - type: input
+    id: url
+    attributes:
+      label: URL
+      description: URL to documentation
+    validations:
+      required: false
   - type: checkboxes
     id: checks
     attributes:
@@ -48,4 +55,4 @@ body:
       label: "Idea or request for content:"
      description: >
        Please describe as clearly as possible what topics you think are missing
-        from the current documentation.
+        from the current documentation.

.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changes)
@@ -26,4 +26,4 @@ Additional guidelines:
 - Changes should be backwards compatible.
 - If you are adding something to community, do not re-import it in langchain.

-If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
+If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.

.github/actions/people/app/main.py (vendored, 4 changes)
@@ -537,7 +537,9 @@ if __name__ == "__main__":
     "nfcampos",
     "efriis",
     "eyurtsev",
-    "rlancemartin"
+    "rlancemartin",
+    "ccurme",
+    "vbarda",
 }
 hidden_logins = {
     "dev2049",

.github/scripts/check_diff.py (vendored, 2 changes)
@@ -91,4 +91,4 @@ if __name__ == "__main__":
     }
     for key, value in outputs.items():
         json_output = json.dumps(value)
-        print(f"{key}={json_output}")  # noqa: T201
+        print(f"{key}={json_output}")

.github/scripts/get_min_versions.py (vendored, 2 changes)
@@ -76,4 +76,4 @@ if __name__ == "__main__":

     print(
         " ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
-    )  # noqa: T201
+    )

.github/workflows/_release.yml (vendored, 1 change)
@@ -308,3 +308,4 @@ jobs:
           tag: ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}
           body: "# Release ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}\n\nPackage-specific release note generation coming soon."
           commit: ${{ github.sha }}
+          makeLatest: ${{ needs.build.outputs.pkg-name == 'langchain-core'}}

@@ -7,4 +7,4 @@ ignore_words_list = (
     pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
 )

-print(f"::set-output name=ignore_words_list::{ignore_words_list}")  # noqa: T201
+print(f"::set-output name=ignore_words_list::{ignore_words_list}")

.gitignore (vendored, 1 change)
@@ -178,3 +178,4 @@ _dist
 docs/docs/templates

 prof
+virtualenv/

@@ -6,23 +6,24 @@
 "source": [
 "# Oracle AI Vector Search with Document Processing\n",
 "Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads that allows you to query data based on semantics, rather than keywords.\n",
-"One of the biggest benefit of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system. This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
+"One of the biggest benefits of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system.\n",
+"This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
 "\n",
-"In addition, because Oracle has been building database technologies for so long, your vectors can benefit from all of Oracle Database's most powerful features, like the following:\n",
+"In addition, your vectors can benefit from all of Oracle Database’s most powerful features, like the following:\n",
 "\n",
-" * Partitioning Support\n",
-" * Real Application Clusters scalability\n",
-" * Exadata smart scans\n",
-" * Shard processing across geographically distributed databases\n",
-" * Transactions\n",
-" * Parallel SQL\n",
-" * Disaster recovery\n",
-" * Security\n",
-" * Oracle Machine Learning\n",
-" * Oracle Graph Database\n",
-" * Oracle Spatial and Graph\n",
-" * Oracle Blockchain\n",
-" * JSON\n",
+" * [Partitioning Support](https://www.oracle.com/database/technologies/partitioning.html)\n",
+" * [Real Application Clusters scalability](https://www.oracle.com/database/real-application-clusters/)\n",
+" * [Exadata smart scans](https://www.oracle.com/database/technologies/exadata/software/smartscan/)\n",
+" * [Shard processing across geographically distributed databases](https://www.oracle.com/database/distributed-database/)\n",
+" * [Transactions](https://docs.oracle.com/en/database/oracle/oracle-database/23/cncpt/transactions.html)\n",
+" * [Parallel SQL](https://docs.oracle.com/en/database/oracle/oracle-database/21/vldbg/parallel-exec-intro.html#GUID-D28717E4-0F77-44F5-BB4E-234C31D4E4BA)\n",
+" * [Disaster recovery](https://www.oracle.com/database/data-guard/)\n",
+" * [Security](https://www.oracle.com/security/database-security/)\n",
+" * [Oracle Machine Learning](https://www.oracle.com/artificial-intelligence/database-machine-learning/)\n",
+" * [Oracle Graph Database](https://www.oracle.com/database/integrated-graph-database/)\n",
+" * [Oracle Spatial and Graph](https://www.oracle.com/database/spatial/)\n",
+" * [Oracle Blockchain](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_blockchain_table.html#GUID-B469E277-978E-4378-A8C1-26D3FF96C9A6)\n",
+" * [JSON](https://docs.oracle.com/en/database/oracle/oracle-database/23/adjsn/json-in-oracle-database.html)\n",
 "\n",
 "This guide demonstrates how Oracle AI Vector Search can be used with Langchain to serve an end-to-end RAG pipeline. This guide goes through examples of:\n",
 "\n",
@@ -33,6 +34,13 @@
 " * Storing and Indexing them in a Vector Store and querying them for queries in OracleVS"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"If you are just starting with Oracle Database, consider exploring the [free Oracle 23 AI](https://www.oracle.com/database/free/#resources) which provides a great introduction to setting up your database environment. While working with the database, it is often advisable to avoid using the system user by default; instead, you can create your own user for enhanced security and customization. For detailed steps on user creation, refer to our [end-to-end guide](https://github.com/langchain-ai/langchain/blob/master/cookbook/oracleai_demo.ipynb) which also shows how to set up a user in Oracle. Additionally, understanding user privileges is crucial for managing database security effectively. You can learn more about this topic in the official [Oracle guide](https://docs.oracle.com/en/database/oracle/oracle-database/19/admqs/administering-user-accounts-and-security.html#GUID-36B21D72-1BBB-46C9-A0C9-F0D2A8591B8D) on administering user accounts and security."
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -131,13 +139,13 @@
 "metadata": {},
 "source": [
 "## Process Documents using Oracle AI\n",
-"Let's think about a scenario that the users have some documents in Oracle Database or in a file system. They want to use the data for Oracle AI Vector Search using Langchain.\n",
+"Consider the following scenario: users possess documents stored either in an Oracle Database or a file system and intend to utilize this data with Oracle AI Vector Search powered by Langchain.\n",
 "\n",
-"For that, the users need to do some document preprocessing. The first step would be to read the documents, generate their summary(if needed) and then chunk/split them if needed. After that, they need to generate the embeddings for those chunks and store into Oracle AI Vector Store. Finally, the users will perform some semantic queries on those data. \n",
+"To prepare the documents for analysis, a comprehensive preprocessing workflow is necessary. Initially, the documents must be retrieved, summarized (if required), and chunked as needed. Subsequent steps involve generating embeddings for these chunks and integrating them into the Oracle AI Vector Store. Users can then conduct semantic searches on this data.\n",
 "\n",
-"Oracle AI Vector Search Langchain library provides a range of document processing functionalities including document loading, splitting, generating summary and embeddings.\n",
+"The Oracle AI Vector Search Langchain library encompasses a suite of document processing tools that facilitate document loading, chunking, summary generation, and embedding creation.\n",
 "\n",
-"In the following sections, we will go through how to use Oracle AI Langchain APIs to achieve each of these functionalities individually. "
+"In the sections that follow, we will detail the utilization of Oracle AI Langchain APIs to effectively implement each of these processes."
 ]
 },
 {
@@ -145,7 +153,7 @@
 "metadata": {},
 "source": [
 "### Connect to Demo User\n",
-"The following sample code will show how to connect to Oracle Database. "
+"The following sample code will show how to connect to Oracle Database. By default, python-oracledb runs in a ‘Thin’ mode which connects directly to Oracle Database. This mode does not need Oracle Client libraries. However, some additional functionality is available when python-oracledb uses them. Python-oracledb is said to be in ‘Thick’ mode when Oracle Client libraries are used. Both modes have comprehensive functionality supporting the Python Database API v2.0 Specification. See the following [guide](https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_a.html#featuresummary) that talks about features supported in each mode. You might want to switch to thick-mode if you are unable to use thin-mode."
 ]
 },
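
The connection code cell itself is not included in this diff. As context, a minimal thin-mode connection sketch might look like the following; the user, password, and DSN below are placeholders, not values from the notebook:

```python
# Minimal python-oracledb connection sketch; credentials and DSN are placeholders.
import sys

import oracledb

username = "testuser"            # hypothetical demo user
password = "testuser"            # hypothetical password
dsn = "localhost:1521/FREEPDB1"  # hypothetical connect string

try:
    # Thin mode is the default and requires no Oracle Client libraries.
    # For Thick mode, call oracledb.init_oracle_client() before connecting.
    conn = oracledb.connect(user=username, password=password, dsn=dsn)
    print("Connection successful!")
except Exception as e:
    print(f"Connection failed: {e}")
    sys.exit(1)
```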
{
@@ -242,9 +250,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"\n",
-"\n",
-"Now that we have a demo user and a demo table with some data, we just need to do one more setup. For embedding and summary, we have a few provider options that the users can choose from such as database, 3rd party providers like ocigenai, huggingface, openai, etc. If the users choose to use 3rd party provider, they need to create a credential with corresponding authentication information. On the other hand, if the users choose to use 'database' as provider, they need to load an onnx model to Oracle Database for embeddings; however, for summary, they don't need to do anything."
+"With the inclusion of a demo user and a populated sample table, the remaining configuration involves setting up embedding and summary functionalities. Users are presented with multiple provider options, including local database solutions and third-party services such as Ocigenai, Hugging Face, and OpenAI. Should users opt for a third-party provider, they are required to establish credentials containing the necessary authentication details. Conversely, if selecting a database as the provider for embeddings, it is necessary to upload an ONNX model to the Oracle Database. No additional setup is required for summary functionalities when using the database option."
 ]
 },
 {
@@ -253,13 +259,13 @@
 "source": [
 "### Load ONNX Model\n",
 "\n",
-"To generate embeddings, Oracle provides a few provider options for users to choose from. The users can choose 'database' provider or some 3rd party providers like OCIGENAI, HuggingFace, etc.\n",
+"Oracle accommodates a variety of embedding providers, enabling users to choose between proprietary database solutions and third-party services such as OCIGENAI and HuggingFace. This selection dictates the methodology for generating and managing embeddings.\n",
 "\n",
-"***Note*** If the users choose database option, they need to load an ONNX model to Oracle Database. The users do not need to load an ONNX model to Oracle Database if they choose to use 3rd party provider to generate embeddings.\n",
+"***Important*** : Should users opt for the database option, they must upload an ONNX model into the Oracle Database. Conversely, if a third-party provider is selected for embedding generation, uploading an ONNX model to Oracle Database is not required.\n",
 "\n",
-"One of the core benefits of using an ONNX model is that the users do not need to transfer their data to 3rd party to generate embeddings. And also, since it does not involve any network or REST API calls, it may provide better performance.\n",
+"A significant advantage of utilizing an ONNX model directly within Oracle is the enhanced security and performance it offers by eliminating the need to transmit data to external parties. Additionally, this method avoids the latency typically associated with network or REST API calls.\n",
 "\n",
-"Here is the sample code to load an ONNX model to Oracle Database:"
+"Below is the example code to upload an ONNX model into Oracle Database:"
 ]
 },
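
The example cell is not shown in this diff. A sketch of the upload step it describes, using the langchain_community Oracle integration, could look like this; the directory object, file name, and model name are assumptions:

```python
# Sketch: load an ONNX embedding model into the database ('database' provider).
# DEMO_DIR, tinybert.onnx, and demo_model are illustrative names.
from langchain_community.embeddings.oracleai import OracleEmbeddings

try:
    OracleEmbeddings.load_onnx_model(conn, "DEMO_DIR", "tinybert.onnx", "demo_model")
    print("ONNX model loaded.")
except Exception as e:
    print(f"ONNX model loading failed: {e}")
```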
{
@@ -298,11 +304,11 @@
 "source": [
 "### Create Credential\n",
 "\n",
-"On the other hand, if the users choose to use 3rd party provider to generate embeddings and summary, they need to create credential to access 3rd party provider's end points.\n",
+"When selecting third-party providers for generating embeddings, users are required to establish credentials to securely access the provider's endpoints.\n",
 "\n",
-"***Note:*** The users do not need to create any credential if they choose to use 'database' provider to generate embeddings and summary. Should the users choose to 3rd party provider, they need to create credential for the 3rd party provider they want to use. \n",
+"***Important:*** No credentials are necessary when opting for the 'database' provider to generate embeddings. However, should users decide to utilize a third-party provider, they must create credentials specific to the chosen provider.\n",
 "\n",
-"Here is a sample example:"
+"Below is an illustrative example:"
 ]
 },
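
The example cell is omitted from the diff. For a third-party provider such as OCI Generative AI, credential creation comes down to a PL/SQL call to DBMS_VECTOR_CHAIN.CREATE_CREDENTIAL, roughly as sketched below; the credential name, OCIDs, key, and fingerprint are all placeholders:

```python
# Sketch: create a credential for a third-party provider via an anonymous
# PL/SQL block. Every value below is a placeholder.
cursor = conn.cursor()
cursor.execute(
    """
    declare
        jo json_object_t;
    begin
        jo := json_object_t();
        jo.put('user_ocid', 'ocid1.user.oc1..<placeholder>');
        jo.put('tenancy_ocid', 'ocid1.tenancy.oc1..<placeholder>');
        jo.put('compartment_ocid', 'ocid1.compartment.oc1..<placeholder>');
        jo.put('private_key', '<private key>');
        jo.put('fingerprint', '<fingerprint>');
        dbms_vector_chain.create_credential(
            credential_name => 'OCI_CRED',
            params          => json(jo.to_string));
    end;
    """
)
cursor.close()
print("Credential created.")
```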
{
@@ -352,11 +358,11 @@
 "metadata": {},
 "source": [
 "### Load Documents\n",
-"The users can load the documents from Oracle Database or a file system or both. They just need to set the loader parameters accordingly. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
+"Users have the flexibility to load documents from either the Oracle Database, a file system, or both, by appropriately configuring the loader parameters. For comprehensive details on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-73397E89-92FB-48ED-94BB-1AD960C4EA1F).\n",
 "\n",
-"The main benefit of using OracleDocLoader is that it can handle 150+ different file formats. You don't need to use different types of loader for different file formats. Here is the list formats that we support: [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html)\n",
+"A significant advantage of utilizing OracleDocLoader is its capability to process over 150 distinct file formats, eliminating the need for multiple loaders for different document types. For a complete list of the supported formats, please refer to the [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html).\n",
 "\n",
-"The following sample code will show how to do that:"
+"Below is a sample code snippet that demonstrates how to use OracleDocLoader"
 ]
 },
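
For reference, a loading sketch along these lines might be the following; the schema, table, and column names are assumptions, not values from the notebook:

```python
# Sketch: load documents from a hypothetical table TESTUSER.DEMO_TAB,
# where the DATA column holds the document contents.
from langchain_community.document_loaders.oracleai import OracleDocLoader

loader_params = {"owner": "testuser", "tablename": "demo_tab", "colname": "data"}
loader = OracleDocLoader(conn=conn, params=loader_params)
docs = loader.load()
print(f"Number of docs loaded: {len(docs)}")
```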
{
@@ -399,7 +405,7 @@
 "metadata": {},
 "source": [
 "### Generate Summary\n",
-"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library provides an API to do that. There are a few summary generation provider options including Database, OCIGENAI, HuggingFace and so on. The users can choose their preferred provider to generate a summary. Like before, they just need to set the summary parameters accordingly. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters."
+"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library offers a suite of APIs designed for document summarization. It supports multiple summarization providers such as Database, OCIGENAI, HuggingFace, among others, allowing users to select the provider that best meets their needs. To utilize these capabilities, users must configure the summary parameters as specified. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-EC9DDB58-6A15-4B36-BA66-ECBA20D2CE57)."
 ]
 },
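
A summary-generation sketch with the 'database' provider might look like the following; the parameter values are assumptions:

```python
# Sketch: generate a summary for each loaded document in-database.
from langchain_community.utilities.oracleai import OracleSummary

summary_params = {
    "provider": "database",  # in-database summarizer; no credential needed
    "glevel": "S",           # assumed granularity setting
    "numParagraphs": 1,
    "language": "english",
}
summ = OracleSummary(conn=conn, params=summary_params)
for doc in docs:
    print(f"Summary: {summ.get_summary(doc.page_content)}")
```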
{
@@ -470,9 +476,9 @@
 "metadata": {},
 "source": [
 "### Split Documents\n",
-"The documents can be in different sizes: small, medium, large, or very large. The users like to split/chunk their documents into smaller pieces to generate embeddings. There are lots of different splitting customizations the users can do. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
+"The documents may vary in size, ranging from small to very large. Users often prefer to chunk their documents into smaller sections to facilitate the generation of embeddings. A wide array of customization options is available for this splitting process. For comprehensive details regarding these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-4E145629-7098-4C7C-804F-FC85D1F24240).\n",
 "\n",
-"The following sample code will show how to do that:"
+"Below is a sample code illustrating how to implement this:"
 ]
 },
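
A chunking sketch under assumed parameters, splitting by words with a hypothetical 100-word cap, could be:

```python
# Sketch: split each document into word-based chunks.
from langchain_community.document_loaders.oracleai import OracleTextSplitter

splitter_params = {"by": "words", "max": 100, "normalize": "all"}
splitter = OracleTextSplitter(conn=conn, params=splitter_params)

all_chunks = []
for doc in docs:
    all_chunks.extend(splitter.split_text(doc.page_content))
print(f"Number of chunks: {len(all_chunks)}")
```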
{
@@ -513,14 +519,16 @@
 "metadata": {},
 "source": [
 "### Generate Embeddings\n",
-"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides a number of ways to generate embeddings. The users can load an ONNX embedding model to Oracle Database and use it to generate embeddings or use some 3rd party API's end points to generate embeddings. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters."
+"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"***Note:*** The users may need to set proxy if they want to use some 3rd party embedding generation providers other than 'database' provider (aka using ONNX model)."
+"***Note:*** Currently, OracleEmbeddings processes each embedding generation request individually, without batching, by calling REST endpoints separately for each request. This method could potentially lead to exceeding the maximum request per minute quota set by some providers. However, we are actively working to enhance this process by implementing request batching, which will allow multiple embedding requests to be combined into fewer API calls, thereby optimizing our use of provider resources and adhering to their request limits. This update is expected to be rolled out soon, eliminating the current limitation.\n",
+"\n",
+"***Note:*** Users may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model."
 ]
 },
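
An embedding sketch that reuses the hypothetical in-database ONNX model from the earlier example; a proxy argument would only matter for third-party providers:

```python
# Sketch: embed a query with the in-database ONNX model loaded above.
from langchain_community.embeddings.oracleai import OracleEmbeddings

embedder_params = {"provider": "database", "model": "demo_model"}
embedder = OracleEmbeddings(conn=conn, params=embedder_params)

embedding = embedder.embed_query("Hello, Oracle AI Vector Search!")
print(f"Embedding dimension: {len(embedding)}")
```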
{
@@ -752,20 +760,18 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"The above example creates a vector store with DOT_PRODUCT distance strategy. \n",
-"\n",
-"However, the users can create Oracle AI Vector Store provides different distance strategies. Please see the [comprehensive guide](/docs/integrations/vectorstores/oracle) for more information."
+"The example provided illustrates the creation of a vector store using the DOT_PRODUCT distance strategy. Users have the flexibility to employ various distance strategies with the Oracle AI Vector Store, as detailed in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/)."
 ]
 },
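
A construction sketch tying together the earlier illustrative snippets; chunks and embedder come from those examples, and the table name 'oravs' follows the text below:

```python
# Sketch: build an OracleVS store over the chunks with DOT_PRODUCT distance.
from langchain_community.vectorstores.oraclevs import OracleVS
from langchain_community.vectorstores.utils import DistanceStrategy
from langchain_core.documents import Document

documents = [Document(page_content=chunk) for chunk in all_chunks]
vectorstore = OracleVS.from_documents(
    documents,
    embedder,
    client=conn,
    table_name="oravs",
    distance_strategy=DistanceStrategy.DOT_PRODUCT,
)
print("Vector store created.")
```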
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Now that we have embeddings stored in vector stores, let's create an index on them to get better semantic search performance during query time.\n",
+"With embeddings now stored in vector stores, it is advisable to establish an index to enhance semantic search performance during query execution.\n",
 "\n",
-"***Note*** If you are getting some insufficient memory error, please increase ***vector_memory_size*** in your database.\n",
+"***Note*** Should you encounter an \"insufficient memory\" error, it is recommended to increase the ***vector_memory_size*** in your database configuration\n",
 "\n",
-"Here is the sample code to create an index:"
+"Below is a sample code snippet for creating an index:"
 ]
 },
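
A sketch of that index-creation step, using the module-level create_index helper; the index name is illustrative:

```python
# Sketch: create a default HNSW index on the 'oravs' table.
from langchain_community.vectorstores import oraclevs

oraclevs.create_index(
    conn, vectorstore, params={"idx_name": "hnsw_oravs", "idx_type": "HNSW"}
)
print("Index created.")
```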
{
@@ -785,9 +791,9 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"The above example creates a default HNSW index on the embeddings stored in 'oravs' table. The users can set different parameters as per their requirements. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
+"This example demonstrates the creation of a default HNSW index on embeddings within the 'oravs' table. Users may adjust various parameters according to their specific needs. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/vecse/manage-different-categories-vector-indexes.html).\n",
 "\n",
-"Also, there are different types of vector indices that the users can create. Please see the [comprehensive guide](/docs/integrations/vectorstores/oracle) for more information.\n"
+"Additionally, various types of vector indices can be created to meet diverse requirements. More details can be found in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/).\n"
 ]
 },
{
@@ -797,9 +803,9 @@
 "## Perform Semantic Search\n",
 "All set!\n",
 "\n",
-"We have processed the documents, stored them to vector store, and then created index to get better query performance. Now let's do some semantic searches.\n",
+"We have successfully processed the documents and stored them in the vector store, followed by the creation of an index to enhance query performance. We are now prepared to proceed with semantic searches.\n",
 "\n",
-"Here is the sample code for this:"
+"Below is the sample code for this process:"
 ]
 },
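
A query sketch against the store built above; the question is illustrative:

```python
# Sketch: top-3 similarity search over the indexed vector store.
query = "What is Oracle AI Vector Search?"
for doc in vectorstore.similarity_search(query, k=3):
    print(doc.page_content)
```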
{

@@ -36,7 +36,9 @@
 "\n",
 "docs = loader.load()\n",
 "\n",
-"vectorstore = DocArrayInMemorySearch.from_documents(docs, embedding=UpstageEmbeddings())\n",
+"vectorstore = DocArrayInMemorySearch.from_documents(\n",
+"    docs, embedding=UpstageEmbeddings(model=\"solar-embedding-1-large\")\n",
+")\n",
 "retriever = vectorstore.as_retriever()\n",
 "\n",
 "template = \"\"\"Answer the question based only on the following context:\n",

@@ -39,12 +39,10 @@
 "from langchain_community.document_loaders.recursive_url_loader import (\n",
 "    RecursiveUrlLoader,\n",
 ")\n",
-"\n",
-"# noqa\n",
 "from langchain_community.vectorstores import Chroma\n",
 "\n",
 "# For our example, we'll load docs from the web\n",
-"from langchain_text_splitters import RecursiveCharacterTextSplitter  # noqa\n",
+"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
 "\n",
 "DOCSTORE_DIR = \".\"\n",
 "DOCSTORE_ID_KEY = \"doc_id\""

@@ -45,9 +45,6 @@ generate-files:
 	wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
 	$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/
-
-	wget -q https://raw.githubusercontent.com/langchain-ai/langgraph/main/README.md -O $(INTERMEDIATE_DIR)/langgraph.md
-	$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langgraph.md https://github.com/langchain-ai/langgraph/tree/main/

 copy-infra:
 	mkdir -p $(OUTPUT_NEW_DIR)
 	cp -r src $(OUTPUT_NEW_DIR)
@@ -69,9 +66,9 @@ md-sync:
 generate-references:
 	$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)

-build: install-py-deps generate-files copy-infra render md-sync generate-references
+build: install-py-deps generate-files copy-infra render md-sync

-vercel-build: install-vercel-deps build
+vercel-build: install-vercel-deps build generate-references
 	rm -rf docs
 	mv $(OUTPUT_NEW_DOCS_DIR) docs
 	rm -rf build

@@ -187,7 +187,7 @@ def _load_package_modules(
             modules_by_namespace[top_namespace] = _module_members

         except ImportError as e:
-            print(f"Error: Unable to import module '{namespace}' with error: {e}")  # noqa: T201
+            print(f"Error: Unable to import module '{namespace}' with error: {e}")

     return modules_by_namespace

@@ -364,7 +364,7 @@ def main(dirs: Optional[list] = None) -> None:
         dirs += [
             dir_
             for dir_ in os.listdir(ROOT_DIR / "libs" / "partners")
-            if os.path.isdir(dir_)
+            if os.path.isdir(ROOT_DIR / "libs" / "partners" / dir_)
             and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / "partners" / dir_)
         ]
         for dir_ in dirs:

@@ -1,54 +1,146 @@
 # arXiv

 LangChain implements the latest research in the field of Natural Language Processing.
-This page contains `arXiv` papers referenced in the LangChain Documentation and API Reference.
+This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
+and Templates.

 ## Summary

-| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation and API Reference |
-|------------------|---------|-------------------|-------------------------|
-| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023-07-06 | `Docs:` [docs/modules/data_connection/retrievers/long_context_reorder](https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder)
+| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
+|------------------|---------|-------------------|------------------------|
 | `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
 | `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
 | `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
 | `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
 | `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
 | `2305.06983v2` [Active Retrieval Augmented Generation](http://arxiv.org/abs/2305.06983v2) | Zhengbao Jiang, Frank F. Xu, Luyu Gao, et al. | 2023-05-11 | `Docs:` [docs/modules/chains](https://python.langchain.com/docs/modules/chains)
 | `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
 | `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
 | `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
 | `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `Docs:` [docs/use_cases/query_analysis/techniques/hyde](https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde), `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
 | `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022-12-15 | `Docs:` [docs/guides/productionization/evaluation/string/criteria_eval_chain](https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain)
 | `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
 | `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde)
 | `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
 | `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
 | `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
 | `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
 | `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
 | `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `Docs:` [docs/use_cases/sql/quickstart](https://python.langchain.com/docs/use_cases/sql/quickstart), `API:` [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
 | `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
 | `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
 | `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
 | `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
 | `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)

-## Lost in the Middle: How Language Models Use Long Contexts
+## Dense X Retrieval: What Retrieval Granularity Should We Use?

-- **arXiv id:** 2307.03172v3
-- **Title:** Lost in the Middle: How Language Models Use Long Contexts
-- **Authors:** Nelson F. Liu, Kevin Lin, John Hewitt, et al.
-- **Published Date:** 2023-07-06
-- **URL:** http://arxiv.org/abs/2307.03172v3
-- **LangChain Documentation:** [docs/modules/data_connection/retrievers/long_context_reorder](https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder)
+- **arXiv id:** 2312.06648v2
+- **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?
+- **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.
+- **Published Date:** 2023-12-11
+- **URL:** http://arxiv.org/abs/2312.06648v2
+- **LangChain:**
+
+- **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)

-**Abstract:** While recent language models have the ability to take long contexts as input,
-relatively little is known about how well they use longer context. We analyze
-the performance of language models on two tasks that require identifying
-relevant information in their input contexts: multi-document question answering
-and key-value retrieval. We find that performance can degrade significantly
-when changing the position of relevant information, indicating that current
-language models do not robustly make use of information in long input contexts.
-In particular, we observe that performance is often highest when relevant
-information occurs at the beginning or end of the input context, and
-significantly degrades when models must access relevant information in the
-middle of long contexts, even for explicitly long-context models. Our analysis
-provides a better understanding of how language models use their input context
-and provides new evaluation protocols for future long-context language models.
+**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or
+world knowledge in open-domain NLP tasks. When we use a learned dense retriever
+on a retrieval corpus at inference time, an often-overlooked design choice is
+the retrieval unit in which the corpus is indexed, e.g. document, passage, or
+sentence. We discover that the retrieval unit choice significantly impacts the
+performance of both retrieval and downstream tasks. Distinct from the typical
+approach of using passages or sentences, we introduce a novel retrieval unit,
+proposition, for dense retrieval. Propositions are defined as atomic
+expressions within text, each encapsulating a distinct factoid and presented in
+a concise, self-contained natural language format. We conduct an empirical
+comparison of different retrieval granularity. Our results reveal that
+proposition-based retrieval significantly outperforms traditional passage or
+sentence-based methods in dense retrieval. Moreover, retrieval by proposition
+also enhances the performance of downstream QA tasks, since the retrieved texts
+are more condensed with question-relevant information, reducing the need for
+lengthy input tokens and minimizing the inclusion of extraneous, irrelevant
+information.

+## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
+
+- **arXiv id:** 2311.09210v1
+- **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
+- **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
+- **Published Date:** 2023-11-15
+- **URL:** http://arxiv.org/abs/2311.09210v1
+- **LangChain:**
+
+- **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
+
+**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial
+advancement in the capabilities of large language models, notably in reducing
+factual hallucination by leveraging external knowledge sources. However, the
+reliability of the retrieved information is not always guaranteed. The
+retrieval of irrelevant data can lead to misguided responses, and potentially
+causing the model to overlook its inherent knowledge, even when it possesses
+adequate information to address the query. Moreover, standard RALMs often
+struggle to assess whether they possess adequate knowledge, both intrinsic and
+retrieved, to provide an accurate answer. In situations where knowledge is
+lacking, these systems should ideally respond with "unknown" when the answer is
+unattainable. In response to these challenges, we introduces Chain-of-Noting
+(CoN), a novel approach aimed at improving the robustness of RALMs in facing
+noisy, irrelevant documents and in handling unknown scenarios. The core idea of
+CoN is to generate sequential reading notes for retrieved documents, enabling a
+thorough evaluation of their relevance to the given question and integrating
+this information to formulate the final answer. We employed ChatGPT to create
+training data for CoN, which was subsequently trained on an LLaMa-2 7B model.
+Our experiments across four open-domain QA benchmarks show that RALMs equipped
+with CoN significantly outperform standard RALMs. Notably, CoN achieves an
+average improvement of +7.9 in EM score given entirely noisy retrieved
+documents and +10.5 in rejection rates for real-time questions that fall
+outside the pre-training knowledge scope.
+
+## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
+
+- **arXiv id:** 2310.06117v2
+- **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
+- **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.
+- **Published Date:** 2023-10-09
+- **URL:** http://arxiv.org/abs/2310.06117v2
+- **LangChain:**
+
+- **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
+
+**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables
+LLMs to do abstractions to derive high-level concepts and first principles from
+instances containing specific details. Using the concepts and principles to
+guide reasoning, LLMs significantly improve their abilities in following a
+correct reasoning path towards the solution. We conduct experiments of
+Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe
+substantial performance gains on various challenging reasoning-intensive tasks
+including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back
+Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7%
+and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.
+
+## Query Rewriting for Retrieval-Augmented Large Language Models
+
+- **arXiv id:** 2305.14283v3
+- **Title:** Query Rewriting for Retrieval-Augmented Large Language Models
+- **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.
+- **Published Date:** 2023-05-23
+- **URL:** http://arxiv.org/abs/2305.14283v3
+- **LangChain:**
+
+- **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
+
+**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the
+retrieve-then-read pipeline, making remarkable progress in knowledge-intensive
+tasks. This work introduces a new framework, Rewrite-Retrieve-Read instead of
+the previous retrieve-then-read for the retrieval-augmented LLMs from the
+perspective of the query rewriting. Unlike prior studies focusing on adapting
+either the retriever or the reader, our approach pays attention to the
+adaptation of the search query itself, for there is inevitably a gap between
+the input text and the needed knowledge in retrieval. We first prompt an LLM to
+generate the query, then use a web search engine to retrieve contexts.
+Furthermore, to better align the query to the frozen modules, we propose a
+trainable scheme for our pipeline. A small language model is adopted as a
+trainable rewriter to cater to the black-box LLM reader. The rewriter is
+trained using the feedback of the LLM reader by reinforcement learning.
+Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice
+QA. Experiments results show consistent performance improvement, indicating
+that our framework is proven effective and scalable, and brings a new framework
+for retrieval-augmented LLM.
+
 ## Large Language Model Guided Tree-of-Thought

@@ -57,8 +149,9 @@ and provides new evaluation protocols for future long-context language models.
 - **Authors:** Jieyi Long
 - **Published Date:** 2023-05-15
 - **URL:** http://arxiv.org/abs/2305.08291v1
+- **LangChain:**
+
-- **LangChain API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
+- **API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)

 **Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel
 approach aimed at improving the problem-solving capabilities of auto-regressive
@@ -78,35 +171,6 @@ significantly increase the success rate of Sudoku puzzle solving. Our
 implementation of the ToT-based Sudoku solver is available on GitHub:
 \url{https://github.com/jieyilong/tree-of-thought-puzzle-solver}.

-## Active Retrieval Augmented Generation
-
-- **arXiv id:** 2305.06983v2
-- **Title:** Active Retrieval Augmented Generation
-- **Authors:** Zhengbao Jiang, Frank F. Xu, Luyu Gao, et al.
-- **Published Date:** 2023-05-11
-- **URL:** http://arxiv.org/abs/2305.06983v2
-- **LangChain Documentation:** [docs/modules/chains](https://python.langchain.com/docs/modules/chains)
-
-
-**Abstract:** Despite the remarkable ability of large language models (LMs) to comprehend
-and generate language, they have a tendency to hallucinate and create factually
-inaccurate output. Augmenting LMs by retrieving information from external
-knowledge resources is one promising solution. Most existing retrieval
-augmented LMs employ a retrieve-and-generate setup that only retrieves
-information once based on the input. This is limiting, however, in more general
-scenarios involving generation of long texts, where continually gathering
-information throughout generation is essential. In this work, we provide a
-generalized view of active retrieval augmented generation, methods that
-actively decide when and what to retrieve across the course of the generation.
-We propose Forward-Looking Active REtrieval augmented generation (FLARE), a
-generic method which iteratively uses a prediction of the upcoming sentence to
-anticipate future content, which is then utilized as a query to retrieve
-relevant documents to regenerate the sentence if it contains low-confidence
-tokens. We test FLARE along with baselines comprehensively over 4 long-form
-knowledge-intensive generation tasks/datasets. FLARE achieves superior or
-competitive performance on all tasks, demonstrating the effectiveness of our
-method. Code and datasets are available at https://github.com/jzbjyb/FLARE.

 ## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face

 - **arXiv id:** 2303.17580v4
@@ -114,8 +178,9 @@ method. Code and datasets are available at https://github.com/jzbjyb/FLARE.
 - **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
 - **Published Date:** 2023-03-30
 - **URL:** http://arxiv.org/abs/2303.17580v4
+- **LangChain:**
+
-- **LangChain API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
+- **API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)

 **Abstract:** Solving complicated AI tasks with different domains and modalities is a key
 step toward artificial general intelligence. While there are numerous AI models

@@ -144,8 +209,9 @@ realization of artificial general intelligence.
- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
- **Published Date:** 2023-03-15
- **URL:** http://arxiv.org/abs/2303.08774v6
- **LangChain:**
  - **Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)

**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can
accept image and text inputs and produce text outputs. While less capable than

@@ -167,8 +233,9 @@ more than 1/1,000th the compute of GPT-4.
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **Published Date:** 2023-01-24
- **URL:** http://arxiv.org/abs/2301.10226v4
- **LangChain:**
  - **API Reference:** [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

**Abstract:** Potential harms of large language models can be mitigated by watermarking
model output, i.e., embedding signals into generated text that are invisible to

@@ -191,8 +258,10 @@ family, and discuss robustness and security.
- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **Published Date:** 2022-12-20
- **URL:** http://arxiv.org/abs/2212.10496v1
- **LangChain:**
  - **Documentation:** [docs/use_cases/query_analysis/techniques/hyde](https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde)
  - **API Reference:** [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
  - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)

**Abstract:** While dense retrieval has been shown effective and efficient across tasks and
languages, it remains difficult to create effective fully zero-shot dense

@@ -212,35 +281,6 @@ state-of-the-art unsupervised dense retriever Contriever and shows strong
performance comparable to fine-tuned retrievers, across various tasks (e.g. web
search, QA, fact verification) and languages~(e.g. sw, ko, ja).

## Constitutional AI: Harmlessness from AI Feedback

- **arXiv id:** 2212.08073v1
- **Title:** Constitutional AI: Harmlessness from AI Feedback
- **Authors:** Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al.
- **Published Date:** 2022-12-15
- **URL:** http://arxiv.org/abs/2212.08073v1
- **LangChain Documentation:** [docs/guides/productionization/evaluation/string/criteria_eval_chain](https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain)

**Abstract:** As AI systems become more capable, we would like to enlist their help to
supervise other AIs. We experiment with methods for training a harmless AI
assistant through self-improvement, without any human labels identifying
harmful outputs. The only human oversight is provided through a list of rules
or principles, and so we refer to the method as 'Constitutional AI'. The
process involves both a supervised learning and a reinforcement learning phase.
In the supervised phase we sample from an initial model, then generate
self-critiques and revisions, and then finetune the original model on revised
responses. In the RL phase, we sample from the finetuned model, use a model to
evaluate which of the two samples is better, and then train a preference model
from this dataset of AI preferences. We then train with RL using the preference
model as the reward signal, i.e. we use 'RL from AI Feedback' (RLAIF). As a
result we are able to train a harmless but non-evasive AI assistant that
engages with harmful queries by explaining its objections to them. Both the SL
and RL methods can leverage chain-of-thought style reasoning to improve the
human-judged performance and transparency of AI decision making. These methods
make it possible to control AI behavior more precisely and with far fewer human
labels.

## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments

- **arXiv id:** 2212.07425v3

@@ -248,8 +288,9 @@ labels.
- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- **Published Date:** 2022-12-12
- **URL:** http://arxiv.org/abs/2212.07425v3
- **LangChain:**
  - **API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)

**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been
amplified in the Internet era. Given the volume of data and the subtlety of

@@ -280,8 +321,9 @@ further work on logical fallacy identification.
- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **Published Date:** 2022-11-25
- **URL:** http://arxiv.org/abs/2211.13892v2
- **LangChain:**
  - **API Reference:** [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)

**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in
learning from explanations in prompts, but there has been limited understanding

@@ -307,8 +349,9 @@ performance across three real-world tasks on multiple LLMs.
- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- **Published Date:** 2022-11-18
- **URL:** http://arxiv.org/abs/2211.10435v2
- **LangChain:**
  - **API Reference:** [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)

**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
to perform arithmetic and symbolic reasoning tasks, when provided with a few

@@ -340,8 +383,9 @@ publicly available at http://reasonwithpal.com/ .
- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- **Published Date:** 2022-09-22
- **URL:** http://arxiv.org/abs/2209.10785v2
- **LangChain:**
  - **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)

**Abstract:** Traditional data lakes provide critical data infrastructure for analytical
workloads by enabling time travel, running SQL queries, ingesting data with

@@ -367,8 +411,9 @@ TensorFlow, JAX, and integrate with numerous MLOps tools.
- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- **Published Date:** 2022-05-25
- **URL:** http://arxiv.org/abs/2205.12654v1
- **LangChain:**
  - **API Reference:** [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)

**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
languages is challenging, in particular to cover the long tail of low-resource

@@ -395,8 +440,9 @@ encoders, mine bitexts, and validate the bitexts by training NMT systems.
- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- **Published Date:** 2022-03-15
- **URL:** http://arxiv.org/abs/2204.00498v1
- **LangChain:**
  - **Documentation:** [docs/use_cases/sql/quickstart](https://python.langchain.com/docs/use_cases/sql/quickstart)
  - **API Reference:** [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)

**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
language model. We find that, without any finetuning, Codex is a strong

@@ -413,8 +459,9 @@ few-shot examples.
- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- **Published Date:** 2022-02-01
- **URL:** http://arxiv.org/abs/2202.00666v5
- **LangChain:**
  - **API Reference:** [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models

@@ -444,8 +491,9 @@ reducing degenerate repetitions.
- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- **Published Date:** 2021-02-26
- **URL:** http://arxiv.org/abs/2103.00020v1
- **LangChain:**
  - **API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)

**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set
of predetermined object categories. This restricted form of supervision limits

@@ -475,8 +523,9 @@ https://github.com/OpenAI/CLIP.
- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- **Published Date:** 2019-09-11
- **URL:** http://arxiv.org/abs/1909.05858v2
- **LangChain:**
  - **API Reference:** [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We

@@ -497,8 +546,9 @@ full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
- **Authors:** Nils Reimers, Iryna Gurevych
- **Published Date:** 2019-08-27
- **URL:** http://arxiv.org/abs/1908.10084v1
- **LangChain:**
  - **Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)

**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new
state-of-the-art performance on sentence-pair regression tasks like semantic

@@ -1,137 +1,63 @@

# YouTube videos

[Updated 2024-05-16]

### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)

### Introduction to LangChain with Harrison Chase, creator of LangChain

- [Building the Future with LLMs, `LangChain`, & `Pinecone`](https://youtu.be/nMniwlGyX-c) by [Pinecone](https://www.youtube.com/@pinecone-io)
- [LangChain and Weaviate with Harrison Chase and Bob van Luijt - Weaviate Podcast #36](https://youtu.be/lhby7Ql7hbk) by [Weaviate • Vector Database](https://www.youtube.com/@Weaviate)
- [LangChain Demo + Q&A with Harrison Chase](https://youtu.be/zaYTXQFR0_s?t=788) by [Full Stack Deep Learning](https://www.youtube.com/@The_Full_Stack)
- [LangChain Agents: Build Personal Assistants For Your Data (Q&A with Harrison Chase and Mayo Oshin)](https://youtu.be/gVkF8cwfBLI) by [Chat with data](https://www.youtube.com/@chatwithdata)

### [Tutorials on YouTube](/docs/additional_resources/tutorials/#tutorials)

## Videos (sorted by views)

- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain OpenAI API)](https://youtu.be/9AXP7tCI9PI) by [TechLead](https://www.youtube.com/@TechLead)
- [First look - `ChatGPT` + `WolframAlpha` (`GPT-3.5` and Wolfram|Alpha via LangChain by James Weaver)](https://youtu.be/wYGbY811oMo) by [Dr Alan D. Thompson](https://www.youtube.com/@DrAlanDThompson)
- [LangChain explained - The hottest new Python framework](https://youtu.be/RoR4XJw8wIc) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
- [Chatbot with INFINITE MEMORY using `OpenAI` & `Pinecone` - `GPT-3`, `Embeddings`, `ADA`, `Vector DB`, `Semantic`](https://youtu.be/2xNzB7xq8nk) by [David Shapiro ~ AI](https://www.youtube.com/@DaveShap)
- [LangChain for LLMs is... basically just an Ansible playbook](https://youtu.be/X51N9C-OhlE) by [David Shapiro ~ AI](https://www.youtube.com/@DaveShap)
- [Build your own LLM Apps with LangChain & `GPT-Index`](https://youtu.be/-75p09zFUJY) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- [`BabyAGI` - New System of Autonomous AI Agents with LangChain](https://youtu.be/lg3kJvf1kXo) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- [Run `BabyAGI` with Langchain Agents (with Python Code)](https://youtu.be/WosPGHPObx8) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- [How to Use Langchain With `Zapier` | Write and Send Email with GPT-3 | OpenAI API Tutorial](https://youtu.be/p9v2-xEa9A0) by [StarMorph AI](https://www.youtube.com/@starmorph)
- [Use Your Locally Stored Files To Get Response From GPT - `OpenAI` | Langchain | Python](https://youtu.be/NC1Ni9KS-rk) by [Shweta Lodha](https://www.youtube.com/@shweta-lodha)
- [`Langchain JS` | How to Use GPT-3, GPT-4 to Reference your own Data | `OpenAI Embeddings` Intro](https://youtu.be/veV2I-NEjaM) by [StarMorph AI](https://www.youtube.com/@starmorph)
- [The easiest way to work with large language models | Learn LangChain in 10min](https://youtu.be/kmbS6FDQh7c) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
- [4 Autonomous AI Agents: “Westworld” simulation `BabyAGI`, `AutoGPT`, `Camel`, `LangChain`](https://youtu.be/yWbnH6inT_U) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
- [AI CAN SEARCH THE INTERNET? Langchain Agents + OpenAI ChatGPT](https://youtu.be/J-GL0htqda8) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
- [Query Your Data with GPT-4 | Embeddings, Vector Databases | Langchain JS Knowledgebase](https://youtu.be/jRnUPUTkZmU) by [StarMorph AI](https://www.youtube.com/@starmorph)
- [`Weaviate` + LangChain for LLM apps presented by Erika Cardenas](https://youtu.be/7AGj4Td5Lgw) by [`Weaviate` • Vector Database](https://www.youtube.com/@Weaviate)
- [Langchain Overview - How to Use Langchain & `ChatGPT`](https://youtu.be/oYVYIq0lOtI) by [Python In Office](https://www.youtube.com/@pythoninoffice6568)
- [LangChain Tutorials](https://www.youtube.com/watch?v=FuqdVNB_8c0&list=PL9V0lbeJ69brU-ojMpU1Y7Ic58Tap0Cw6) by [Edrick](https://www.youtube.com/@edrickdch):
  - [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
  - [LangChain 101: The Complete Beginner's Guide](https://youtu.be/P3MAbZ2eMUI)
- [Custom langchain Agent & Tools with memory. Turn any `Python function` into langchain tool with Gpt 3](https://youtu.be/NIG8lXk0ULg) by [echohive](https://www.youtube.com/@echohive)
- [Building AI LLM Apps with LangChain (and more?) - LIVE STREAM](https://www.youtube.com/live/M-2Cj_2fzWI?feature=share) by [Nicholas Renotte](https://www.youtube.com/@NicholasRenotte)
- [`ChatGPT` with any `YouTube` video using langchain and `chromadb`](https://youtu.be/TQZfB2bzVwU) by [echohive](https://www.youtube.com/@echohive)
- [How to Talk to a `PDF` using LangChain and `ChatGPT`](https://youtu.be/v2i1YDtrIwk) by [Automata Learning Lab](https://www.youtube.com/@automatalearninglab)
- [Langchain Document Loaders Part 1: Unstructured Files](https://youtu.be/O5C0wfsen98) by [Merk](https://www.youtube.com/@heymichaeldaigler)
- [LangChain - Prompt Templates (what all the best prompt engineers use)](https://youtu.be/1aRu8b0XNOQ) by [Nick Daigler](https://www.youtube.com/@nickdaigler)
- [LangChain. Crear aplicaciones Python impulsadas por GPT](https://youtu.be/DkW_rDndts8) by [Jesús Conde](https://www.youtube.com/@0utKast)
- [Easiest Way to Use GPT In Your Products | LangChain Basics Tutorial](https://youtu.be/fLy0VenZyGc) by [Rachel Woods](https://www.youtube.com/@therachelwoods)
- [`BabyAGI` + `GPT-4` Langchain Agent with Internet Access](https://youtu.be/wx1z_hs5P6E) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
- [Learning LLM Agents. How does it actually work? LangChain, AutoGPT & OpenAI](https://youtu.be/mb_YAABSplk) by [Arnoldas Kemeklis](https://www.youtube.com/@processusAI)
- [Get Started with LangChain in `Node.js`](https://youtu.be/Wxx1KUWJFv4) by [Developers Digest](https://www.youtube.com/@DevelopersDigest)
- [LangChain + `OpenAI` tutorial: Building a Q&A system w/ own text data](https://youtu.be/DYOU_Z0hAwo) by [Samuel Chan](https://www.youtube.com/@SamuelChan)
- [Langchain + `Zapier` Agent](https://youtu.be/yribLAb-pxA) by [Merk](https://www.youtube.com/@heymichaeldaigler)
- [Connecting the Internet with `ChatGPT` (LLMs) using Langchain And Answers Your Questions](https://youtu.be/9Y0TBC63yZg) by [Kamalraj M M](https://www.youtube.com/@insightbuilder)
- [Build More Powerful LLM Applications for Business's with LangChain (Beginners Guide)](https://youtu.be/sp3-WLKEcBg) by [No Code Blackbox](https://www.youtube.com/@nocodeblackbox)
- [LangFlow LLM Agent Demo for 🦜🔗LangChain](https://youtu.be/zJxDHaWt-6o) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
- [Chatbot Factory: Streamline Python Chatbot Creation with LLMs and Langchain](https://youtu.be/eYer3uzrcuM) by [Finxter](https://www.youtube.com/@CobusGreylingZA)
- [LangChain Tutorial - ChatGPT mit eigenen Daten](https://youtu.be/0XDLyY90E2c) by [Coding Crashkurse](https://www.youtube.com/@codingcrashkurse6429)
- [Chat with a `CSV` | LangChain Agents Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [GoDataProf](https://www.youtube.com/@godataprof)
- [Introdução ao Langchain - #Cortes - Live DataHackers](https://youtu.be/fw8y5VRei5Y) by [Prof. João Gabriel Lima](https://www.youtube.com/@profjoaogabriellima)
- [LangChain: Level up `ChatGPT` !? | LangChain Tutorial Part 1](https://youtu.be/vxUGx8aZpDE) by [Code Affinity](https://www.youtube.com/@codeaffinitydev)
- [KI schreibt krasses Youtube Skript 😲😳 | LangChain Tutorial Deutsch](https://youtu.be/QpTiXyK1jus) by [SimpleKI](https://www.youtube.com/@simpleki)
- [Chat with Audio: Langchain, `Chroma DB`, OpenAI, and `Assembly AI`](https://youtu.be/Kjy7cx1r75g) by [AI Anytime](https://www.youtube.com/@AIAnytime)
- [QA over documents with Auto vector index selection with Langchain router chains](https://youtu.be/9G05qybShv8) by [echohive](https://www.youtube.com/@echohive)
- [Build your own custom LLM application with `Bubble.io` & Langchain (No Code & Beginner friendly)](https://youtu.be/O7NhQGu1m6c) by [No Code Blackbox](https://www.youtube.com/@nocodeblackbox)
- [Simple App to Question Your Docs: Leveraging `Streamlit`, `Hugging Face Spaces`, LangChain, and `Claude`!](https://youtu.be/X4YbNECRr7o) by [Chris Alexiuk](https://www.youtube.com/@chrisalexiuk)
- [LANGCHAIN AI- `ConstitutionalChainAI` + Databutton AI ASSISTANT Web App](https://youtu.be/5zIU6_rdJCU) by [Avra](https://www.youtube.com/@Avra_b)
- [LANGCHAIN AI AUTONOMOUS AGENT WEB APP - 👶 `BABY AGI` 🤖 with EMAIL AUTOMATION using `DATABUTTON`](https://youtu.be/cvAwOGfeHgw) by [Avra](https://www.youtube.com/@Avra_b)
- [The Future of Data Analysis: Using A.I. Models in Data Analysis (LangChain)](https://youtu.be/v_LIcVyg5dk) by [Absent Data](https://www.youtube.com/@absentdata)
- [Memory in LangChain | Deep dive (python)](https://youtu.be/70lqvTFh_Yg) by [Eden Marco](https://www.youtube.com/@EdenMarco)
- [9 LangChain UseCases | Beginner's Guide | 2023](https://youtu.be/zS8_qosHNMw) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
- [Use Large Language Models in Jupyter Notebook | LangChain | Agents & Indexes](https://youtu.be/JSe11L1a_QQ) by [Abhinaw Tiwari](https://www.youtube.com/@AbhinawTiwariAT)
- [How to Talk to Your Langchain Agent | `11 Labs` + `Whisper`](https://youtu.be/N4k459Zw2PU) by [VRSEN](https://www.youtube.com/@vrsen)
- [LangChain Deep Dive: 5 FUN AI App Ideas To Build Quickly and Easily](https://youtu.be/mPYEPzLkeks) by [James NoCode](https://www.youtube.com/@jamesnocode)
- [LangChain 101: Models](https://youtu.be/T6c_XsyaNSQ) by [Mckay Wrigley](https://www.youtube.com/@realmckaywrigley)
- [LangChain with JavaScript Tutorial #1 | Setup & Using LLMs](https://youtu.be/W3AoeMrg27o) by [Leon van Zyl](https://www.youtube.com/@leonvanzyl)
- [LangChain Overview & Tutorial for Beginners: Build Powerful AI Apps Quickly & Easily (ZERO CODE)](https://youtu.be/iI84yym473Q) by [James NoCode](https://www.youtube.com/@jamesnocode)
- [LangChain In Action: Real-World Use Case With Step-by-Step Tutorial](https://youtu.be/UO699Szp82M) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- [Summarizing and Querying Multiple Papers with LangChain](https://youtu.be/p_MQRWH5Y6k) by [Automata Learning Lab](https://www.youtube.com/@automatalearninglab)
- [Using Langchain (and `Replit`) through `Tana`, ask `Google`/`Wikipedia`/`Wolfram Alpha` to fill out a table](https://youtu.be/Webau9lEzoI) by [Stian Håklev](https://www.youtube.com/@StianHaklev)
- [Langchain PDF App (GUI) | Create a ChatGPT For Your `PDF` in Python](https://youtu.be/wUAUdEw5oxM) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Auto-GPT with LangChain 🔥 | Create Your Own Personal AI Assistant](https://youtu.be/imDfPmMKEjM) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
- [Create Your OWN Slack AI Assistant with Python & LangChain](https://youtu.be/3jFXRNn2Bu8) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- [How to Create LOCAL Chatbots with GPT4All and LangChain [Full Guide]](https://youtu.be/4p1Fojur8Zw) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
- [Build a `Multilingual PDF` Search App with LangChain, `Cohere` and `Bubble`](https://youtu.be/hOrtuumOrv8) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
- [Building a LangChain Agent (code-free!) Using `Bubble` and `Flowise`](https://youtu.be/jDJIIVWTZDE) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
- [Build a LangChain-based Semantic PDF Search App with No-Code Tools Bubble and Flowise](https://youtu.be/s33v5cIeqA4) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
- [LangChain Memory Tutorial | Building a ChatGPT Clone in Python](https://youtu.be/Cwq91cj2Pnc) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [ChatGPT For Your DATA | Chat with Multiple Documents Using LangChain](https://youtu.be/TeDgIDqQmzs) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
- [`Llama Index`: Chat with Documentation using URL Loader](https://youtu.be/XJRoDEctAwA) by [Merk](https://www.youtube.com/@heymichaeldaigler)
- [Using OpenAI, LangChain, and `Gradio` to Build Custom GenAI Applications](https://youtu.be/1MsmqMg3yUc) by [David Hundley](https://www.youtube.com/@dkhundley)
- [Build AI chatbot with custom knowledge base using OpenAI API and GPT Index](https://youtu.be/vDZAZuaXf48) by [Irina Nik](https://www.youtube.com/@irina_nik)
- [Build Your Own Auto-GPT Apps with LangChain (Python Tutorial)](https://youtu.be/NYSWn1ipbgg) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Chat with a `CSV` | `LangChain Agents` Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Create Your Own ChatGPT with `PDF` Data in 5 Minutes (LangChain Tutorial)](https://youtu.be/au2WVVGUvc8) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
- [Build a Custom Chatbot with OpenAI: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU) by [Fabrikod](https://www.youtube.com/@fabrikod)
- [`Flowise` is an open-source no-code UI visual tool to build 🦜🔗LangChain applications](https://youtu.be/CovAPtQPU0k) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
- [LangChain & GPT 4 For Data Analysis: The `Pandas` Dataframe Agent](https://youtu.be/rFQ5Kmkd4jc) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw) by [Girlfriend GPT](https://www.youtube.com/@girlfriendGPT)
- [How to build with Langchain 10x easier | ⛓️ LangFlow & `Flowise`](https://youtu.be/Ya1oGL7ZTvU) by [AI Jason](https://www.youtube.com/@AIJasonZ)
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg) by [Krish Naik](https://www.youtube.com/@krishnaik06)
- ⛓ [Vector Embeddings Tutorial – Code Your Own AI Assistant with `GPT-4 API` + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=5uJhxoh2tvdnOXok) by [FreeCodeCamp.org](https://www.youtube.com/@freecodecamp)
- ⛓ [Fully LOCAL `Llama 2` Q&A with LangChain](https://youtu.be/wgYctKFnQ74?si=UX1F3W-B3MqF4-K-) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- ⛓ [Fully LOCAL `Llama 2` Langchain on CPU](https://youtu.be/yhECvKMu8kM?si=IvjxwlA1c09VwHZ4) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- ⛓ [Build LangChain Audio Apps with Python in 5 Minutes](https://youtu.be/7w7ysaDz2W4?si=BvdMiyHhormr2-vr) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
- ⛓ [`Voiceflow` & `Flowise`: Want to Beat Competition? New Tutorial with Real AI Chatbot](https://youtu.be/EZKkmeFwag0?si=-4dETYDHEstiK_bb) by [AI SIMP](https://www.youtube.com/@aisimp)
- ⛓ [THIS Is How You Build Production-Ready AI Apps (`LangSmith` Tutorial)](https://youtu.be/tFXm5ijih98?si=lfiqpyaivxHFyI94) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- ⛓ [Build POWERFUL LLM Bots EASILY with Your Own Data - `Embedchain` - Langchain 2.0? (Tutorial)](https://youtu.be/jE24Y_GasE8?si=0yEDZt3BK5Q-LIuF) by [WorldofAI](https://www.youtube.com/@intheworldofai)
- ⛓ [`Code Llama` powered Gradio App for Coding: Runs on CPU](https://youtu.be/AJOhV6Ryy5o?si=ouuQT6IghYlc1NEJ) by [AI Anytime](https://www.youtube.com/@AIAnytime)
- ⛓ [LangChain Complete Course in One Video | Develop LangChain (AI) Based Solutions for Your Business](https://youtu.be/j9mQd-MyIg8?si=_wlNT3nP2LpDKztZ) by [UBprogrammer](https://www.youtube.com/@UBprogrammer)
- ⛓ [How to Run `LLaMA` Locally on CPU or GPU | Python & Langchain & CTransformers Guide](https://youtu.be/SvjWDX2NqiM?si=DxFml8XeGhiLTzLV) by [Code With Prince](https://www.youtube.com/@CodeWithPrince)
- ⛓ [PyData Heidelberg #11 - TimeSeries Forecasting & LLM Langchain](https://www.youtube.com/live/Glbwb5Hxu18?si=PIEY8Raq_C9PCHuW) by [PyData](https://www.youtube.com/@PyDataTV)
- ⛓ [Prompt Engineering in Web Development | Using LangChain and Templates with OpenAI](https://youtu.be/pK6WzlTOlYw?si=fkcDQsBG2h-DM8uQ) by [Akamai Developer](https://www.youtube.com/@AkamaiDeveloper)
- ⛓ [Retrieval-Augmented Generation (RAG) using LangChain and `Pinecone` - The RAG Special Episode](https://youtu.be/J_tCD_J6w3s?si=60Mnr5VD9UED9bGG) by [Generative AI and Data Science On AWS](https://www.youtube.com/@GenerativeAIOnAWS)
- ⛓ [`LLAMA2 70b-chat` Multiple Documents Chatbot with Langchain & Streamlit |All OPEN SOURCE|Replicate API](https://youtu.be/vhghB81vViM?si=dszzJnArMeac7lyc) by [DataInsightEdge](https://www.youtube.com/@DataInsightEdge01)
- ⛓ [Chatting with 44K Fashion Products: LangChain Opportunities and Pitfalls](https://youtu.be/Zudgske0F_s?si=8HSshHoEhh0PemJA) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- ⛓ [Structured Data Extraction from `ChatGPT` with LangChain](https://youtu.be/q1lYg8JISpQ?si=0HctzOHYZvq62sve) by [MG](https://www.youtube.com/@MG_cafe)
- ⛓ [Chat with Multiple PDFs using `Llama 2`, `Pinecone` and LangChain (Free LLMs and Embeddings)](https://youtu.be/TcJ_tVSGS4g?si=FZYnMDJyoFfL3Z2i) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
- ⛓ [Integrate Audio into `LangChain.js` apps in 5 Minutes](https://youtu.be/hNpUSaYZIzs?si=Gb9h7W9A8lzfvFKi) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
- ⛓ [`ChatGPT` for your data with Local LLM](https://youtu.be/bWrjpwhHEMU?si=uM6ZZ18z9og4M90u) by [Jacob Jedryszek](https://www.youtube.com/@jj09)
- ⛓ [Training `Chatgpt` with your personal data using langchain step by step in detail](https://youtu.be/j3xOMde2v9Y?si=179HsiMU-hEPuSs4) by [NextGen Machines](https://www.youtube.com/@MayankGupta-kb5yc)
- ⛓ [Use ANY language in `LangSmith` with REST](https://youtu.be/7BL0GEdMmgY?si=iXfOEdBLqXF6hqRM) by [Nerding I/O](https://www.youtube.com/@nerding_io)
- ⛓ [How to Leverage the Full Potential of LLMs for Your Business with Langchain - Leon Ruddat](https://youtu.be/vZmoEa7oWMg?si=ZhMmydq7RtkZd56Q) by [PyData](https://www.youtube.com/@PyDataTV)
- ⛓ [`ChatCSV` App: Chat with CSV files using LangChain and `Llama 2`](https://youtu.be/PvsMg6jFs8E?si=Qzg5u5gijxj933Ya) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
- ⛓ [Build Chat PDF app in Python with LangChain, OpenAI, Streamlit | Full project | Learn Coding](https://www.youtube.com/watch?v=WYzFzZg4YZI) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)
- ⛓ [Build Eminem Bot App with LangChain, Streamlit, OpenAI | Full Python Project | Tutorial | AI ChatBot](https://www.youtube.com/watch?v=a2shHB4MRZ4) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)

### [Prompt Engineering and LangChain](https://www.youtube.com/watch?v=muXbPpG_ys4&list=PLEJK-H61Xlwzm5FYLDdKt_6yibO33zoMW) by [Venelin Valkov](https://www.youtube.com/@venelin_valkov)

- [Getting Started with LangChain: Load Custom Data, Run OpenAI Models, Embeddings and `ChatGPT`](https://www.youtube.com/watch?v=muXbPpG_ys4)
- [Loaders, Indexes & Vectorstores in LangChain: Question Answering on `PDF` files with `ChatGPT`](https://www.youtube.com/watch?v=FQnvfR8Dmr0)
- [LangChain Models: `ChatGPT`, `Flan Alpaca`, `OpenAI Embeddings`, Prompt Templates & Streaming](https://www.youtube.com/watch?v=zy6LiK5F5-s)
- [LangChain Chains: Use `ChatGPT` to Build Conversational Agents, Summaries and Q&A on Text With LLMs](https://www.youtube.com/watch?v=h1tJZQPcimM)
- [Analyze Custom CSV Data with `GPT-4` using Langchain](https://www.youtube.com/watch?v=Ew3sGdX8at4)
- [Build ChatGPT Chatbots with LangChain Memory: Understanding and Implementing Memory in Conversations](https://youtu.be/CyuUlf54wTs)

Only videos with 40K+ views:

- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)
- [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)
- [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)
- [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)
- [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)
- [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)
- [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)
- [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)
- [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)
- [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)
- [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)
- [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)
- [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)
- [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)
- [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)
- [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)
- [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)
- [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)
- [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)
- [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)
- [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)
- [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)
- [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)
- [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)
- [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)
- [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)
- [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)
- [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)
- [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)
- [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)
- [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)
- [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)
- [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)
- [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)
- [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)
- [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)
- [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)
- [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)
- [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)
- [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)
- [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)
- [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)
- [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)
- [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)
- [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)

---------------------

[Updated 2024-05-16]

@@ -33,7 +33,7 @@ Key partner packages are separated out (see below).
This contains all integrations for various components (LLMs, vectorstores, retrievers).
All dependencies in this package are optional to keep the package as lightweight as possible.

### [`langgraph`](https://langchain-ai.github.io/langgraph)

`langgraph` is an extension of `langchain` aimed at
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
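
As a rough illustration of the graph model, here is a minimal sketch; the state shape and node names are ours, not from the docs:

```python
from typing import TypedDict

from langgraph.graph import END, StateGraph


class State(TypedDict):
    question: str
    answer: str


def research(state: State) -> dict:
    # A node is a plain function that reads the state and returns updates.
    return {"answer": f"Notes on: {state['question']}"}


def respond(state: State) -> dict:
    return {"answer": state["answer"] + " (summarized)"}


graph = StateGraph(State)
graph.add_node("research", research)
graph.add_node("respond", respond)
graph.set_entry_point("research")      # where execution starts
graph.add_edge("research", "respond")  # steps are modeled as edges
graph.add_edge("respond", END)

app = graph.compile()
print(app.invoke({"question": "What is LCEL?", "answer": ""}))
```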

@@ -44,7 +44,7 @@ LangGraph exposes high level interfaces for creating common types of agents, as

A package to deploy LangChain chains as REST APIs. Makes it easy to get a production-ready API up and running.
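
For illustration, a minimal server might look like this sketch (the joke chain is a placeholder; any Runnable can be served):

```python
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langserve import add_routes

app = FastAPI(title="Demo server")

# Placeholder chain; swap in your own Runnable.
chain = ChatPromptTemplate.from_template("Tell me a joke about {topic}") | ChatOpenAI()

# Exposes POST /joke/invoke, /joke/batch and /joke/stream.
add_routes(app, chain, path="/joke")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
```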

### [LangSmith](https://docs.smith.langchain.com)

A developer platform that lets you debug, test, evaluate, and monitor LLM applications.

@@ -66,7 +66,7 @@ LCEL was designed from day 1 to **support putting prototypes in production, with
When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means, e.g., that we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.
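
For instance (a minimal sketch, assuming an OpenAI chat model and API key are configured):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Write a haiku about {topic}")
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo") | StrOutputParser()

# Parsed chunks are yielded as soon as the model produces raw tokens.
for chunk in chain.stream({"topic": "rivers"}):
    print(chunk, end="", flush=True)
```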

**Async support**
Any chain built with LCEL can be called both with the synchronous API (e.g. in your Jupyter notebook while prototyping) and with the asynchronous API (e.g. in a [LangServe](/docs/langserve/) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.
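
The same `chain` from the streaming sketch above can be awaited without any changes to how it was built:

```python
import asyncio


async def main() -> None:
    # Every Runnable exposes `ainvoke`/`astream` mirroring `invoke`/`stream`.
    result = await chain.ainvoke({"topic": "rivers"})
    print(result)


asyncio.run(main())
```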

**Optimized parallel execution**
Whenever your LCEL chains have steps that can be executed in parallel (e.g. if you fetch documents from multiple retrievers), we automatically do it, both in the sync and the async interfaces, for the smallest possible latency.
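
A sketch of an explicitly parallel step (both retrievers are placeholders for whatever retrievers you have configured):

```python
from langchain_core.runnables import RunnableParallel

# Both branches run concurrently; the output is a dict with both results.
fetch_context = RunnableParallel(
    web_docs=web_retriever,        # placeholder retriever
    vector_docs=vector_retriever,  # placeholder retriever
)
context = fetch_context.invoke("What are the approaches to task decomposition?")
```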

@@ -80,9 +80,9 @@ For more complex chains it’s often very useful to access the results of interm
**Input and output schemas**
Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas inferred from the structure of your chain. This can be used for validation of inputs and outputs, and is an integral part of LangServe.
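
These schemas can be inspected directly; a minimal sketch, reusing the streaming chain above (the commented outputs are illustrative):

```python
# Pydantic models inferred from the structure of the chain.
print(chain.input_schema.schema())   # e.g. an object schema with a "topic" field
print(chain.output_schema.schema())  # e.g. a plain string schema
```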

[**Seamless LangSmith tracing**](https://docs.smith.langchain.com)
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.

[**Seamless LangServe deployment**](/docs/langserve)
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).

@@ -174,7 +174,7 @@ The `content` property describes the content of the message.
This can be a few different things:

- A string (most models deal with this type of content)
- A List of dictionaries (this is used for multimodal input, where the dictionary contains information about that input type and that input location)

#### HumanMessage

@@ -476,6 +476,82 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
It is recommended, however, that you start to transition to LangGraph.
In order to assist in this, we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).

### Multimodal

Some models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal **outputs** are even less common. As such, we've kept our multimodal abstractions fairly lightweight and plan to further solidify the multimodal APIs and interaction patterns as the field matures.

In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
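
As an illustration (a sketch assuming a multimodal-capable model such as `gpt-4o` and a reachable image URL):

```python
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")

# OpenAI-style content blocks: a list of dicts instead of a plain string.
message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image in one sentence."},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    ]
)
print(model.invoke([message]).content)
```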

### Callbacks

LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.

You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.

#### Callback Events

| Event            | Event Trigger                                | Associated Method     |
|------------------|----------------------------------------------|-----------------------|
| Chat model start | When a chat model starts                     | `on_chat_model_start` |
| LLM start        | When an LLM starts                           | `on_llm_start`        |
| LLM new token    | When an LLM or chat model emits a new token  | `on_llm_new_token`    |
| LLM ends         | When an LLM or chat model ends               | `on_llm_end`          |
| LLM errors       | When an LLM or chat model errors             | `on_llm_error`        |
| Chain start      | When a chain starts running                  | `on_chain_start`      |
| Chain end        | When a chain ends                            | `on_chain_end`        |
| Chain error      | When a chain errors                          | `on_chain_error`      |
| Tool start       | When a tool starts running                   | `on_tool_start`       |
| Tool end         | When a tool ends                             | `on_tool_end`         |
| Tool error       | When a tool errors                           | `on_tool_error`       |
| Agent action     | When an agent takes an action                | `on_agent_action`     |
| Agent finish     | When an agent ends                           | `on_agent_finish`     |
| Retriever start  | When a retriever starts                      | `on_retriever_start`  |
| Retriever end    | When a retriever ends                        | `on_retriever_end`    |
| Retriever error  | When a retriever errors                      | `on_retriever_error`  |
| Text             | When arbitrary text is run                   | `on_text`             |
| Retry            | When a retry event is run                    | `on_retry`            |

#### Callback handlers

Callback handlers can either be `sync` or `async`:

* Sync callback handlers implement the [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
* Async callback handlers implement the [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.

During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html)), which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
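
A minimal sync handler might look like this sketch; only the events you care about need to be overridden:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class TokenLogger(BaseCallbackHandler):
    """Print each token as the model streams it."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        print(f"token: {token!r}")

    def on_llm_end(self, response: Any, **kwargs: Any) -> None:
        print("LLM finished")
```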

#### Passing callbacks

The callbacks are available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:

- **Request time callbacks**: Passed at the time of the request in addition to the input data.
  Available on all standard `Runnable` objects. These callbacks are INHERITED by all children
  of the object they are defined on. For example, `chain.invoke({"number": 25}, {"callbacks": [handler]})`.
- **Constructor callbacks**: `chain = TheNameOfSomeChain(callbacks=[handler])`. These callbacks
  are passed as arguments to the constructor of the object. The callbacks are scoped
  only to the object they are defined on, and are **not** inherited by any children of the object. Both styles are sketched below.

:::warning
Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children
of the object.
:::
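
Both styles, sketched with the `TokenLogger` handler and the `chain`/`ChatOpenAI` placeholders from the earlier sketches:

```python
handler = TokenLogger()

# Request time: inherited by every child run of this one invocation.
chain.invoke({"topic": "rivers"}, {"callbacks": [handler]})

# Constructor: scoped to this object only, never inherited by children.
model = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
```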

If you're creating a custom chain or runnable, you need to remember to propagate request time
callbacks to any child objects.

:::important Async in Python<=3.10

Any `RunnableLambda`, `RunnableGenerator`, or `Tool` that invokes other runnables
and is running async in Python<=3.10 has to propagate callbacks to child
objects manually. This is because LangChain cannot automatically propagate
callbacks to child objects in this case.

This is a common reason why you may fail to see events being emitted from custom
runnables or tools.
:::

## Techniques

### Function/tool calling
@@ -572,3 +648,7 @@ Table columns:
| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
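
For example, character-based splitting from the first row of the table (a minimal sketch; `long_document_text` is a placeholder string):

```python
from langchain_text_splitters import CharacterTextSplitter

splitter = CharacterTextSplitter(
    separator="\n\n",    # the user-defined character to split on
    chunk_size=1000,     # maximum characters per chunk
    chunk_overlap=100,   # characters shared between neighboring chunks
)
chunks = splitter.split_text(long_document_text)
```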
@@ -88,7 +88,7 @@ Concepts covered in `Integrations` should generally exist in `langchain_communit

### Guides and Ecosystem

The [Guides](/docs/tutorials) and [Ecosystem](https://docs.smith.langchain.com/) sections should contain guides that address higher-level problems than the sections above.
This includes, but is not limited to, considerations around productionization and development workflows.

These should contain mostly **How-to guides**, **Explanations**, and **Tutorials**.

@@ -132,7 +132,7 @@
}
],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"html_string = \"\"\"\n",
" <!DOCTYPE html>\n",
@@ -138,20 +138,10 @@
"execution_count": 5,
"id": "d9afb0ca",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/chestercurme/.pyenv/versions/3.10.4/envs/sandbox310/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 0.3.0. Use RunnableSequence, e.g., `prompt | llm` instead.\n",
" warn_deprecated(\n"
]
}
],
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain.chains import LLMChain\n",
"from langchain_core.output_parsers import BaseOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
@@ -180,7 +170,7 @@
"llm = ChatOpenAI(temperature=0)\n",
"\n",
"# Chain\n",
"llm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)\n",
"llm_chain = QUERY_PROMPT | llm | output_parser\n",
"\n",
"# Other inputs\n",
"question = \"What are the approaches to Task Decomposition?\""
@@ -189,14 +179,14 @@
{
"cell_type": "code",
"execution_count": 6,
"id": "2eca2d96-8057-4ed9-873d-fa1064c09acf",
"id": "59c75c56-dbd7-4887-b9ba-0b5b21069f51",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression analysis?', '4. What are the teachings of the course regarding regression?', '5. In what manner is regression covered in the course curriculum?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. What are the teachings of the course regarding regression?']\n"
]
},
{
@@ -16,7 +16,7 @@
"id": "711752cb-4f15-42a3-9838-a0c67f397771",
"metadata": {},
"source": [
"# How to attach runtime arguments to a Runnable\n",
"# How to add default invocation args to a Runnable\n",
"\n",
":::info Prerequisites\n",
"\n",
179 docs/docs/how_to/callbacks_async.ipynb Normal file
@@ -0,0 +1,179 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use callbacks in async environments\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
":::\n",
"\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event loop.\n",
"\n",
"\n",
":::{.callout-warning}\n",
"If you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe.\n",
":::\n",
"\n",
":::{.callout-danger}\n",
"\n",
"If you're on `python<=3.10`, you need to remember to propagate `config` or `callbacks` when invoking other runnables from within a `RunnableLambda`, `RunnableGenerator` or `@tool`. If you do not do this,\n",
"the callbacks will not be propagated to the child runnables being invoked.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zzzz....\n",
"Hi! I just woke up. Your llm is starting\n",
"Sync handler being called in a `thread_pool_executor`: token: Here\n",
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
"Sync handler being called in a `thread_pool_executor`: token: a\n",
"Sync handler being called in a `thread_pool_executor`: token: little\n",
"Sync handler being called in a `thread_pool_executor`: token: joke\n",
"Sync handler being called in a `thread_pool_executor`: token: for\n",
"Sync handler being called in a `thread_pool_executor`: token: you\n",
"Sync handler being called in a `thread_pool_executor`: token: :\n",
"Sync handler being called in a `thread_pool_executor`: token: \n",
"\n",
"Why\n",
"Sync handler being called in a `thread_pool_executor`: token: can\n",
"Sync handler being called in a `thread_pool_executor`: token: 't\n",
"Sync handler being called in a `thread_pool_executor`: token: a\n",
"Sync handler being called in a `thread_pool_executor`: token: bicycle\n",
"Sync handler being called in a `thread_pool_executor`: token: stan\n",
"Sync handler being called in a `thread_pool_executor`: token: d up\n",
"Sync handler being called in a `thread_pool_executor`: token: by\n",
"Sync handler being called in a `thread_pool_executor`: token: itself\n",
"Sync handler being called in a `thread_pool_executor`: token: ?\n",
"Sync handler being called in a `thread_pool_executor`: token: Because\n",
"Sync handler being called in a `thread_pool_executor`: token: it\n",
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
"Sync handler being called in a `thread_pool_executor`: token: two\n",
"Sync handler being called in a `thread_pool_executor`: token: -\n",
"Sync handler being called in a `thread_pool_executor`: token: tire\n",
"zzzz....\n",
"Hi! I just woke up. Your llm is ending\n"
]
},
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", message=AIMessage(content=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", id='run-8afc89e8-02c0-4522-8480-d96977240bd4-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('8afc89e8-02c0-4522-8480-d96977240bd4'))])"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import asyncio\n",
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.outputs import LLMResult\n",
"\n",
"\n",
"class MyCustomSyncHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" print(f\"Sync handler being called in a `thread_pool_executor`: token: {token}\")\n",
"\n",
"\n",
"class MyCustomAsyncHandler(AsyncCallbackHandler):\n",
" \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
" async def on_llm_start(\n",
" self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
" ) -> None:\n",
" \"\"\"Run when chain starts running.\"\"\"\n",
" print(\"zzzz....\")\n",
" await asyncio.sleep(0.3)\n",
" class_name = serialized[\"name\"]\n",
" print(\"Hi! I just woke up. Your llm is starting\")\n",
"\n",
" async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
" \"\"\"Run when chain ends running.\"\"\"\n",
" print(\"zzzz....\")\n",
" await asyncio.sleep(0.3)\n",
" print(\"Hi! I just woke up. Your llm is ending\")\n",
"\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in a list with our custom handler\n",
"chat = ChatAnthropic(\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=25,\n",
" streaming=True,\n",
" callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()],\n",
")\n",
"\n",
"await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned how to use callbacks in async environments.\n",
"\n",
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
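The `python<=3.10` caveat in the notebook above applies to tools as well. A minimal sketch of forwarding config from inside a `@tool` (the tool and the nested chain are toy examples, and it assumes your version injects `RunnableConfig`-annotated arguments):

```python
from langchain_core.runnables import RunnableConfig, RunnableLambda
from langchain_core.tools import tool

reverse_chain = RunnableLambda(lambda s: s[::-1])


@tool
async def reverse_text(text: str, config: RunnableConfig) -> str:
    """Reverse the input text."""
    # Forward `config` so callbacks reach the nested run on Python<=3.10.
    return await reverse_chain.ainvoke(text, config=config)
```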
149 docs/docs/how_to/callbacks_attach.ipynb Normal file
@@ -0,0 +1,149 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to attach callbacks to a runnable\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"- [Chaining runnables](/docs/how_to/sequence)\n",
"- [Attach runtime arguments to a Runnable](/docs/how_to/binding)\n",
"\n",
":::\n",
"\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"\n",
":::{.callout-important}\n",
"\n",
"`with_config()` binds a configuration which will be interpreted as **runtime** configuration. So these callbacks will propagate to all child components.\n",
":::\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chain RunnableSequence started\n",
"Chain ChatPromptTemplate started\n",
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'))]] llm_output={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0')"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain_with_callbacks = chain.with_config(callbacks=callbacks)\n",
"\n",
"chain_with_callbacks.invoke({\"number\": \"2\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The bound callbacks will run for all nested module runs.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to attach callbacks to a chain.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks in at runtime](/docs/how_to/callbacks_runtime)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
141 docs/docs/how_to/callbacks_constructor.ipynb Normal file
@@ -0,0 +1,141 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks into a module constructor\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"Most LangChain modules allow you to pass `callbacks` directly into the constructor (i.e., initializer). In this case, the callbacks will only be called for that instance (and any nested runs).\n",
"\n",
":::{.callout-warning}\n",
"Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. This can lead to confusing behavior,\n",
"and it's generally better to pass callbacks as a run time argument.\n",
":::\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0')"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", callbacks=callbacks)\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain.invoke({\"number\": \"2\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see that we only see events from the chat model run - no chain events from the prompt or broader chain.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks into a constructor.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/docs/how_to/callbacks_runtime)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
140 docs/docs/how_to/callbacks_runtime.ipynb Normal file
@@ -0,0 +1,140 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks in at runtime\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing a run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chain RunnableSequence started\n",
"Chain ChatPromptTemplate started\n",
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'))]] llm_output={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain.invoke({\"number\": \"2\"}, config={\"callbacks\": callbacks})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks at runtime.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -170,7 +170,7 @@
"outputs": [],
"source": [
"# We can do the same thing with a SQLite cache\n",
"from langchain.cache import SQLiteCache\n",
"from langchain_community.cache import SQLiteCache\n",
"\n",
"set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
]
@@ -165,7 +165,7 @@
}
],
"source": [
"from langchain.memory import ChatMessageHistory\n",
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"\n",
"demo_ephemeral_chat_history = ChatMessageHistory()\n",
"\n",
@@ -336,7 +336,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ChatMessageHistory\n",
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"\n",
"demo_ephemeral_chat_history_for_chain = ChatMessageHistory()\n",
@@ -89,7 +89,7 @@
}
],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
@@ -312,8 +312,8 @@
}
],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
141 docs/docs/how_to/custom_callbacks.ipynb Normal file
@@ -0,0 +1,141 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to create custom callback handlers\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"\n",
":::\n",
"\n",
"LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n",
"\n",
"To create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
"\n",
"In the example below, we'll implement streaming with a custom handler.\n",
"\n",
"In our custom callback handler `MyCustomHandler`, we implement the `on_llm_new_token` handler to print the token we have just received. We then attach our custom handler to the model object as a constructor callback."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My custom handler, token: Here\n",
"My custom handler, token: 's\n",
"My custom handler, token: a\n",
"My custom handler, token: bear\n",
"My custom handler, token: joke\n",
"My custom handler, token: for\n",
"My custom handler, token: you\n",
"My custom handler, token: :\n",
"My custom handler, token: \n",
"\n",
"Why\n",
"My custom handler, token: di\n",
"My custom handler, token: d the\n",
"My custom handler, token: bear\n",
"My custom handler, token: dissol\n",
"My custom handler, token: ve\n",
"My custom handler, token: in\n",
"My custom handler, token: water\n",
"My custom handler, token: ?\n",
"My custom handler, token: \n",
"Because\n",
"My custom handler, token: it\n",
"My custom handler, token: was\n",
"My custom handler, token: a\n",
"My custom handler, token: polar\n",
"My custom handler, token: bear\n",
"My custom handler, token: !\n"
]
}
],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" print(f\"My custom handler, token: {token}\")\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([\"Tell me a joke about {animal}\"])\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in our custom handler as a list to the callbacks parameter\n",
"model = ChatAnthropic(\n",
" model=\"claude-3-sonnet-20240229\", streaming=True, callbacks=[MyCustomHandler()]\n",
")\n",
"\n",
"chain = prompt | model\n",
"\n",
"response = chain.invoke({\"animal\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to create your own custom callback handlers.\n",
"\n",
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -258,11 +258,11 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"from langchain.callbacks.manager import (\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
@@ -12,7 +12,7 @@
"\n",
"- Verbose Mode: This adds print statements for \"important\" events in your chain.\n",
"- Debug Mode: This adds logging statements for ALL events in your chain.\n",
"- LangSmith Tracing: This logs events to [LangSmith](/docs/langsmith/) to allow for visualization there.\n",
"- LangSmith Tracing: This logs events to [LangSmith](https://docs.smith.langchain.com/) to allow for visualization there.\n",
"\n",
"| | Verbose Mode | Debug Mode | LangSmith Tracing |\n",
"|------------------------|--------------|------------|-------------------|\n",
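For reference, the verbose and debug modes mentioned in the hunk above are toggled through global switches; a minimal sketch:

```python
from langchain.globals import set_debug, set_verbose

set_verbose(True)  # print statements for "important" events in your chain
set_debug(True)    # logging statements for ALL events in your chain
```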
@@ -463,7 +463,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"cur_idx = -1\n",
"semantic_snippets = []\n",
200 docs/docs/how_to/dynamic_chain.ipynb Normal file
@@ -0,0 +1,200 @@
{
"cells": [
{
"cell_type": "raw",
"id": "77bf57fb-e990-45f2-8b5f-c76388b05966",
"metadata": {},
"source": [
"---\n",
"keywords: [LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "50d57bf2-7104-4570-b3e5-90fd71e1bea1",
"metadata": {},
"source": [
"# How to create a dynamic (self-constructing) chain\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
"- [How to turn any function into a runnable](/docs/how_to/functions)\n",
"\n",
":::\n",
"\n",
"Sometimes we want to construct parts of a chain at runtime, depending on the chain inputs ([routing](/docs/how_to/routing/) is the most common example of this). We can create dynamic chains like this using a very useful property of RunnableLambdas, which is that if a RunnableLambda returns a Runnable, that Runnable is itself invoked. Let's see an example.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs\n",
" customVarName=\"llm\"\n",
"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "406bffc2-86d0-4cb9-9262-5c1e3442397a",
"metadata": {},
"outputs": [],
"source": [
"# | echo: false\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "0ae6692b-983e-40b8-aa2a-6c078d945b9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"According to the context provided, Egypt's population in 2024 is estimated to be about 111 million.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
"\n",
"contextualize_instructions = \"\"\"Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text).\"\"\"\n",
"contextualize_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", contextualize_instructions),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"contextualize_question = contextualize_prompt | llm | StrOutputParser()\n",
"\n",
"qa_instructions = (\n",
" \"\"\"Answer the user question given the following context:\\n\\n{context}.\"\"\"\n",
")\n",
"qa_prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", qa_instructions), (\"human\", \"{question}\")]\n",
")\n",
"\n",
"\n",
"@chain\n",
"def contextualize_if_needed(input_: dict) -> Runnable:\n",
" if input_.get(\"chat_history\"):\n",
" # NOTE: This is returning another Runnable, not an actual output.\n",
" return contextualize_question\n",
" else:\n",
" return RunnablePassthrough()\n",
"\n",
"\n",
"@chain\n",
"def fake_retriever(input_: dict) -> str:\n",
" return \"egypt's population in 2024 is about 111 million\"\n",
"\n",
"\n",
"full_chain = (\n",
" RunnablePassthrough.assign(question=contextualize_if_needed).assign(\n",
" context=fake_retriever\n",
" )\n",
" | qa_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"full_chain.invoke(\n",
" {\n",
" \"question\": \"what about egypt\",\n",
" \"chat_history\": [\n",
" (\"human\", \"what's the population of indonesia\"),\n",
" (\"ai\", \"about 276 million\"),\n",
" ],\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5076ddb4-4a99-47ad-b549-8ac27ca3e2c6",
"metadata": {},
"source": [
"The key here is that `contextualize_if_needed` returns another Runnable and not an actual output. This returned Runnable is itself run when the full chain is executed.\n",
"\n",
"Looking at the trace we can see that, since we passed in chat_history, we executed the contextualize_question chain as part of the full chain: https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r"
]
},
{
"cell_type": "markdown",
"id": "4fe6ca44-a643-4859-a290-be68403f51f0",
"metadata": {},
"source": [
"Note that the streaming, batching, etc. capabilities of the returned Runnable are all preserved"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "6def37fa-5105-4090-9b07-77cb488ecd9c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"What\n",
" is\n",
" the\n",
" population\n",
" of\n",
" Egypt\n",
"?\n"
]
}
],
"source": [
"for chunk in contextualize_if_needed.stream(\n",
" {\n",
" \"question\": \"what about egypt\",\n",
" \"chat_history\": [\n",
" (\"human\", \"what's the population of indonesia\"),\n",
" (\"ai\", \"about 276 million\"),\n",
" ],\n",
" }\n",
"):\n",
" print(chunk)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"language": "python",
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -17,8 +17,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain.prompts.example_selector import LengthBasedExampleSelector\n",
"from langchain_core.example_selectors import LengthBasedExampleSelector\n",
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
"\n",
"# Examples of a pretend task of creating antonyms.\n",
"examples = [\n",
@@ -17,12 +17,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain.prompts.example_selector import (\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.example_selectors import (\n",
" MaxMarginalRelevanceExampleSelector,\n",
" SemanticSimilarityExampleSelector,\n",
")\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"example_prompt = PromptTemplate(\n",
@@ -19,8 +19,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain.prompts.example_selector.ngram_overlap import NGramOverlapExampleSelector\n",
"from langchain_community.example_selectors import NGramOverlapExampleSelector\n",
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
"\n",
"example_prompt = PromptTemplate(\n",
" input_variables=[\"input\", \"output\"],\n",
@@ -17,9 +17,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
"from langchain_chroma import Chroma\n",
"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"example_prompt = PromptTemplate(\n",
@@ -69,7 +69,7 @@
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"\n",
@@ -1,11 +1,21 @@
{
"cells": [
{
"cell_type": "raw",
"id": "018f3868-e60d-4db6-a1c6-c6633c66b1f4",
"metadata": {},
"source": [
"---\n",
"keywords: [LCEL, fallbacks]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "19c9cbd6",
"metadata": {},
"source": [
"# Fallbacks\n",
"# How to add fallbacks to a runnable\n",
"\n",
"When working with language models, you may often encounter issues from the underlying APIs, whether these be rate limiting or downtime. Therefore, as you go to move your LLM applications into production it becomes more and more important to safeguard against these. That's why we've introduced the concept of fallbacks. \n",
"\n",
@@ -43,7 +53,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_openai import ChatOpenAI"
]
},
@@ -80,8 +90,8 @@
"outputs": [],
"source": [
"# Note that we set max_retries = 0 to avoid retrying on RateLimits, etc\n",
"openai_llm = ChatOpenAI(max_retries=0)\n",
"anthropic_llm = ChatAnthropic()\n",
"openai_llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", max_retries=0)\n",
"anthropic_llm = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
"llm = openai_llm.with_fallbacks([anthropic_llm])"
]
},
@@ -447,7 +457,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.9.1"
}
},
"nbformat": 4,
@@ -45,7 +45,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"example_prompt = PromptTemplate.from_template(\"Question: {question}\\n{answer}\")"
]
@@ -222,7 +222,7 @@
}
],
"source": [
"from langchain.prompts.few_shot import FewShotPromptTemplate\n",
"from langchain_core.prompts import FewShotPromptTemplate\n",
"\n",
"prompt = FewShotPromptTemplate(\n",
" examples=examples,\n",
@@ -282,8 +282,8 @@
}
],
"source": [
"from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
"from langchain_chroma import Chroma\n",
"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"example_selector = SemanticSimilarityExampleSelector.from_examples(\n",
@@ -88,10 +88,7 @@
},
"outputs": [],
"source": [
"from langchain.prompts import (\n",
" ChatPromptTemplate,\n",
" FewShotChatMessagePromptTemplate,\n",
")\n",
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"\n",
"examples = [\n",
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
@@ -218,8 +215,8 @@
},
"outputs": [],
"source": [
"from langchain.prompts import SemanticSimilarityExampleSelector\n",
"from langchain_chroma import Chroma\n",
"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"examples = [\n",
@@ -305,10 +302,7 @@
}
],
"source": [
"from langchain.prompts import (\n",
" ChatPromptTemplate,\n",
" FewShotChatMessagePromptTemplate,\n",
")\n",
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"\n",
"# Define the few-shot prompt.\n",
"few_shot_prompt = FewShotChatMessagePromptTemplate(\n",
@@ -167,7 +167,7 @@
"source": [
"Above, the `@chain` decorator is used to convert `custom_chain` into a runnable, which we invoke with the `.invoke()` method.\n",
"\n",
"If you are using tracing with [LangSmith](/docs/langsmith/), you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath.\n",
"If you are using tracing with [LangSmith](https://docs.smith.langchain.com/), you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath.\n",
"\n",
"## Automatic coercion in chains\n",
"\n",
@@ -300,7 +300,7 @@
"Entities in the question map to the following database values:\n",
"{entities_list}\n",
"Question: {question}\n",
"Cypher query:\"\"\" # noqa: E501\n",
"Cypher query:\"\"\"\n",
"\n",
"cypher_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
@@ -377,7 +377,7 @@
"response_template = \"\"\"Based on the question, Cypher query, and Cypher response, write a natural language response:\n",
"Question: {question}\n",
"Cypher query: {query}\n",
"Cypher Response: {response}\"\"\" # noqa: E501\n",
"Cypher Response: {response}\"\"\"\n",
"\n",
"response_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
@@ -177,14 +177,13 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"from langchain.callbacks.manager import (\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain.tools import BaseTool\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"description_query = \"\"\"\n",
"MATCH (m:Movie|Person)\n",
@@ -227,14 +226,13 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"from langchain.callbacks.manager import (\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain.tools import BaseTool\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class InformationInput(BaseModel):\n",
@@ -287,8 +285,8 @@
"from langchain.agents import AgentExecutor\n",
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.messages import AIMessage, HumanMessage\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.utils.function_calling import convert_to_openai_function\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
@@ -19,27 +19,29 @@ For comprehensive descriptions of every class and function see the [API Referenc

This highlights functionality that is core to using LangChain.

- [How to: return structured data from an LLM](/docs/how_to/structured_output/)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: return structured data from a model](/docs/how_to/structured_output/)
- [How to: use a model to call tools](/docs/how_to/tool_calling/)
- [How to: stream runnables](/docs/how_to/streaming)
- [How to: debug your LLM apps](/docs/how_to/debugging/)

## LangChain Expression Language (LCEL)

LangChain Expression Language is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.
[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.

[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.
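For orientation before the list below, a minimal sketch of the Runnable composition LCEL is built on (the model choice is illustrative):

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# `|` composes Runnables into a sequence; every piece shares one interface.
chain = (
    ChatPromptTemplate.from_template("Tell me a joke about {topic}")
    | ChatAnthropic(model="claude-3-sonnet-20240229")
    | StrOutputParser()
)
print(chain.invoke({"topic": "bears"}))
```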
- [How to: chain runnables](/docs/how_to/sequence)
|
||||
- [How to: stream runnables](/docs/how_to/streaming)
|
||||
- [How to: invoke runnables in parallel](/docs/how_to/parallel/)
|
||||
- [How to: attach runtime arguments to a runnable](/docs/how_to/binding/)
|
||||
- [How to: run custom functions](/docs/how_to/functions)
|
||||
- [How to: pass through arguments from one step to the next](/docs/how_to/passthrough)
|
||||
- [How to: add values to a chain's state](/docs/how_to/assign)
|
||||
- [How to: configure a chain at runtime](/docs/how_to/configure)
|
||||
- [How to: add message history](/docs/how_to/message_history)
|
||||
- [How to: route execution within a chain](/docs/how_to/routing)
|
||||
- [How to: add default invocation args to runnables](/docs/how_to/binding/)
|
||||
- [How to: turn any function into a runnable](/docs/how_to/functions)
|
||||
- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough)
|
||||
- [How to: configure runnable behavior at runtime](/docs/how_to/configure)
|
||||
- [How to: add message history (memory) to a chain](/docs/how_to/message_history)
|
||||
- [How to: route between sub-chains](/docs/how_to/routing)
|
||||
- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
|
||||
- [How to: inspect runnables](/docs/how_to/inspect)
|
||||
- [How to: add fallbacks](/docs/how_to/fallbacks)
|
||||
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
|
||||
|
||||
## Components
|
||||
|
||||
@@ -169,11 +171,15 @@ LangChain Tools contain a description of the tool (to pass to the language model

- [How to: create custom tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: use tools with LLMs that do not support tool calling natively](/docs/how_to/tools_prompting)
- [How to: convert LangChain tools to OpenAI functions](/docs/how_to/tools_as_openai_functions)
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
- [How to: call tools using multi-modal data](/docs/how_to/tool_calls_multi_modal)

### Multimodal

- [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/)
- [How to: use multimodal prompts](/docs/how_to/multimodal_prompts/)

### Agents

@@ -186,6 +192,14 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith

- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)

### Callbacks

- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)

### Custom

All of LangChain components can easily be extended to support your own versions.

@@ -195,6 +209,7 @@ All of LangChain components can easily be extended to support your own versions.

- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
- [How to: write a custom document loader](/docs/how_to/document_loader_custom)
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: define a custom tool](/docs/how_to/custom_tools)
@@ -60,7 +60,7 @@
" * document addition by id (`add_documents` method with `ids` argument)\n",
" * delete by id (`delete` method with `ids` argument)\n",
"\n",
"Compatible Vectorstores: `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
" \n",
"## Caution\n",
"\n",
@@ -786,7 +786,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders.base import BaseLoader\n",
"from langchain_core.document_loaders import BaseLoader\n",
"\n",
"\n",
"class MyCustomLoader(BaseLoader):\n",
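The hunk above documents the id-based operations the indexing API relies on. A minimal sketch of what those two operations look like in practice, assuming `langchain-community`, `faiss-cpu`, and an OpenAI key are available (FAISS is one of the compatible vectorstores listed):

```python
# Sketch only: id-based addition and deletion on a compatible vectorstore.
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

vectorstore = FAISS.from_documents(
    [Document(page_content="placeholder")], OpenAIEmbeddings()
)
# Document addition by id: passing ids lets the indexing API upsert cleanly.
vectorstore.add_documents([Document(page_content="hello world")], ids=["doc-1"])
# Delete by id: removes exactly the documents the index no longer tracks.
vectorstore.delete(ids=["doc-1"])
```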
@@ -39,9 +39,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
@@ -41,7 +41,7 @@ pip install langchain-core
```

## LangChain community
The `langchain-community` package contains third-party integrations. It is automatically installed by `langchain`, but can also be used separately. Install with:
The `langchain-community` package contains third-party integrations. Install with:

```bash
pip install langchain-community

1142 docs/docs/how_to/lcel_cheatsheet.ipynb (new file)
File diff suppressed because it is too large
@@ -119,7 +119,7 @@
"outputs": [],
"source": [
"# We can do the same thing with a SQLite cache\n",
"from langchain.cache import SQLiteCache\n",
"from langchain_community.cache import SQLiteCache\n",
"\n",
"set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
]
@@ -134,8 +134,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama2\", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])\n",
|
||||
@@ -288,9 +287,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_community.llms import LlamaCpp\n",
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n",
|
||||
|
||||
@@ -52,11 +52,11 @@
],
"source": [
"from langchain.chains import LLMChain, StuffDocumentsChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_transformers import (\n",
"    LongContextReorder,\n",
")\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_huggingface import HuggingFaceEmbeddings\n",
"from langchain_openai import OpenAI\n",
"\n",
@@ -512,7 +512,7 @@
"id": "36f43b87-655c-4f64-aa7b-bd8c1955d8e5",
"metadata": {},
"source": [
"### [LangSmith](/docs/langsmith)\n",
"### [LangSmith](https://docs.smith.langchain.com)\n",
"\n",
"LangSmith is especially useful for something like message history injection, where it can be hard to otherwise understand what the inputs are to various parts of the chain.\n",
"\n",
@@ -344,7 +344,7 @@
],
"source": [
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain.memory import ChatMessageHistory\n",
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from langchain_core.tools import tool\n",
@@ -423,7 +423,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
"from langchain_core.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
"\n",
"chain = (\n",
"    {\"doc\": lambda x: x.page_content}\n",
228 docs/docs/how_to/multimodal_inputs.ipynb (new file)
@@ -0,0 +1,228 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4facdf7f-680e-4d28-908b-2b8408e2a741",
"metadata": {},
"source": [
"# How to pass multimodal data directly to models\n",
"\n",
"Here we demonstrate how to pass multimodal input directly to models. \n",
"We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n",
"For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n",
"\n",
"In this example we will ask a model to describe an image."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
"metadata": {},
"outputs": [],
"source": [
"image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fb896ce9",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")"
]
},
{
"cell_type": "markdown",
"id": "4fca4da7",
"metadata": {},
"source": [
"The most commonly supported way to pass in images is to pass it in as a byte string.\n",
"This should work for most model integrations."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "9ca1040c",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"\n",
"import httpx\n",
"\n",
"image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ec680b6b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions.\n"
]
}
],
"source": [
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
"        {\n",
"            \"type\": \"image_url\",\n",
"            \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_data}\"},\n",
"        },\n",
"    ],\n",
")\n",
"response = model.invoke([message])\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "8656018e-c56d-47d2-b2be-71e87827f90a",
"metadata": {},
"source": [
"We can feed the image URL directly in a content block of type \"image_url\". Note that only some model providers support this."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered clouds, suggesting good visibility and a likely pleasant temperature. The bright sunlight is casting distinct shadows on the grass and vegetation, indicating it is likely daytime, possibly late morning or early afternoon. The overall ambiance suggests a warm and inviting day, suitable for outdoor activities.\n"
]
}
],
"source": [
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
"        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
"    ],\n",
")\n",
"response = model.invoke([message])\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "1c470309",
"metadata": {},
"source": [
"We can also pass in multiple images."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "325fb4ca",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Yes, the two images are the same. They both depict a wooden boardwalk extending through a grassy field under a blue sky with light clouds. The scenery, lighting, and composition are identical.\n"
]
}
],
"source": [
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"are these two images the same?\"},\n",
"        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
"        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
"    ],\n",
")\n",
"response = model.invoke([message])\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "71bd28cf-d76c-44e2-a55e-c5f265db986e",
"metadata": {},
"source": [
"## Tool calls\n",
"\n",
"Some multimodal models support [tool calling](/docs/concepts/#functiontool-calling) features as well. To call tools using such models, simply bind tools to them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data)."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "cd22ea82-2f93-46f9-9f7a-6aaf479fcaa9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_BSX4oq4SKnLlp2WlzDhToHBr'}]\n"
]
}
],
"source": [
"from typing import Literal\n",
"\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def weather_tool(weather: Literal[\"sunny\", \"cloudy\", \"rainy\"]) -> None:\n",
"    \"\"\"Describe the weather\"\"\"\n",
"    pass\n",
"\n",
"\n",
"model_with_tools = model.bind_tools([weather_tool])\n",
"\n",
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
"        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
"    ],\n",
")\n",
"response = model_with_tools.invoke([message])\n",
"print(response.tool_calls)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
184 docs/docs/how_to/multimodal_prompts.ipynb (new file)
@@ -0,0 +1,184 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4facdf7f-680e-4d28-908b-2b8408e2a741",
"metadata": {},
"source": [
"# How to use multimodal prompts\n",
"\n",
"Here we demonstrate how to use prompt templates to format multimodal inputs to models. \n",
"\n",
"In this example we will ask a model to describe an image."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"\n",
"import httpx\n",
"\n",
"image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
"image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2671f995",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "4ee35e4f",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\"system\", \"Describe the image provided\"),\n",
"        (\n",
"            \"user\",\n",
"            [{\"type\": \"image_url\", \"image_url\": \"data:image/jpeg;base64,{image_data}\"}],\n",
"        ),\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "089f75c2",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "02744b06",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The image depicts a sunny day with a beautiful blue sky filled with scattered white clouds. The sky has varying shades of blue, ranging from a deeper hue near the horizon to a lighter, almost pale blue higher up. The white clouds are fluffy and scattered across the expanse of the sky, creating a peaceful and serene atmosphere. The lighting and cloud patterns suggest pleasant weather conditions, likely during the daytime hours on a mild, sunny day in an outdoor natural setting.\n"
]
}
],
"source": [
"response = chain.invoke({\"image_data\": image_data})\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "e9b9ebf6",
"metadata": {},
"source": [
"We can also pass in multiple images."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "02190ee3",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\"system\", \"compare the two pictures provided\"),\n",
"        (\n",
"            \"user\",\n",
"            [\n",
"                {\n",
"                    \"type\": \"image_url\",\n",
"                    \"image_url\": \"data:image/jpeg;base64,{image_data1}\",\n",
"                },\n",
"                {\n",
"                    \"type\": \"image_url\",\n",
"                    \"image_url\": \"data:image/jpeg;base64,{image_data2}\",\n",
"                },\n",
"            ],\n",
"        ),\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "42af057b",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "513abe00",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The two images provided are identical. Both images feature a wooden boardwalk path extending through a lush green field under a bright blue sky with some clouds. The perspective, colors, and elements in both images are exactly the same.\n"
]
}
],
"source": [
"response = chain.invoke({\"image_data1\": image_data, \"image_data2\": image_data})\n",
"print(response.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ea8152c3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -23,7 +23,7 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI"
]
@@ -17,13 +17,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers import (\n",
"    OutputFixingParser,\n",
"    PydanticOutputParser,\n",
")\n",
"from langchain.prompts import (\n",
"    PromptTemplate,\n",
")\n",
"from langchain.output_parsers import OutputFixingParser\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI, OpenAI"
]
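For context on the imports reorganized in the hunk above, a minimal sketch of how `OutputFixingParser` wraps another parser: if the inner `PydanticOutputParser` fails on malformed model output, the wrapped LLM is asked to repair it. This assumes an OpenAI API key is set; the `Actor` schema is illustrative only.

```python
from langchain.output_parsers import OutputFixingParser
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Actor(BaseModel):
    name: str = Field(description="name of an actor")


parser = PydanticOutputParser(pydantic_object=Actor)
fixing_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())

# Single quotes are invalid JSON; the fixing parser asks the LLM to repair
# the string before the inner parser validates it against the schema.
fixing_parser.parse("{'name': 'Tom Hanks'}")
```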
@@ -941,7 +941,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.9.1"
}
},
"nbformat": 4,
@@ -83,9 +83,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"texts = [\"Harrison worked at Kensho\", \"Ankush worked at Facebook\"]\n",
"embeddings = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n",
@@ -83,9 +83,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"texts = [\"Harrison worked at Kensho\"]\n",
"embeddings = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n",
@@ -85,9 +85,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"texts = [\"Harrison worked at Kensho\"]\n",
"embeddings = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n",
@@ -16,7 +16,7 @@
"id": "4b47436a",
"metadata": {},
"source": [
"# How to route execution within a chain\n",
"# How to route between sub-chains\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -335,7 +335,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utils.math import cosine_similarity\n",
"from langchain_community.utils.math import cosine_similarity\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
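A sketch of the semantic-routing pattern these imports support, under the assumption of an OpenAI key; the two template strings are illustrative placeholders: embed the candidate prompts once, embed each query, and route to the closest prompt.

```python
from langchain_community.utils.math import cosine_similarity
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
templates = [
    "You are an expert in physics. Answer: {query}",
    "You are an expert in math. Answer: {query}",
]
template_embeddings = embeddings.embed_documents(templates)


def route(input_dict: dict) -> PromptTemplate:
    # Pick the template whose embedding is closest to the query embedding.
    query_embedding = embeddings.embed_query(input_dict["query"])
    best = cosine_similarity([query_embedding], template_embeddings)[0].argmax()
    return PromptTemplate.from_template(templates[best])


# RunnableLambda invokes the returned PromptTemplate with the same input,
# so router.invoke({"query": "What is a black hole?"}) yields the routed prompt.
router = RunnableLambda(route)
```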
@@ -30,7 +30,7 @@
"\n",
"The resulting [`RunnableSequence`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n",
"\n",
"## The pipe operator\n",
"## The pipe operator: `|`\n",
"\n",
"To show off how this works, let's go through an example. We'll walk through a common pattern in LangChain: using a [prompt template](/docs/how_to#prompt-templates) to format input into a [chat model](/docs/how_to#chat-models), and finally converting the chat message output into a string with an [output parser](/docs/how_to#output-parsers).\n",
"\n",
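A minimal sketch of the pattern that hunk describes, assuming an OpenAI API key and the `langchain-openai` package: the pipe operator composes a prompt template, a chat model, and an output parser into one `RunnableSequence`.

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
model = ChatOpenAI(model="gpt-4o")

# Each step's output feeds the next; the whole sequence is itself a runnable.
chain = prompt | model | StrOutputParser()
chain.invoke({"topic": "bears"})  # also supports .stream() and .batch()
```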
@@ -230,11 +230,28 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"Or the abbreviated:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"composed_chain_with_pipe = RunnableParallel({\"joke\": chain}).pipe(\n",
"    analysis_prompt, model, StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Related\n",
"\n",
"You now know some ways to chain two runnables together.\n",
"\n",
"To learn more, see the other how-to guides on runnables in this section."
"- [Streaming](/docs/how_to/streaming/): Check out the streaming guide to understand the streaming behavior of a chain\n",
"- "
]
}
],
@@ -428,7 +428,7 @@
}
],
"source": [
"from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser\n",
"from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser\n",
"\n",
"parser = JsonOutputKeyToolsParser(key_name=tool.name, first_tool_only=True)\n",
"(llm_with_tools | parser).invoke(\n",
@@ -503,7 +503,7 @@
}
],
"source": [
"chain = prompt | llm_with_tools | parser | tool  # noqa\n",
"chain = prompt | llm_with_tools | parser | tool\n",
"chain.invoke({\"question\": \"What's the correlation between age and fare\"})"
]
},
@@ -262,7 +262,7 @@
"    return tables\n",
"\n",
"\n",
"table_chain = category_chain | get_tables  # noqa\n",
"table_chain = category_chain | get_tables\n",
"table_chain.invoke({\"input\": \"What are all the genres of Alanis Morisette songs\"})"
]
},
@@ -1524,7 +1524,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.9.1"
}
},
"nbformat": 4,
@@ -518,7 +518,7 @@
"\n",
"### Using `PydanticOutputParser`\n",
"\n",
"The following example uses the built-in [`PydanticOutputParser`](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html) to parse the output of a chat model prompted to match a the given Pydantic schema. Note that we are adding `format_instructions` directly to the prompt from a method on the parser:"
"The following example uses the built-in [`PydanticOutputParser`](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html) to parse the output of a chat model prompted to match the given Pydantic schema. Note that we are adding `format_instructions` directly to the prompt from a method on the parser:"
]
},
{
@@ -205,7 +205,7 @@
"source": [
"import datetime\n",
"\n",
"from langchain.utils import mock_now"
"from langchain_core.utils import mock_now"
]
},
{
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use a chat model to call tools\n",
"# How to use a model to call tools\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -33,7 +33,7 @@
"result.\n",
"\n",
"However, tool calling goes beyond [structured output](/docs/how_to/structured_output/)\n",
"since you can pass responses to caled tools back to the model to create longer interactions.\n",
"since you can pass responses from called tools back to the model to create longer interactions.\n",
"For instance, given a search engine tool, an LLM might handle a \n",
"query by first issuing a call to the search engine with arguments. The system calling the LLM can \n",
"receive the tool call, execute it, and return the output to the LLM to inform its \n",
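A sketch of the round trip that hunk describes, assuming an OpenAI key; the `add` tool is illustrative: the model emits a tool call, the caller executes it, and the result goes back to the model as a `ToolMessage` to inform the final response.

```python
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


llm = ChatOpenAI(model="gpt-4o").bind_tools([add])
messages = [HumanMessage("What is 2 + 3?")]

# First pass: the model responds with a tool call rather than an answer.
ai_msg = llm.invoke(messages)
messages.append(ai_msg)

# Execute each requested call and return its output under the call's id.
for tool_call in ai_msg.tool_calls:
    result = add.invoke(tool_call["args"])
    messages.append(ToolMessage(str(result), tool_call_id=tool_call["id"]))

# Second pass: the model now answers using the tool output.
final = llm.invoke(messages)
```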
@@ -1,160 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4facdf7f-680e-4d28-908b-2b8408e2a741",
"metadata": {},
"source": [
"# How to call tools with multi-modal data\n",
"\n",
"Here we demonstrate how to call tools with multi-modal data, such as images.\n",
"\n",
"Some multi-modal models, such as those that can reason over images or audio, support [tool calling](/docs/concepts/#functiontool-calling) features as well.\n",
"\n",
"To call tools using such models, simply bind tools to them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data).\n",
"\n",
"Below, we demonstrate examples using [OpenAI](/docs/integrations/platforms/openai) and [Anthropic](/docs/integrations/platforms/anthropic). We will use the same image and tool in all cases. Let's first select an image, and build a placeholder tool that expects as input the string \"sunny\", \"cloudy\", or \"rainy\". We will ask the models to describe the weather in the image."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
"metadata": {},
"outputs": [],
"source": [
"from typing import Literal\n",
"\n",
"from langchain_core.tools import tool\n",
"\n",
"image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
"\n",
"\n",
"@tool\n",
"def weather_tool(weather: Literal[\"sunny\", \"cloudy\", \"rainy\"]) -> None:\n",
"    \"\"\"Describe the weather\"\"\"\n",
"    pass"
]
},
{
"cell_type": "markdown",
"id": "8656018e-c56d-47d2-b2be-71e87827f90a",
"metadata": {},
"source": [
"## OpenAI\n",
"\n",
"For OpenAI, we can feed the image URL directly in a content block of type \"image_url\":"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_mRYL50MtHdeNuNIjSCm5UPmB'}]\n"
]
}
],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\").bind_tools([weather_tool])\n",
"\n",
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
"        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
"    ],\n",
")\n",
"response = model.invoke([message])\n",
"print(response.tool_calls)"
]
},
{
"cell_type": "markdown",
"id": "e5738224-1109-4bf8-8976-ff1570dd1d46",
"metadata": {},
"source": [
"Note that we recover tool calls with parsed arguments in LangChain's [standard format](/docs/how_to/tool_calling) in the model response."
]
},
{
"cell_type": "markdown",
"id": "0cee63ff-e09f-4dd8-8323-912edbde94f6",
"metadata": {},
"source": [
"## Anthropic\n",
"\n",
"For Anthropic, we can format a base64-encoded image into a content block of type \"image\", as below:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d90c4590-71c8-42b1-99ff-03a9eca8082e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'toolu_016m9KfknJqx5fVRYk4tkF6s'}]\n"
]
}
],
"source": [
"import base64\n",
"\n",
"import httpx\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")\n",
"\n",
"model = ChatAnthropic(model=\"claude-3-sonnet-20240229\").bind_tools([weather_tool])\n",
"\n",
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
"        {\n",
"            \"type\": \"image\",\n",
"            \"source\": {\n",
"                \"type\": \"base64\",\n",
"                \"media_type\": \"image/jpeg\",\n",
"                \"data\": image_data,\n",
"            },\n",
"        },\n",
"    ],\n",
")\n",
"response = model.invoke([message])\n",
"print(response.tool_calls)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -45,7 +45,7 @@
"id": "36a9c6fc-8264-462f-b8d7-9c7bbec22ef9",
"metadata": {},
"source": [
"If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:"
"If you'd like to trace your runs in [LangSmith](https://docs.smith.langchain.com/) uncomment and set the following environment variables:"
]
},
{
@@ -37,7 +37,7 @@
"id": "68107597-0c8c-4bb5-8c12-9992fabdf71a",
"metadata": {},
"source": [
"If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:"
"If you'd like to trace your runs in [LangSmith](https://docs.smith.langchain.com/) uncomment and set the following environment variables:"
]
},
{
@@ -7,7 +7,16 @@
|
||||
"source": [
|
||||
"# How to add a human-in-the-loop for tools\n",
|
||||
"\n",
|
||||
"There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked."
|
||||
"There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.\n",
|
||||
"\n",
|
||||
":::{.callout-info}\n",
|
||||
"\n",
|
||||
"This how-to guide shows a simple way to add human-in-the-loop for code running in a jupyter notebook or in a terminal.\n",
|
||||
"\n",
|
||||
"To build a production application, you will need to do more work to keep track of application state appropriately.\n",
|
||||
"\n",
|
||||
"We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/how-tos/human-in-the-loop/).\n",
|
||||
":::\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -40,7 +49,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 8,
|
||||
"id": "2bed0ccf-20cc-4fd3-9947-55471dd8c4da",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -55,13 +64,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43721981-4595-4721-bea0-5c67696426d3",
|
||||
"id": "7ecd5d7e-7c3c-4180-8958-7db2c1e43564",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chain\n",
|
||||
"\n",
|
||||
"Suppose we have the following (dummy) tools and tool-calling chain:\n",
|
||||
"\n",
|
||||
"Let's create a few simple (dummy) tools and a tool-calling chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43721981-4595-4721-bea0-5c67696426d3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
@@ -71,13 +86,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "e0ff02ac-e750-493b-9b09-4578711a6726",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"# | outout: false\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
@@ -86,7 +101,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"id": "0221fdfd-2a18-4449-a123-e6b0b15bb3d9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -95,17 +110,16 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'count_emails',\n",
|
||||
" 'args': {'last_n_days': 5},\n",
|
||||
" 'id': 'toolu_012VHuh7vk5dVNct5SgZj3gh',\n",
|
||||
" 'id': 'toolu_01QYZdJ4yPiqsdeENWHqioFW',\n",
|
||||
" 'output': 10}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"from typing import Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
@@ -149,12 +163,14 @@
|
||||
"source": [
|
||||
"## Adding human approval\n",
|
||||
"\n",
|
||||
"We can add a simple human approval step to our tool_chain function:"
|
||||
"Let's add a step in the chain that will ask a person to approve or reject the tall call request.\n",
|
||||
"\n",
|
||||
"On rejection, the step will raise an exception which will stop execution of the rest of the chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 12,
|
||||
"id": "341fb055-0315-47bc-8f72-ed6103d2981f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -162,23 +178,35 @@
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def human_approval(msg: AIMessage) -> Runnable:\n",
|
||||
"class NotApproved(Exception):\n",
|
||||
" \"\"\"Custom exception.\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def human_approval(msg: AIMessage) -> AIMessage:\n",
|
||||
" \"\"\"Responsible for passing through its input or raising an exception.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" msg: output from the chat model\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" msg: original output from the msg\n",
|
||||
" \"\"\"\n",
|
||||
" tool_strs = \"\\n\\n\".join(\n",
|
||||
" json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls\n",
|
||||
" )\n",
|
||||
" input_msg = (\n",
|
||||
" f\"Do you approve of the following tool invocations\\n\\n{tool_strs}\\n\\n\"\n",
|
||||
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\"\n",
|
||||
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\\n >>>\"\n",
|
||||
" )\n",
|
||||
" resp = input(input_msg)\n",
|
||||
" if resp.lower() not in (\"yes\", \"y\"):\n",
|
||||
" raise ValueError(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
|
||||
" raise NotApproved(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
|
||||
" return msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"id": "25dca07b-56ca-4b94-9955-d4f3e9895e03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -193,10 +221,11 @@
|
||||
" \"args\": {\n",
|
||||
" \"last_n_days\": 5\n",
|
||||
" },\n",
|
||||
" \"id\": \"toolu_01LCpjpFxrRspygDscnHYyPm\"\n",
|
||||
" \"id\": \"toolu_01WbD8XeMoQaRFtsZezfsHor\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. yes\n"
|
||||
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
|
||||
" >>> yes\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,11 +233,11 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'count_emails',\n",
|
||||
" 'args': {'last_n_days': 5},\n",
|
||||
" 'id': 'toolu_01LCpjpFxrRspygDscnHYyPm',\n",
|
||||
" 'id': 'toolu_01WbD8XeMoQaRFtsZezfsHor',\n",
|
||||
" 'output': 10}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -220,7 +249,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 14,
|
||||
"id": "f558f2cd-847b-4ef9-a770-3961082b540c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -233,45 +262,41 @@
|
||||
"{\n",
|
||||
" \"name\": \"send_email\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"message\": \"What's up homie\",\n",
|
||||
" \"recipient\": \"sally@gmail.com\"\n",
|
||||
" \"recipient\": \"sally@gmail.com\",\n",
|
||||
" \"message\": \"What's up homie\"\n",
|
||||
" },\n",
|
||||
" \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n",
|
||||
" \"id\": \"toolu_014XccHFzBiVcc9GV1harV9U\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. no\n"
|
||||
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
|
||||
" >>> no\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "ValueError",
|
||||
"evalue": "Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[11], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mSend sally@gmail.com an email saying \u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms up homie\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3961\u001b[0m, in \u001b[0;36mRunnableLambda.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 3959\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Invoke this runnable synchronously.\"\"\"\u001b[39;00m\n\u001b[1;32m 3960\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfunc\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m-> 3961\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_with_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3962\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_invoke\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3963\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3964\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_config\u001b[49m\u001b[43m(\u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3965\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3966\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3967\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 3968\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 3969\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot invoke a coroutine function synchronously.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3970\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse `ainvoke` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3971\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:1625\u001b[0m, in \u001b[0;36mRunnable._call_with_config\u001b[0;34m(self, func, input, config, run_type, **kwargs)\u001b[0m\n\u001b[1;32m 1621\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 1622\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(var_child_runnable_config\u001b[38;5;241m.\u001b[39mset, child_config)\n\u001b[1;32m 1623\u001b[0m output \u001b[38;5;241m=\u001b[39m cast(\n\u001b[1;32m 1624\u001b[0m Output,\n\u001b[0;32m-> 1625\u001b[0m \u001b[43mcontext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1626\u001b[0m \u001b[43m \u001b[49m\u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1627\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1628\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1629\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1630\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1631\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1632\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 1633\u001b[0m )\n\u001b[1;32m 1634\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 1635\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3835\u001b[0m, in \u001b[0;36mRunnableLambda._invoke\u001b[0;34m(self, input, run_manager, config, **kwargs)\u001b[0m\n\u001b[1;32m 3833\u001b[0m output \u001b[38;5;241m=\u001b[39m chunk\n\u001b[1;32m 3834\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 3835\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3836\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 3837\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3838\u001b[0m \u001b[38;5;66;03m# If the output is a runnable, invoke it\u001b[39;00m\n\u001b[1;32m 3839\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(output, Runnable):\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"Cell \u001b[0;32mIn[9], line 14\u001b[0m, in \u001b[0;36mhuman_approval\u001b[0;34m(msg)\u001b[0m\n\u001b[1;32m 12\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m(input_msg)\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m resp\u001b[38;5;241m.\u001b[39mlower() \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myes\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 14\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool invocations not approved:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mtool_strs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m msg\n",
|
||||
"\u001b[0;31mValueError\u001b[0m: Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Tool invocations not approved:\n",
|
||||
"\n",
|
||||
"{\n",
|
||||
" \"name\": \"send_email\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"recipient\": \"sally@gmail.com\",\n",
|
||||
" \"message\": \"What's up homie\"\n",
|
||||
" },\n",
|
||||
" \"id\": \"toolu_014XccHFzBiVcc9GV1harV9U\"\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Send sally@gmail.com an email saying 'What's up homie'\")"
|
||||
"try:\n",
|
||||
" chain.invoke(\"Send sally@gmail.com an email saying 'What's up homie'\")\n",
|
||||
"except NotApproved as e:\n",
|
||||
" print()\n",
|
||||
" print(e)"
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e938d8f1-df93-4726-a465-78e596312246",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -290,7 +315,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.4"
}
},
"nbformat": 4,
@@ -15,9 +15,30 @@
|
||||
"id": "14b94240",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use tools without function calling\n",
|
||||
"# How to add ad-hoc tool calling capability to LLMs and Chat Models\n",
|
||||
"\n",
|
||||
"In this guide we'll build a Chain that does not rely on any special model APIs (like tool calling, which we showed in the [Quickstart](/docs/how_to/tool_calling)) and instead just prompts the model directly to invoke tools."
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide for more information.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [Function/tool calling](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling)\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LLMs](/docs/concepts/#llms)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling/).\n",
|
||||
"\n",
|
||||
"We'll do this by simply writing a prompt that will get the model to invoke the appropriate tools. Here's a diagram of the logic:\n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
},
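For orientation, here is a compact, self-contained sketch of the flow this updated guide builds up: prompt the model to *describe* a tool call as JSON, parse it, and dispatch to a plain Python function ourselves. It assumes `phi3` via Ollama, as the guide does; the single-tool prompt and the example numbers are illustrative, not the guide's exact wording:

```python
from langchain_community.llms import Ollama
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate

model = Ollama(model="phi3")  # any model without native tool calling works

system = """You have one tool: multiply(x, y) - multiplies two numbers.
Given the user input, return ONLY a JSON blob with 'name' and 'arguments' keys."""
prompt = ChatPromptTemplate.from_messages([("system", system), ("user", "{input}")])

# The model only *requests* the call; parsing and dispatch are on us.
chain = prompt | model | JsonOutputParser()
request = chain.invoke({"input": "what's 3 times 4?"})
# request should look like {'name': 'multiply', 'arguments': {'x': 3, 'y': 4}}

if request["name"] == "multiply":
    print(request["arguments"]["x"] * request["arguments"]["y"])
```

The rest of the notebook replaces each piece of this sketch with a more robust version: multiple tools, a proper dispatch function, and output shaping.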
|
||||
{
|
||||
@@ -37,34 +58,58 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai"
|
||||
"%pip install --upgrade --quiet langchain langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5e727d22-f861-4eee-882a-688f8efc885e",
|
||||
"id": "897bc01e-cc2b-4400-8a64-db4aa56085d3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And set these environment variables:"
|
||||
"If you'd like to use LangSmith, uncomment the below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "527ef906-0104-4872-b4e5-f371cf73feba",
|
||||
"execution_count": 26,
|
||||
"id": "5efb4170-b95b-4d29-8f57-09509f3ba6df",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"\n",
|
||||
"# If you'd like to use LangSmith, uncomment the below:\n",
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7ec6409b-21e5-4d0a-8a46-c4ef0b055dd3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"To illustrate the idea, we'll use `phi3` via Ollama, which does **NOT** have native support for tool calling. If you'd like to use `Ollama` as well follow [these instructions](/docs/integrations/chat/ollama/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "424be968-2806-4d1a-a6aa-5499ae20fac5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"model = Ollama(model=\"phi3\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68946881",
|
||||
@@ -72,66 +117,75 @@
|
||||
"source": [
|
||||
"## Create a tool\n",
|
||||
"\n",
|
||||
"First, we need to create a tool to call. For this example, we will create a custom tool from a function. For more information on all details related to creating custom tools, please see [this guide](/docs/how_to/custom_tools)."
|
||||
"First, let's create an `add` and `multiply` tools. For more information on creating custom tools, please see [this guide](/docs/how_to/custom_tools)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "90187d07",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(first_int: int, second_int: int) -> int:\n",
|
||||
" \"\"\"Multiply two integers together.\"\"\"\n",
|
||||
" return first_int * second_int"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d7009e1a",
|
||||
"execution_count": 4,
|
||||
"id": "4548e6fa-0f9b-4d7a-8fa5-66cec0350e5f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--\n",
|
||||
"multiply\n",
|
||||
"multiply(first_int: int, second_int: int) -> int - Multiply two integers together.\n",
|
||||
"{'first_int': {'title': 'First Int', 'type': 'integer'}, 'second_int': {'title': 'Second Int', 'type': 'integer'}}\n"
|
||||
"Multiply two numbers together.\n",
|
||||
"{'x': {'title': 'X', 'type': 'number'}, 'y': {'title': 'Y', 'type': 'number'}}\n",
|
||||
"--\n",
|
||||
"add\n",
|
||||
"Add two numbers.\n",
|
||||
"{'x': {'title': 'X', 'type': 'integer'}, 'y': {'title': 'Y', 'type': 'integer'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(multiply.name)\n",
|
||||
"print(multiply.description)\n",
|
||||
"print(multiply.args)"
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(x: float, y: float) -> float:\n",
|
||||
" \"\"\"Multiply two numbers together.\"\"\"\n",
|
||||
" return x * y\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(x: int, y: int) -> int:\n",
|
||||
" \"Add two numbers.\"\n",
|
||||
" return x + y\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [multiply, add]\n",
|
||||
"\n",
|
||||
"# Let's inspect the tools\n",
|
||||
"for t in tools:\n",
|
||||
" print(\"--\")\n",
|
||||
" print(t.name)\n",
|
||||
" print(t.description)\n",
|
||||
" print(t.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 5,
|
||||
"id": "be77e780",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"20"
|
||||
"20.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"multiply.invoke({\"first_int\": 4, \"second_int\": 5})"
|
||||
"multiply.invoke({\"x\": 4, \"y\": 5})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -146,48 +200,85 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "c64818f0-9364-423c-922e-bdfb8f01e726",
|
||||
"execution_count": 6,
|
||||
"id": "2063b564-25ca-4729-a45f-ba4633175b04",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'multiply: multiply(first_int: int, second_int: int) -> int - Multiply two integers together.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"multiply(x: float, y: float) -> float - Multiply two numbers together.\n",
|
||||
"add(x: int, y: int) -> int - Add two numbers.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.tools import render_text_description\n",
|
||||
"\n",
|
||||
"rendered_tools = render_text_description([multiply])\n",
|
||||
"rendered_tools"
|
||||
"rendered_tools = render_text_description(tools)\n",
|
||||
"print(rendered_tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "63552d4d-8bd6-4aca-8805-56e236f6552d",
|
||||
"execution_count": 17,
|
||||
"id": "f02f1dce-76e7-4ca9-9bac-5af496131fe1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"system_prompt = f\"\"\"You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n",
|
||||
"system_prompt = f\"\"\"\\\n",
|
||||
"You are an assistant that has access to the following set of tools. \n",
|
||||
"Here are the names and descriptions for each tool:\n",
|
||||
"\n",
|
||||
"{rendered_tools}\n",
|
||||
"\n",
|
||||
"Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.\"\"\"\n",
|
||||
"Given the user input, return the name and input of the tool to use. \n",
|
||||
"Return your response as a JSON blob with 'name' and 'arguments' keys.\n",
|
||||
"\n",
|
||||
"The `arguments` should be a dictionary, with keys corresponding \n",
|
||||
"to the argument names and the values corresponding to the requested values.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "f8623e03-60eb-4439-b57b-ecbcebc61b58",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"name\": \"add\",\n",
|
||||
" \"arguments\": {\n",
|
||||
" \"x\": 3,\n",
|
||||
" \"y\": 1132\n",
|
||||
" }\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain = prompt | model\n",
|
||||
"message = chain.invoke({\"input\": \"what's 3 plus 1132\"})\n",
|
||||
"\n",
|
||||
"# Let's take a look at the output from the model\n",
|
||||
"# if the model is an LLM (not a chat model), the output will be a string.\n",
|
||||
"if isinstance(message, str):\n",
|
||||
" print(message)\n",
|
||||
"else: # Otherwise it's a chat model\n",
|
||||
" print(message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14df2cd5-b6fa-4b10-892d-e8692c7931e5",
|
||||
@@ -200,156 +291,153 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 19,
|
||||
"id": "f129f5bd-127c-4c95-8f34-8f437da7ca8f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'name': 'multiply', 'arguments': {'first_int': 13, 'second_int': 4}}"
|
||||
"{'name': 'multiply', 'arguments': {'x': 13.0, 'y': 4.0}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
|
||||
"chain = prompt | model | JsonOutputParser()\n",
|
||||
"chain.invoke({\"input\": \"what's thirteen times 4\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e1f08255-f146-4f4a-be43-5c21c1d3ae83",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"🎉 Amazing! 🎉 We now instructed our model on how to **request** that a tool be invoked.\n",
|
||||
"\n",
|
||||
"Now, let's create some logic to actually run the tool!\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e29dd4c-8eb5-457f-92d1-8add076404dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invoking the tool\n",
|
||||
"## Invoking the tool 🏃\n",
|
||||
"\n",
|
||||
"We can invoke the tool as part of the chain by passing along the model-generated \"arguments\" to it:"
|
||||
"Now that the model can request that a tool be invoked, we need to write a function that can actually invoke \n",
|
||||
"the tool.\n",
|
||||
"\n",
|
||||
"The function will select the appropriate tool by name, and pass to it the arguments chosen by the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 20,
|
||||
"id": "faee95e0-4095-4310-991f-9e9465c6738e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, Dict, Optional, TypedDict\n",
|
||||
"\n",
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ToolCallRequest(TypedDict):\n",
|
||||
" \"\"\"A typed dict that shows the inputs into the invoke_tool function.\"\"\"\n",
|
||||
"\n",
|
||||
" name: str\n",
|
||||
" arguments: Dict[str, Any]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def invoke_tool(\n",
|
||||
" tool_call_request: ToolCallRequest, config: Optional[RunnableConfig] = None\n",
|
||||
"):\n",
|
||||
" \"\"\"A function that we can use the perform a tool invocation.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" tool_call_request: a dict that contains the keys name and arguments.\n",
|
||||
" The name must match the name of a tool that exists.\n",
|
||||
" The arguments are the arguments to that tool.\n",
|
||||
" config: This is configuration information that LangChain uses that contains\n",
|
||||
" things like callbacks, metadata, etc.See LCEL documentation about RunnableConfig.\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" output from the requested tool\n",
|
||||
" \"\"\"\n",
|
||||
" tool_name_to_tool = {tool.name: tool for tool in tools}\n",
|
||||
" name = tool_call_request[\"name\"]\n",
|
||||
" requested_tool = tool_name_to_tool[name]\n",
|
||||
" return requested_tool.invoke(tool_call_request[\"arguments\"], config=config)"
|
||||
]
|
||||
},
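Because `invoke_tool` threads a `RunnableConfig` through to the tool, any callbacks, tags, and metadata you pass propagate into the tool run. A small sketch using the function defined above (the handler choice is just for demonstration):

```python
from langchain_core.callbacks import StdOutCallbackHandler

# Any RunnableConfig keys (callbacks, tags, metadata, ...) reach the tool run.
config = {"callbacks": [StdOutCallbackHandler()], "tags": ["ad-hoc-tools"]}
invoke_tool({"name": "add", "arguments": {"x": 3, "y": 5}}, config=config)  # -> 8
```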
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f4957532-9e0c-47f6-bb62-0fd789ac1d3e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's test this out 🧪!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "d0ea3b2a-8fb2-4016-83c8-a5d3e78fedbc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"15.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"invoke_tool({\"name\": \"multiply\", \"arguments\": {\"x\": 3, \"y\": 5}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "715af6e1-935d-4bc0-a3d2-646ecf8a329b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Let's put it together\n",
|
||||
"\n",
|
||||
"Let's put it together into a chain that creates a calculator with add and multiplication capabilities."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "0555b384-fde6-4404-86e0-7ea199003d58",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"52"
|
||||
"53.83784653"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"chain = prompt | model | JsonOutputParser() | itemgetter(\"arguments\") | multiply\n",
|
||||
"chain.invoke({\"input\": \"what's thirteen times 4\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8d60b2cb-6ce0-48fc-8d18-d2337161a53d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Choosing from multiple tools\n",
|
||||
"\n",
|
||||
"Suppose we have multiple tools we want the chain to be able to choose from:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "95c86d32-ee45-4c87-a28c-14eff19b49e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tool\n",
|
||||
"def add(first_int: int, second_int: int) -> int:\n",
|
||||
" \"Add two integers.\"\n",
|
||||
" return first_int + second_int\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def exponentiate(base: int, exponent: int) -> int:\n",
|
||||
" \"Exponentiate the base to the exponent power.\"\n",
|
||||
" return base**exponent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "748405ff-4c85-4bd7-82e1-30458b5a4106",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With function calling, we can do this like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eb3aa89e-40e1-45ec-b1f3-ab28cfc8e42d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to run the model selected tool, we can do so using a function that returns the tool based on the model output. Specifically, our function will action return it's own subchain that gets the \"arguments\" part of the model output and passes it to the chosen tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "db254773-5b8e-43d0-aabe-c21566c154cd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [add, exponentiate, multiply]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def tool_chain(model_output):\n",
|
||||
" tool_map = {tool.name: tool for tool in tools}\n",
|
||||
" chosen_tool = tool_map[model_output[\"name\"]]\n",
|
||||
" return itemgetter(\"arguments\") | chosen_tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "ad9f5cff-b86a-45fc-9ce4-b0aa9025a378",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1135"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rendered_tools = render_text_description(tools)\n",
|
||||
"system_prompt = f\"\"\"You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n",
|
||||
"\n",
|
||||
"{rendered_tools}\n",
|
||||
"\n",
|
||||
"Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | model | JsonOutputParser() | tool_chain\n",
|
||||
"chain.invoke({\"input\": \"what's 3 plus 1132\"})"
|
||||
"chain = prompt | model | JsonOutputParser() | invoke_tool\n",
|
||||
"chain.invoke({\"input\": \"what's thirteen times 4.14137281\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -364,19 +452,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 23,
|
||||
"id": "45404406-859d-4caa-8b9d-5838162c80a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'name': 'add',\n",
|
||||
" 'arguments': {'first_int': 3, 'second_int': 1132},\n",
|
||||
" 'output': 1135}"
|
||||
"{'name': 'multiply',\n",
|
||||
" 'arguments': {'x': 13, 'y': 4.14137281},\n",
|
||||
" 'output': 53.83784653}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -385,9 +473,26 @@
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" prompt | model | JsonOutputParser() | RunnablePassthrough.assign(output=tool_chain)\n",
|
||||
" prompt | model | JsonOutputParser() | RunnablePassthrough.assign(output=invoke_tool)\n",
|
||||
")\n",
|
||||
"chain.invoke({\"input\": \"what's 3 plus 1132\"})"
|
||||
"chain.invoke({\"input\": \"what's thirteen times 4.14137281\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1797fe82-ea35-4cba-834a-1caf9740d184",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## What's next?\n",
|
||||
"\n",
|
||||
"This how-to guide shows the \"happy path\" when the model correctly outputs all the required tool information.\n",
|
||||
"\n",
|
||||
"In reality, if you're using more complex tools, you will start encountering errors from the model, especially for models that have not been fine tuned for tool calling and for less capable models.\n",
|
||||
"\n",
|
||||
"You will need to be prepared to add strategies to improve the output from the model; e.g.,\n",
|
||||
"\n",
|
||||
"1. Provide few shot examples.\n",
|
||||
"2. Add error handling (e.g., catch the exception and feed it back to the LLM to ask it to correct its previous output)."
|
||||
]
|
||||
}
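The error-handling strategy can be as small as one retry loop. Below is a minimal sketch, assuming the `prompt` and `model` objects built earlier in this guide; `invoke_with_retry` is an illustrative helper name, not a LangChain API:

```python
from langchain_core.output_parsers import JsonOutputParser

parser = JsonOutputParser()


def invoke_with_retry(user_input: str, max_attempts: int = 2) -> dict:
    """Parse the model's tool request; on malformed JSON, show the model its
    error and ask it to correct its previous output (illustrative helper)."""
    text_chain = prompt | model  # `prompt` and `model` as defined earlier
    raw = text_chain.invoke({"input": user_input})
    for _ in range(max_attempts):
        text = raw if isinstance(raw, str) else raw.content  # LLM vs. chat model
        try:
            return parser.parse(text)
        except Exception as exc:  # e.g. OutputParserException on bad JSON
            raw = text_chain.invoke(
                {
                    "input": f"{user_input}\n\nYour previous reply was not "
                    f"valid JSON ({exc}). Reply with ONLY the corrected JSON blob."
                }
            )
    raise ValueError("Model never produced a valid tool-call JSON blob.")
```

Combining this with a few-shot example in the system prompt (strategy 1) usually reduces how often the retry fires.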
|
||||
],
|
||||
@@ -407,7 +512,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import comet_llm\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"LANGCHAIN_COMET_TRACING\"] = \"true\"\n",
|
||||
"\n",
|
||||
@@ -40,8 +41,7 @@
|
||||
"# here we are configuring the comet project\n",
|
||||
"os.environ[\"COMET_PROJECT_NAME\"] = \"comet-example-langchain-tracing\"\n",
|
||||
"\n",
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
@@ -42,7 +42,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai deepeval langchain-chroma"
|
||||
"%pip install --upgrade --quiet langchain langchain-openai langchain-community deepeval langchain-chroma"
|
||||
]
|
||||
},
|
||||
{
@@ -36,7 +36,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai context-python"
|
||||
"%pip install --upgrade --quiet langchain langchain-openai langchain-community context-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -114,10 +114,7 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"token = os.environ[\"CONTEXT_API_TOKEN\"]\n",
@@ -36,7 +36,8 @@
|
||||
"# Install necessary dependencies.\n",
|
||||
"%pip install --upgrade --quiet infinopy\n",
|
||||
"%pip install --upgrade --quiet matplotlib\n",
|
||||
"%pip install --upgrade --quiet tiktoken"
|
||||
"%pip install --upgrade --quiet tiktoken\n",
|
||||
"%pip install --upgrade --quiet langchain langchain-openai langchain-community"
|
||||
]
|
||||
},
|
||||
{
@@ -56,7 +56,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai"
|
||||
"%pip install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai langchain-community"
|
||||
]
|
||||
},
|
||||
{
@@ -32,7 +32,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet promptlayer --upgrade"
|
||||
"%pip install --upgrade --quiet langchain-community promptlayer --upgrade"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -94,9 +94,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
")\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat_llm = ChatOpenAI(\n",
@@ -35,7 +35,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet trubrics"
|
||||
"%pip install --upgrade --quiet trubrics langchain langchain-community"
|
||||
]
|
||||
},
|
||||
{
@@ -58,7 +58,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -81,7 +81,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain_openai uptrain faiss-cpu flashrank"
|
||||
"%pip install -qU langchain langchain_openai langchain-community uptrain faiss-cpu flashrank"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -100,7 +100,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -131,7 +131,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -148,7 +148,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -165,7 +165,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -183,7 +183,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -194,55 +194,69 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set the openai API key\n",
|
||||
"This key is required to perform the evaluations. UpTrain uses the GPT models to evaluate the responses generated by the LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"UpTrain provides you with:\n",
|
||||
"1. Dashboards with advanced drill-down and filtering options\n",
|
||||
"1. Insights and common topics among failing cases\n",
|
||||
"1. Observability and real-time monitoring of production data\n",
|
||||
"1. Regression testing via seamless integration with your CI/CD pipelines\n",
|
||||
"\n",
|
||||
"You can choose between the following options for evaluating using UpTrain:\n",
|
||||
"### 1. **UpTrain's Open-Source Software (OSS)**: \n",
|
||||
"You can use the open-source evaluation service to evaluate your model. In this case, you will need to provie an OpenAI API key. UpTrain uses the GPT models to evaluate the responses generated by the LLM. You can get yours [here](https://platform.openai.com/account/api-keys).\n",
|
||||
"\n",
|
||||
"In order to view your evaluations in the UpTrain dashboard, you will need to set it up by running the following commands in your terminal:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"git clone https://github.com/uptrain-ai/uptrain\n",
|
||||
"cd uptrain\n",
|
||||
"bash run_uptrain.sh\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will start the UpTrain dashboard on your local machine. You can access it at `http://localhost:3000/dashboard`.\n",
|
||||
"\n",
|
||||
"Parameters:\n",
|
||||
"- key_type=\"openai\"\n",
|
||||
"- api_key=\"OPENAI_API_KEY\"\n",
|
||||
"- project_name=\"PROJECT_NAME\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### 2. **UpTrain Managed Service and Dashboards**:\n",
|
||||
"Alternatively, you can use UpTrain's managed service to evaluate your model. You can create a free UpTrain account [here](https://uptrain.ai/) and get free trial credits. If you want more trial credits, [book a call with the maintainers of UpTrain here](https://calendly.com/uptrain-sourabh/30min).\n",
|
||||
"\n",
|
||||
"The benefits of using the managed service are:\n",
|
||||
"1. No need to set up the UpTrain dashboard on your local machine.\n",
|
||||
"1. Access to many LLMs without needing their API keys.\n",
|
||||
"\n",
|
||||
"Once you perform the evaluations, you can view them in the UpTrain dashboard at `https://dashboard.uptrain.ai/dashboard`\n",
|
||||
"\n",
|
||||
"Parameters:\n",
|
||||
"- key_type=\"uptrain\"\n",
|
||||
"- api_key=\"UPTRAIN_API_KEY\"\n",
|
||||
"- project_name=\"PROJECT_NAME\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**Note:** The `project_name` will be the project name under which the evaluations performed will be shown in the UpTrain dashboard."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"## Set the API key\n",
|
||||
"\n",
|
||||
"For each of the retrievers below, it is better to define the callback handler again to avoid interference. You can choose between the following options for evaluating using UpTrain:\n",
|
||||
"\n",
|
||||
"### 1. **UpTrain's Open-Source Software (OSS)**: \n",
|
||||
"You can use the open-source evaluation service to evaluate your model.\n",
|
||||
"In this case, you will need to provie an OpenAI API key. You can get yours [here](https://platform.openai.com/account/api-keys).\n",
|
||||
"\n",
|
||||
"Parameters:\n",
|
||||
"- key_type=\"openai\"\n",
|
||||
"- api_key=\"OPENAI_API_KEY\"\n",
|
||||
"- project_name_prefix=\"PROJECT_NAME_PREFIX\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### 2. **UpTrain Managed Service and Dashboards**: \n",
|
||||
"You can create a free UpTrain account [here](https://uptrain.ai/) and get free trial credits. If you want more trial credits, [book a call with the maintainers of UpTrain here](https://calendly.com/uptrain-sourabh/30min).\n",
|
||||
"\n",
|
||||
"UpTrain Managed service provides:\n",
|
||||
"1. Dashboards with advanced drill-down and filtering options\n",
|
||||
"1. Insights and common topics among failing cases\n",
|
||||
"1. Observability and real-time monitoring of production data\n",
|
||||
"1. Regression testing via seamless integration with your CI/CD pipelines\n",
|
||||
"\n",
|
||||
"The notebook contains some screenshots of the dashboards and the insights that you can get from the UpTrain managed service.\n",
|
||||
"\n",
|
||||
"Parameters:\n",
|
||||
"- key_type=\"uptrain\"\n",
|
||||
"- api_key=\"UPTRAIN_API_KEY\"\n",
|
||||
"- project_name_prefix=\"PROJECT_NAME_PREFIX\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**Note:** The `project_name_prefix` will be used as prefix for the project names in the UpTrain dashboard. These will be different for different types of evals. For example, if you set project_name_prefix=\"langchain\" and perform the multi_query evaluation, the project name will be \"langchain_multi_query\"."
|
||||
"The notebook will prompt you to enter the API key. You can choose between the OpenAI API key or the UpTrain API key by changing the `key_type` parameter in the cell below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"KEY_TYPE = \"openai\" # or \"uptrain\"\n",
|
||||
"API_KEY = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -264,7 +278,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -306,7 +320,7 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the uptrain callback handler\n",
|
||||
"uptrain_callback = UpTrainCallbackHandler(key_type=\"openai\", api_key=OPENAI_API_KEY)\n",
|
||||
"uptrain_callback = UpTrainCallbackHandler(key_type=KEY_TYPE, api_key=API_KEY)\n",
|
||||
"config = {\"callbacks\": [uptrain_callback]}\n",
|
||||
"\n",
|
||||
"# Invoke the chain with a query\n",
|
||||
@@ -328,7 +342,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -380,7 +394,7 @@
|
||||
"multi_query_retriever = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)\n",
|
||||
"\n",
|
||||
"# Create the uptrain callback\n",
|
||||
"uptrain_callback = UpTrainCallbackHandler(key_type=\"openai\", api_key=OPENAI_API_KEY)\n",
|
||||
"uptrain_callback = UpTrainCallbackHandler(key_type=KEY_TYPE, api_key=API_KEY)\n",
|
||||
"config = {\"callbacks\": [uptrain_callback]}\n",
|
||||
"\n",
|
||||
"# Create the RAG prompt\n",
|
||||
@@ -415,7 +429,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -470,13 +484,24 @@
|
||||
"chain = RetrievalQA.from_chain_type(llm=llm, retriever=compression_retriever)\n",
|
||||
"\n",
|
||||
"# Create the uptrain callback\n",
|
||||
"uptrain_callback = UpTrainCallbackHandler(key_type=\"openai\", api_key=OPENAI_API_KEY)\n",
|
||||
"uptrain_callback = UpTrainCallbackHandler(key_type=KEY_TYPE, api_key=API_KEY)\n",
|
||||
"config = {\"callbacks\": [uptrain_callback]}\n",
|
||||
"\n",
|
||||
"# Invoke the chain with a query\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = chain.invoke(query, config=config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# UpTrain's Dashboard and Insights\n",
|
||||
"\n",
|
||||
"Here's a short video showcasing the dashboard and the insights:\n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -95,7 +95,7 @@
|
||||
"from langchain_ai21 import ChatAI21\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"chat = ChatAI21(model=\"j2-ultra\")\n",
|
||||
"chat = ChatAI21(model=\"jamba-instruct\")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
@@ -107,14 +107,6 @@
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({\"english_text\": \"Hello, how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c159a79f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -670,7 +670,7 @@
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" # langchain logo\n",
|
||||
" \"url\": f\"data:image/png;base64,{img_base64}\", # noqa: E501\n",
|
||||
" \"url\": f\"data:image/png;base64,{img_base64}\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" {\"type\": \"text\", \"text\": \"What is this logo for?\"},\n",
@@ -127,7 +127,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import get_openai_callback"
|
||||
"from langchain_community.callbacks import get_openai_callback"
|
||||
]
|
||||
},
|
||||
{
@@ -161,8 +161,7 @@
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
|
||||
@@ -86,7 +86,13 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='为你找到关于coze的信息如下:\n\nCoze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n\n用户无论是否有编程经验,都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件,并将其部署在社交平台和即时聊天应用程序中。\n\n国际版使用的模型比国内版更强大。')"
|
||||
"AIMessage(content='为你找到关于coze的信息如下:\n",
|
||||
"\n",
|
||||
"Coze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n",
|
||||
"\n",
|
||||
"用户无论是否有编程经验,都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件,并将其部署在社交平台和即时聊天应用程序中。\n",
|
||||
"\n",
|
||||
"国际版使用的模型比国内版更强大。')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -173,8 +179,7 @@
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
|
||||
@@ -67,7 +67,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
|
||||
"from langchain_core.callbacks import StreamingStdOutCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
@@ -133,8 +133,7 @@
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
|
||||
@@ -126,8 +126,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_community.chat_models import ChatEverlyAI\n",
|
||||
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
@@ -184,8 +184,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_community.chat_models import ChatEverlyAI\n",
|
||||
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
|
||||
@@ -143,8 +143,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
@@ -62,10 +62,10 @@
|
||||
"%pip install --upgrade --quiet langchain-core langchain-community\n",
|
||||
"\n",
|
||||
"# Install Kineitca DB connection package\n",
|
||||
"%pip install --upgrade --quiet gpudb typeguard\n",
|
||||
"%pip install --upgrade --quiet 'gpudb>=7.2.0.8' typeguard pandas tqdm\n",
|
||||
"\n",
|
||||
"# Install packages needed for this tutorial\n",
|
||||
"%pip install --upgrade --quiet faker"
|
||||
"%pip install --upgrade --quiet faker ipykernel "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -114,7 +114,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -139,11 +139,11 @@
|
||||
"\n",
|
||||
" birthdate \n",
|
||||
"id \n",
|
||||
"0 1997-12-01 \n",
|
||||
"1 1924-07-27 \n",
|
||||
"2 1933-11-28 \n",
|
||||
"3 1988-10-19 \n",
|
||||
"4 1931-03-12 \n"
|
||||
"0 1997-12-08 \n",
|
||||
"1 1924-08-03 \n",
|
||||
"2 1933-12-05 \n",
|
||||
"3 1988-10-26 \n",
|
||||
"4 1931-03-19 \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -222,39 +222,60 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CREATE OR REPLACE CONTEXT \"demo\".\"test_llm_ctx\" (\n",
|
||||
" TABLE = \"demo\".\"user_profiles\",\n",
|
||||
" COMMENT = 'Contains user profiles.'\n",
|
||||
"),\n",
|
||||
"(\n",
|
||||
" SAMPLES = ( \n",
|
||||
" 'How many male users are there?' = 'select count(1) as num_users\n",
|
||||
" from demo.user_profiles\n",
|
||||
" where sex = ''M'';' )\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# create an LLM context for the table.\n",
|
||||
"from gpudb import GPUdbSamplesClause, GPUdbSqlContext, GPUdbTableClause\n",
|
||||
"\n",
|
||||
"sql = f\"\"\"\n",
|
||||
"CREATE OR REPLACE CONTEXT {kinetica_ctx}\n",
|
||||
"(\n",
|
||||
" TABLE = {table_name}\n",
|
||||
" COMMENT = 'Contains user profiles.'\n",
|
||||
"),\n",
|
||||
"(\n",
|
||||
" SAMPLES = (\n",
|
||||
" 'How many male users are there?' = \n",
|
||||
" 'select count(1) as num_users\n",
|
||||
" from {table_name}\n",
|
||||
" where sex = ''M'';')\n",
|
||||
"table_ctx = GPUdbTableClause(table=table_name, comment=\"Contains user profiles.\")\n",
|
||||
"\n",
|
||||
"samples_ctx = GPUdbSamplesClause(\n",
|
||||
" samples=[\n",
|
||||
" (\n",
|
||||
" \"How many male users are there?\",\n",
|
||||
" f\"\"\"\n",
|
||||
" select count(1) as num_users\n",
|
||||
" from {table_name}\n",
|
||||
" where sex = 'M';\n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"count_affected = kinetica_llm.kdbc.execute(sql)\n",
|
||||
"context_sql = GPUdbSqlContext(\n",
|
||||
" name=kinetica_ctx, tables=[table_ctx], samples=samples_ctx\n",
|
||||
").build_sql()\n",
|
||||
"\n",
|
||||
"print(context_sql)\n",
|
||||
"count_affected = kinetica_llm.kdbc.execute(context_sql)\n",
|
||||
"count_affected"
|
||||
]
|
||||
},
|
||||
@@ -273,7 +294,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -334,7 +355,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -357,7 +378,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -404,7 +425,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.19"
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -94,8 +94,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
@@ -122,8 +122,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
@@ -173,8 +173,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import OnlinePDFLoader\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import OnlinePDFLoader\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"# Loading the COMVEST 2024 notice\n",
|
||||
"loader = OnlinePDFLoader(\n",
|
||||
@@ -202,7 +202,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import BM25Retriever\n",
|
||||
"from langchain_community.retrievers import BM25Retriever\n",
|
||||
"\n",
|
||||
"retriever = BM25Retriever.from_documents(texts)"
|
||||
]
@@ -71,8 +71,7 @@
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"This notebook shows how to get started using `MLX` LLM's as chat models.\n",
|
||||
"\n",
|
||||
"In particular, we will:\n",
|
||||
"1. Utilize the [MLXPipeline](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/mlx_pipelines.py), \n",
|
||||
"1. Utilize the [MLXPipeline](https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/llms/mlx_pipeline.py), \n",
|
||||
"2. Utilize the `ChatMLX` class to enable any of these LLMs to interface with LangChain's [Chat Messages](https://python.langchain.com/docs/modules/model_io/chat/#messages) abstraction.\n",
|
||||
"3. Demonstrate how to use an open-source LLM to power an `ChatAgent` pipeline\n"
|
||||
]
|
||||
@@ -66,10 +66,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models.mlx import ChatMLX\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
@@ -78,8 +78,7 @@
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
|
||||