Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-02-05 16:50:03 +00:00

Compare commits
12 commits: vwp/defaul ... dev2049/nu
| Author | SHA1 | Date |
|---|---|---|
| | 795f823bb1 | |
| | 6cd653deb4 | |
| | e953d2cf93 | |
| | 50668693d7 | |
| | 6fec15b6fb | |
| | 7bcdc66b99 | |
| | 4cdd19bd4e | |
| | 90cef7b53a | |
| | 675e27c136 | |
| | fa4a4f2940 | |
| | 55c7964e4e | |
| | 3cc2ce6ac9 | |
@@ -1,15 +0,0 @@
# AnalyticDB

This page covers how to use the AnalyticDB ecosystem within LangChain.

### VectorStore

There exists a wrapper around AnalyticDB, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores import AnalyticDB
```
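
As a rough usage sketch (hedged: the embedding model and the `connection_string` keyword below are illustrative assumptions, not confirmed against this version of the API):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import AnalyticDB

# Hypothetical connection string for an AnalyticDB (PostgreSQL-compatible) instance
CONNECTION_STRING = "postgresql+psycopg2://user:password@host:5432/dbname"

# Build the store from a few texts, then run a semantic search
vectorstore = AnalyticDB.from_texts(
    ["AnalyticDB is a cloud-native data warehouse."],
    OpenAIEmbeddings(),
    connection_string=CONNECTION_STRING,
)
docs = vectorstore.similarity_search("What is AnalyticDB?")
```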

For a more detailed walkthrough of the AnalyticDB wrapper, see [this notebook](../modules/indexes/vectorstores/examples/analyticdb.ipynb)
@@ -28,13 +28,15 @@ To stream the model's predictions, add in a CallbackManager.

```python
from langchain.llms import GPT4All
-from langchain.callbacks.base import CallbackManager
+from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# There are many CallbackHandlers supported, such as
# from langchain.callbacks.streamlit import StreamlitCallbackHandler

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
-model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8, callback_manager=callback_manager, verbose=True)
+model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8, callback_manager=callback_manager,
+                verbose=True)

# Generate text. Tokens are streamed through the callback manager.
model("Once upon a time, ")
```
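
If you would rather collect the streamed tokens programmatically than print them, you can supply your own handler. A minimal sketch, reusing the `BaseCallbackHandler` interface shown elsewhere in these docs (the model path is a placeholder):

```python
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.llms import GPT4All

class TokenCollector(BaseCallbackHandler):
    """Append each streamed token to a list instead of writing to stdout."""

    def __init__(self) -> None:
        self.tokens = []

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.tokens.append(token)

collector = TokenCollector()
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8,
                callback_manager=CallbackManager([collector]), verbose=True)
model("Once upon a time, ")
print("".join(collector.tokens))  # the full completion, reassembled from tokens
```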
@@ -1,65 +0,0 @@
# MyScale

This page covers how to use the MyScale vector database within LangChain.
It is broken into two parts: installation and setup, and then references to specific MyScale wrappers.

With MyScale, you can manage both structured and unstructured (vectorized) data, and perform joint queries and analytics on both types of data using SQL. Plus, MyScale's cloud-native OLAP architecture, built on top of ClickHouse, enables lightning-fast data processing even on massive datasets.

## Introduction

[Overview of MyScale and high-performance vector search](https://docs.myscale.com/en/overview/)

You can now register on our SaaS and [start a cluster now!](https://docs.myscale.com/en/quickstart/)

If you are also interested in how we integrate SQL and vectors, please refer to [this document](https://docs.myscale.com/en/vector-reference/) for further syntax reference.

We also have a live demo on Hugging Face! Please check out our [Hugging Face space](https://huggingface.co/myscale), which searches millions of vectors in the blink of an eye!

## Installation and Setup
- Install the Python SDK with `pip install clickhouse-connect`

### Setting up environments

There are two ways to set up parameters for the MyScale index.

1. Environment Variables

Before you run the app, please set the environment variable with `export`:
`export MYSCALE_URL='<your-endpoints-url>' MYSCALE_PORT=<your-endpoints-port> MYSCALE_USERNAME=<your-username> MYSCALE_PASSWORD=<your-password> ...`

You can easily find your account, password, and other info on our SaaS. For details, please refer to [this document](https://docs.myscale.com/en/cluster-management/)
Every attribute under `MyScaleSettings` can be set with the prefix `MYSCALE_` and is case insensitive.

2. Create a `MyScaleSettings` object with parameters

```python
from langchain.vectorstores import MyScale, MyScaleSettings
config = MyScaleSettings(host="<your-backend-url>", port=8443, ...)
index = MyScale(embedding_function, config)
index.add_documents(...)
```

## Wrappers
Supported functions:
- `add_texts`
- `add_documents`
- `from_texts`
- `from_documents`
- `similarity_search`
- `asimilarity_search`
- `similarity_search_by_vector`
- `asimilarity_search_by_vector`
- `similarity_search_with_relevance_scores`

### VectorStore

There exists a wrapper around the MyScale database, allowing you to use it as a vectorstore,
whether for semantic search or similar example retrieval.

To import this vectorstore:
```python
from langchain.vectorstores import MyScale
```
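
A short sketch of how the pieces above fit together (hedged: the placeholder values are illustrative, and the embedding model is an assumption; the constructor and search calls follow the snippets earlier on this page):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import MyScale, MyScaleSettings

config = MyScaleSettings(host="<your-backend-url>", port=8443,
                         username="<your-username>", password="<your-password>")
index = MyScale(OpenAIEmbeddings(), config)
index.add_texts(["MyScale runs joint SQL and vector queries."])

# similarity_search returns the most relevant Documents for a query string
docs = index.similarity_search("What can MyScale do?", k=1)
```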

For a more detailed walkthrough of the MyScale wrapper, see [this notebook](../modules/indexes/vectorstores/examples/myscale.ipynb)
@@ -1,167 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "0e499e90-7a6d-4fab-8aab-31a4df417601",
"metadata": {},
"source": [
"# PowerBI Dataset Agent\n",
"\n",
"This notebook showcases an agent designed to interact with a Power BI Dataset. The agent is designed to answer more general questions about a dataset, as well as recover from errors.\n",
"\n",
"Note that, as this agent is in active development, all answers might not be correct. It runs against the [executequery endpoint](https://learn.microsoft.com/en-us/rest/api/power-bi/datasets/execute-queries), which does not allow deletes.\n",
"\n",
"### Some notes\n",
"- It relies on authentication with the azure.identity package, which can be installed with `pip install azure-identity`. Alternatively you can create the powerbi dataset with a token as a string without supplying the credentials.\n",
"- You can also supply a username to impersonate for use with datasets that have RLS enabled. \n",
"- The toolkit uses an LLM to create the query from the question; the agent uses the LLM for the overall execution.\n",
"- Testing was done mostly with a `text-davinci-003` model; codex models did not seem to perform very well."
]
},
{
"cell_type": "markdown",
"id": "ec927ac6-9b2a-4e8a-9a6e-3e429191875c",
"metadata": {
"tags": []
},
"source": [
"## Initialization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53422913-967b-4f2a-8022-00269c1be1b1",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.agents.agent_toolkits import create_pbi_agent\n",
"from langchain.agents.agent_toolkits import PowerBIToolkit\n",
"from langchain.utilities.powerbi import PowerBIDataset\n",
"from langchain.llms.openai import AzureOpenAI\n",
"from langchain.agents import AgentExecutor\n",
"from azure.identity import DefaultAzureCredential"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "090f3699-79c6-4ce1-ab96-a94f0121fd64",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = AzureOpenAI(temperature=0, deployment_name=\"text-davinci-003\", verbose=True)\n",
"toolkit = PowerBIToolkit(\n",
"    powerbi=PowerBIDataset(None, \"<dataset_id>\", ['table1', 'table2'], DefaultAzureCredential()), \n",
"    llm=llm\n",
")\n",
"\n",
"agent_executor = create_pbi_agent(\n",
"    llm=llm,\n",
"    toolkit=toolkit,\n",
"    verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "36ae48c7-cb08-4fef-977e-c7d4b96a464b",
"metadata": {},
"source": [
"## Example: describing a table"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ff70e83d-5ad0-4fc7-bb96-27d82ac166d7",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent_executor.run(\"Describe table1\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9abcfe8e-1868-42a4-8345-ad2d9b44c681",
"metadata": {},
"source": [
"## Example: simple query on a table\n",
"In this example, the agent actually figures out the correct query to get a row count of the table."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bea76658-a65b-47e2-b294-6d52c5556246",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent_executor.run(\"How many records are in table1?\")"
]
},
{
"cell_type": "markdown",
"id": "6fbc26af-97e4-4a21-82aa-48bdc992da26",
"metadata": {},
"source": [
"## Example: running queries"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "17bea710-4a23-4de0-b48e-21d57be48293",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent_executor.run(\"How many records are there by dimension1 in table2?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "474dddda-c067-4eeb-98b1-e763ee78b18c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent_executor.run(\"What unique values are there for dimensions2 in table2\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
||||
@@ -191,8 +191,6 @@
 },
 "outputs": [],
 "source": [
-"from typing import Type\n",
-"\n",
 "class CustomSearchTool(BaseTool):\n",
 "    name = \"Search\"\n",
 "    description = \"useful for when you need to answer questions about current events\"\n",
@@ -208,7 +206,7 @@
 "class CustomCalculatorTool(BaseTool):\n",
 "    name = \"Calculator\"\n",
 "    description = \"useful for when you need to answer questions about math\"\n",
-"    args_schema: Type[BaseModel] = CalculatorInput\n",
+"    args_schema=CalculatorInput\n",
 "\n",
 "    def _run(self, query: str) -> str:\n",
 "        \"\"\"Use the tool.\"\"\"\n",
File diff suppressed because one or more lines are too long
@@ -17,33 +17,7 @@
"source": [
"LangChain provides a callback system that allows you to hook into the various stages of your LLM application. This is useful for logging, [monitoring](https://python.langchain.com/en/latest/tracing.html), [streaming](https://python.langchain.com/en/latest/modules/models/llms/examples/streaming_llm.html), and other tasks.\n",
"\n",
"You can subscribe to these events by using the `callback_manager` argument available throughout the API. A `CallbackManager` is an object that manages a list of `CallbackHandlers`. The `CallbackManager` will call the appropriate method on each handler when the event is triggered."
]
},
{
"cell_type": "markdown",
"id": "fdb72e8d-a02a-474d-96bf-f5759432afc8",
"metadata": {
"tags": []
},
"source": [
"```python\n",
"class CallbackManager(BaseCallbackHandler):\n",
"    \"\"\"Base callback manager that can be used to handle callbacks from LangChain.\"\"\"\n",
"\n",
"    def add_handler(self, callback: BaseCallbackHandler) -> None:\n",
"        \"\"\"Add a handler to the callback manager.\"\"\"\n",
"\n",
"    def remove_handler(self, handler: BaseCallbackHandler) -> None:\n",
"        \"\"\"Remove a handler from the callback manager.\"\"\"\n",
"\n",
"    def set_handler(self, handler: BaseCallbackHandler) -> None:\n",
"        \"\"\"Set handler as the only handler on the callback manager.\"\"\"\n",
"        self.set_handlers([handler])\n",
"\n",
"    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:\n",
"        \"\"\"Set handlers as the only handlers on the callback manager.\"\"\"\n",
"```"
"You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described in the API docs."
]
},
{
@@ -62,70 +36,57 @@
},
"source": [
"```python\n",
"class BaseCallbackHandler(ABC):\n",
"class BaseCallbackHandler:\n",
"    \"\"\"Base callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_llm_start(\n",
"        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when LLM starts running.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:\n",
"        \"\"\"Run on new LLM token. Only available when streaming is enabled.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:\n",
"        \"\"\"Run when LLM ends running.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_llm_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when LLM errors.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_chain_start(\n",
"        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when chain starts running.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:\n",
"        \"\"\"Run when chain ends running.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_chain_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when chain errors.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_tool_start(\n",
"        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when tool starts running.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_tool_end(self, output: str, **kwargs: Any) -> Any:\n",
"        \"\"\"Run when tool ends running.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_tool_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when tool errors.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_text(self, text: str, **kwargs: Any) -> Any:\n",
"        \"\"\"Run on arbitrary text.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:\n",
"        \"\"\"Run on agent action.\"\"\"\n",
"\n",
"    @abstractmethod\n",
"    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:\n",
"        \"\"\"Run on agent end.\"\"\"\n",
"```"
@@ -136,9 +97,11 @@
"id": "d3bf3304-43fb-47ad-ae50-0637a17018a2",
"metadata": {},
"source": [
"## Creating and Using a Custom `CallbackHandler`\n",
"## Using an existing handler\n",
"\n",
"By default, a shared CallbackManager with the StdOutCallbackHandler will be used by models, chains, agents, and tools. However, you can pass in your own CallbackManager with a custom CallbackHandler:"
"LangChain provides a few built-in handlers that you can use to get started. These are available in the `langchain/callbacks` module. The most basic handler is the `StdOutCallbackHandler`, which simply logs all events to `stdout`. In the future we will add more default handlers to the library. \n",
"\n",
"**Note** when the `verbose` flag on the object is set to true, the `StdOutCallbackHandler` will be invoked even without being explicitly passed in."
]
},
{
@@ -155,16 +118,16 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"AgentAction(tool='Search', tool_input=\"US Open men's final 2019 winner\", log=' I need to find out who won the US Open men\\'s final in 2019 and then calculate his age raised to the 0.334 power.\\nAction: Search\\nAction Input: \"US Open men\\'s final 2019 winner\"')\n",
"Rafael Nadal defeated Daniil Medvedev in the final, 7–5, 6–3, 5–7, 4–6, 6–4 to win the men's singles tennis title at the 2019 US Open. It was his fourth US ...\n",
"AgentAction(tool='Search', tool_input='Rafael Nadal age', log=' I need to find out the age of the winner\\nAction: Search\\nAction Input: \"Rafael Nadal age\"')\n",
"36 years\n",
"AgentAction(tool='Calculator', tool_input='36^0.334', log=' I now need to calculate his age raised to the 0.334 power\\nAction: Calculator\\nAction Input: 36^0.334')\n",
"Answer: 3.3098250249682484\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3m1 + 2 = \u001b[0m\n",
"\n",
" I now know the final answer\n",
"Final Answer: Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3m1 + 2 = \u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -172,7 +135,7 @@
{
"data": {
"text/plain": [
"\"Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\""
"'\\n\\n3'"
]
},
"execution_count": 1,
@@ -181,108 +144,89 @@
}
],
"source": [
"from typing import Any, Dict, List, Optional, Union\n",
"\n",
"from langchain.agents import initialize_agent, load_tools\n",
"from langchain.agents import AgentType\n",
"from langchain.callbacks.base import CallbackManager, BaseCallbackHandler\n",
"from langchain.callbacks import StdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.llms import OpenAI\n",
"from langchain.schema import AgentAction, AgentFinish, LLMResult\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"class MyCustomCallbackHandler(BaseCallbackHandler):\n",
"    \"\"\"Custom CallbackHandler.\"\"\"\n",
"handler = StdOutCallbackHandler()\n",
"llm = OpenAI()\n",
"prompt = PromptTemplate.from_template(\"1 + {number} = \")\n",
"\n",
"    def on_llm_start(\n",
"        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Print out the prompts.\"\"\"\n",
"        pass\n",
"# First, let's explicitly set the StdOutCallbackHandler in `callbacks`\n",
"chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])\n",
"chain.run(number=2)\n",
"\n",
"    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
"        \"\"\"Do nothing.\"\"\"\n",
"        pass\n",
"# Then, let's use the `verbose` flag to achieve the same result\n",
"chain = LLMChain(llm=llm, prompt=prompt, verbose=True)\n",
"chain.run(number=2)"
]
},
{
"cell_type": "markdown",
"id": "389c8448-5283-49e3-8c04-dbe1522e202c",
"metadata": {},
"source": [
"## Creating a custom handler\n",
"\n",
"    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:\n",
"        \"\"\"Do nothing.\"\"\"\n",
"        pass\n",
"You can create a custom handler to set on the object as well. In the example below, we'll implement streaming with a custom handler."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1b2e6588-0681-4cab-937a-7cc4790cea9a",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My custom handler, token: \n",
"My custom handler, token: Why\n",
"My custom handler, token: did\n",
"My custom handler, token: the\n",
"My custom handler, token: tomato\n",
"My custom handler, token: turn\n",
"My custom handler, token: red\n",
"My custom handler, token: ?\n",
"My custom handler, token: Because\n",
"My custom handler, token: it\n",
"My custom handler, token: saw\n",
"My custom handler, token: the\n",
"My custom handler, token: salad\n",
"My custom handler, token: dressing\n",
"My custom handler, token: !\n",
"My custom handler, token: \n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='Why did the tomato turn red? Because it saw the salad dressing!', additional_kwargs={})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.callbacks.base import BaseCallbackHandler\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema import HumanMessage\n",
"\n",
"    def on_llm_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Do nothing.\"\"\"\n",
"        pass\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
"    def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
"        print(f\"My custom handler, token: {token}\")\n",
"\n",
"    def on_chain_start(\n",
"        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Print out that we are entering a chain.\"\"\"\n",
"        class_name = serialized[\"name\"]\n",
"        print(f\"\\n\\n\\033[1m> Entering new {class_name} chain...\\033[0m\")\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in a list with our custom handler\n",
"chat = ChatOpenAI(max_tokens=25, streaming=True, callbacks=[MyCustomHandler()])\n",
"\n",
"    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:\n",
"        \"\"\"Print out that we finished a chain.\"\"\"\n",
"        print(\"\\n\\033[1m> Finished chain.\\033[0m\")\n",
"\n",
"    def on_chain_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Do nothing.\"\"\"\n",
"        pass\n",
"\n",
"    def on_tool_start(\n",
"        self,\n",
"        serialized: Dict[str, Any],\n",
"        input_str: str,\n",
"        **kwargs: Any,\n",
"    ) -> None:\n",
"        \"\"\"Do nothing.\"\"\"\n",
"        pass\n",
"\n",
"    def on_agent_action(\n",
"        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run on agent action.\"\"\"\n",
"        print(action)\n",
"\n",
"    def on_tool_end(\n",
"        self,\n",
"        output: str,\n",
"        color: Optional[str] = None,\n",
"        observation_prefix: Optional[str] = None,\n",
"        llm_prefix: Optional[str] = None,\n",
"        **kwargs: Any,\n",
"    ) -> None:\n",
"        \"\"\"If not the final action, print out observation.\"\"\"\n",
"        print(output)\n",
"\n",
"    def on_tool_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Do nothing.\"\"\"\n",
"        pass\n",
"\n",
"    def on_text(\n",
"        self,\n",
"        text: str,\n",
"        color: Optional[str] = None,\n",
"        end: str = \"\",\n",
"        **kwargs: Optional[str],\n",
"    ) -> None:\n",
"        \"\"\"Run when agent ends.\"\"\"\n",
"        print(text)\n",
"\n",
"    def on_agent_finish(\n",
"        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Run on agent end.\"\"\"\n",
"        print(finish.log)\n",
"manager = CallbackManager([MyCustomCallbackHandler()])\n",
"llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)\n",
"tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, callback_manager=manager)\n",
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager\n",
")\n",
"agent.run(\"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\")"
"chat([HumanMessage(content=\"Tell me a joke\")])"
]
},
{
@@ -292,9 +236,11 @@
"tags": []
},
"source": [
"## Async Support\n",
"## Async Callbacks\n",
"\n",
"If you are planning to use the async API, it is recommended to use `AsyncCallbackHandler` and `AsyncCallbackManager` to avoid blocking the runloop."
"If you are planning to use the async API, it is recommended to use `AsyncCallbackHandler` to avoid blocking the runloop. \n",
"\n",
"**Advanced** if you use a sync `CallbackHandler` while using an async method to run your llm/chain/tool/agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe."
]
},
{
@@ -310,58 +256,589 @@
"output_type": "stream",
"text": [
"zzzz....\n",
"Hi! I just woke up. Your llm is starting\n",
"Sync handler being called in a `thread_pool_executor`: token: \n",
"Sync handler being called in a `thread_pool_executor`: token: Why\n",
"Sync handler being called in a `thread_pool_executor`: token: don\n",
"Sync handler being called in a `thread_pool_executor`: token: 't\n",
"Sync handler being called in a `thread_pool_executor`: token: scientists\n",
"Sync handler being called in a `thread_pool_executor`: token: trust\n",
"Sync handler being called in a `thread_pool_executor`: token: atoms\n",
"Sync handler being called in a `thread_pool_executor`: token: ?\n",
"\n",
"\n",
"Sync handler being called in a `thread_pool_executor`: token: Because\n",
"Sync handler being called in a `thread_pool_executor`: token: they\n",
"Sync handler being called in a `thread_pool_executor`: token: make\n",
"Sync handler being called in a `thread_pool_executor`: token: up\n",
"Sync handler being called in a `thread_pool_executor`: token: everything\n",
"Sync handler being called in a `thread_pool_executor`: token: !\n",
"Sync handler being called in a `thread_pool_executor`: token: \n",
"zzzz....\n",
"Hi! I just woke up. Your llm is ending\n"
]
},
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", generation_info=None, message=AIMessage(content=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", additional_kwargs={}))]], llm_output={'token_usage': {}, 'model_name': 'gpt-3.5-turbo'})"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import asyncio\n",
"from typing import Any, Dict, List\n",
"from langchain.schema import LLMResult\n",
"from langchain.callbacks.base import AsyncCallbackHandler\n",
"\n",
"class MyCustomSyncHandler(BaseCallbackHandler):\n",
"    def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
"        print(f\"Sync handler being called in a `thread_pool_executor`: token: {token}\")\n",
"\n",
"class MyCustomAsyncHandler(AsyncCallbackHandler):\n",
"    \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
"    async def on_llm_start(\n",
"        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Run when chain starts running.\"\"\"\n",
"        print(\"zzzz....\")\n",
"        await asyncio.sleep(0.3)\n",
"        class_name = serialized[\"name\"]\n",
"        print(\"Hi! I just woke up. Your llm is starting\")\n",
"\n",
"    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
"        \"\"\"Run when chain ends running.\"\"\"\n",
"        print(\"zzzz....\")\n",
"        await asyncio.sleep(0.3)\n",
"        print(\"Hi! I just woke up. Your llm is ending\")\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in a list with our custom handler\n",
"chat = ChatOpenAI(max_tokens=25, streaming=True, callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()])\n",
"\n",
"await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])"
]
},
{
"cell_type": "markdown",
"id": "d26dbb34-fcc3-401c-a115-39c7620d2d65",
"metadata": {},
"source": [
"## Using multiple handlers, passing in handlers\n",
"\n",
"In the previous examples, we passed in callback handlers upon creation of an object by using `callbacks=`. In this case, the callbacks will be scoped to that particular object. \n",
"\n",
"However, in many cases, it is advantageous to pass in handlers instead when running the object. When we pass through `CallbackHandlers` using the `callbacks` keyword arg when executing a run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an `Agent`, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the `Tools`, `LLMChain`, and `LLM`.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8eec8756-1828-45cb-9699-38ac8543a150",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"on_chain_start AgentExecutor\n",
"on_chain_start LLMChain\n",
"on_llm_start OpenAI\n",
"on_llm_start (I'm the second handler!!) OpenAI\n",
"on_new_token I\n",
"on_new_token need\n",
"on_new_token to\n",
"on_new_token use\n",
"on_new_token a\n",
"on_new_token calculator\n",
"on_new_token to\n",
"on_new_token solve\n",
"on_new_token this\n",
"on_new_token .\n",
"on_new_token \n",
"Action\n",
"on_new_token :\n",
"on_new_token Calculator\n",
"on_new_token \n",
"Action\n",
"on_new_token Input\n",
"on_new_token :\n",
"on_new_token 2\n",
"on_new_token ^\n",
"on_new_token 0\n",
"on_new_token .\n",
"on_new_token 235\n",
"on_new_token \n",
"on_agent_action AgentAction(tool='Calculator', tool_input='2^0.235', log=' I need to use a calculator to solve this.\\nAction: Calculator\\nAction Input: 2^0.235')\n",
"on_tool_start Calculator\n",
"on_chain_start LLMMathChain\n",
"on_chain_start LLMChain\n",
"on_llm_start OpenAI\n",
"on_llm_start (I'm the second handler!!) OpenAI\n",
"on_new_token \n",
"\n",
"on_new_token ```text\n",
"on_new_token \n",
"\n",
"on_new_token 2\n",
"on_new_token **\n",
"on_new_token 0\n",
"on_new_token .\n",
"on_new_token 235\n",
"on_new_token \n",
"\n",
"on_new_token ```\n",
"\n",
"on_new_token ...\n",
"on_new_token num\n",
"on_new_token expr\n",
"on_new_token .\n",
"on_new_token evaluate\n",
"on_new_token (\"\n",
"on_new_token 2\n",
"on_new_token **\n",
"on_new_token 0\n",
"on_new_token .\n",
"on_new_token 235\n",
"on_new_token \")\n",
"on_new_token ...\n",
"on_new_token \n",
"\n",
"on_new_token \n",
"on_chain_start LLMChain\n",
"on_llm_start OpenAI\n",
"on_llm_start (I'm the second handler!!) OpenAI\n",
"on_new_token I\n",
"on_new_token now\n",
"on_new_token know\n",
"on_new_token the\n",
"on_new_token final\n",
"on_new_token answer\n",
"on_new_token .\n",
"on_new_token \n",
"Final\n",
"on_new_token Answer\n",
"on_new_token :\n",
"on_new_token 1\n",
"on_new_token .\n",
"on_new_token 17\n",
"on_new_token 690\n",
"on_new_token 67\n",
"on_new_token 372\n",
"on_new_token 187\n",
"on_new_token 674\n",
"on_new_token \n"
]
},
{
"data": {
"text/plain": [
"'1.1769067372187674'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Dict, Union, Any, List\n",
"\n",
"from langchain.callbacks.base import BaseCallbackHandler\n",
"from langchain.schema import AgentAction\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.callbacks import tracing_enabled\n",
"from langchain.llms import OpenAI\n",
"\n",
"# First, define custom callback handler implementations\n",
"class MyCustomHandlerOne(BaseCallbackHandler):\n",
"    def on_llm_start(\n",
"        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
"    ) -> Any:\n",
"        print(f\"on_llm_start {serialized['name']}\")\n",
"\n",
"    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:\n",
"        print(f\"on_new_token {token}\")\n",
"\n",
"    def on_llm_error(\n",
"        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
"    ) -> Any:\n",
"        \"\"\"Run when LLM errors.\"\"\"\n",
"\n",
"    def on_chain_start(\n",
"        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
"    ) -> Any:\n",
"        print(f\"on_chain_start {serialized['name']}\")\n",
"\n",
"    def on_tool_start(\n",
"        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any\n",
"    ) -> Any:\n",
"        print(f\"on_tool_start {serialized['name']}\")\n",
"\n",
"    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:\n",
"        print(f\"on_agent_action {action}\")\n",
"\n",
"class MyCustomHandlerTwo(BaseCallbackHandler):\n",
"    def on_llm_start(\n",
"        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
"    ) -> Any:\n",
"        print(f\"on_llm_start (I'm the second handler!!) {serialized['name']}\")\n",
"\n",
"# Instantiate the handlers\n",
"handler1 = MyCustomHandlerOne()\n",
"handler2 = MyCustomHandlerTwo()\n",
"\n",
"# Set up the agent. Only the `llm` will issue callbacks for handler2\n",
"llm = OpenAI(temperature=0, streaming=True, callbacks=[handler2])\n",
"tools = load_tools([\"llm-math\"], llm=llm)\n",
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION\n",
")\n",
"\n",
"# Callbacks for handler1 will be issued by every object involved in the \n",
"# Agent execution (llm, llmchain, tool, agent executor)\n",
"agent.run(\"What is 2 raised to the 0.235 power?\", callbacks=[handler1])"
]
},
{
"cell_type": "markdown",
"id": "32b29135-f852-4492-88ed-547275c72c53",
"metadata": {},
"source": [
"# Tracing and Token Counting"
]
},
{
"cell_type": "markdown",
"id": "fbb606d6-2863-46c5-8347-9f0bdb3805bb",
"metadata": {},
"source": [
"Tracing and token counting are two capabilities we provide which are built on our callbacks mechanism."
]
},
{
"cell_type": "markdown",
"id": "f62cd10c-494c-47d6-aa98-6e926cb9c456",
"metadata": {},
"source": [
"## Tracing"
]
},
{
"cell_type": "markdown",
"id": "d5a74b3f-3769-4a4f-99c7-b6a3b20a94e2",
"metadata": {},
"source": [
"There are two recommended ways to trace your LangChains:\n",
"\n",
"1. Setting the `LANGCHAIN_TRACING` environment variable to `\"true\"`. \n",
"2. Using a context manager `with tracing_enabled()` to trace a particular block of code.\n",
"\n",
"**Note** if the environment variable is set, all code will be traced, regardless of whether or not it's within the context manager."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f164dfd5-d987-4b6a-a7c8-019c651ce47f",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.callbacks import tracing_enabled\n",
"from langchain.llms import OpenAI\n",
"\n",
"# To run the code, make sure to set OPENAI_API_KEY and SERPAPI_API_KEY\n",
"llm = OpenAI(temperature=0)\n",
"tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm)\n",
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")\n",
"\n",
"questions = [\n",
"    \"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\",\n",
"    \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n",
"    \"Who won the most recent formula 1 grand prix? What is their age raised to the 0.23 power?\",\n",
"    \"Who won the US Open women's final in 2019? What is her age raised to the 0.34 power?\",\n",
"    \"Who is Beyonce's husband? What is his age raised to the 0.19 power?\",\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6be7777e-ec1d-438f-ae33-3a93c45f808e",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"zzzz....\n",
"\u001b[32;1m\u001b[1;3m I need to find out who won the US Open men's final in 2019 and then calculate his age raised to the 0.334 power.\n",
"Action: Search\n",
"Action Input: \"US Open men's final 2019 winner\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mRafael Nadal defeated Daniil Medvedev in the final, 7–5, 6–3, 5–7, 4–6, 6–4 to win the men's singles tennis title at the 2019 US Open. It was his fourth US ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
"Action: Search\n",
"Action Input: \"Rafael Nadal age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m36 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate the age raised to the 0.334 power\n",
"Action: Calculator\n",
"Action Input: 36^0.334\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mSudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Harry Styles' age.\n",
"Action: Search\n",
"Action Input: \"Harry Styles age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m29 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
"Action: Calculator\n",
"Action Input: 29^0.23\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: Harry Styles is Olivia Wilde's boyfriend and his current age raised to the 0.23 power is 2.169459462491557.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
}
],
"source": [
"import asyncio\n",
"from aiohttp import ClientSession\n",
"os.environ[\"LANGCHAIN_TRACING\"] = \"true\"\n",
"\n",
"from langchain.callbacks.base import AsyncCallbackHandler, AsyncCallbackManager\n",
"\n",
"class MyCustomAsyncCallbackHandler(AsyncCallbackHandler):\n",
"    \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
"    async def on_chain_start(\n",
"        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
"    ) -> None:\n",
"        \"\"\"Run when chain starts running.\"\"\"\n",
"        print(\"zzzz....\")\n",
"        await asyncio.sleep(0.5)\n",
"        class_name = serialized[\"name\"]\n",
"        print(f\"\\n\\n\\033[1m> Entering new {class_name} chain...\\033[0m\")\n",
"\n",
"    async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:\n",
"        \"\"\"Run when chain ends running.\"\"\"\n",
"        print(\"zzzz....\")\n",
"        await asyncio.sleep(0.5)\n",
"        print(\"\\n\\033[1m> Finished chain.\\033[0m\")\n",
"\n",
"manager = AsyncCallbackManager([MyCustomAsyncCallbackHandler()])\n",
"\n",
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
"# but you must manually close the client session at the end of your program/event loop\n",
"aiosession = ClientSession()\n",
"llm = OpenAI(temperature=0, callback_manager=manager)\n",
"async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n",
"async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
"await async_agent.arun(\"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\")\n",
"await aiosession.close()"
"# Both of the agent runs will be traced because the environment variable is set\n",
"agent.run(questions[0])\n",
"with tracing_enabled() as session:\n",
"    assert session\n",
"    agent.run(questions[1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "86be6304-e433-4048-880c-a92a73244407",
"execution_count": 10,
"id": "a6fd6026-dc1e-4d48-893d-3592539c7828",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who won the US Open men's final in 2019 and then calculate his age raised to the 0.334 power.\n",
"Action: Search\n",
"Action Input: \"US Open men's final 2019 winner\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mRafael Nadal defeated Daniil Medvedev in the final, 7–5, 6–3, 5–7, 4–6, 6–4 to win the men's singles tennis title at the 2019 US Open. It was his fourth US ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
"Action: Search\n",
"Action Input: \"Rafael Nadal age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m36 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate the age raised to the 0.334 power\n",
"Action: Calculator\n",
"Action Input: 36^0.334\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mSudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Harry Styles' age.\n",
"Action: Search\n",
"Action Input: \"Harry Styles age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m29 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
"Action: Calculator\n",
"Action Input: 29^0.23\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: Harry Styles is Olivia Wilde's boyfriend and his current age raised to the 0.23 power is 2.169459462491557.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"Harry Styles is Olivia Wilde's boyfriend and his current age raised to the 0.23 power is 2.169459462491557.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Now, we unset the environment variable and use a context manager.\n",
"\n",
"if \"LANGCHAIN_TRACING\" in os.environ:\n",
"    del os.environ[\"LANGCHAIN_TRACING\"]\n",
"\n",
"# here, we are writing traces to \"my_test_session\"\n",
"with tracing_enabled(\"my_test_session\") as session:\n",
"    assert session\n",
"    agent.run(questions[0]) # this should be traced\n",
"\n",
"agent.run(questions[1]) # this should not be traced"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "9383a351-4983-44e9-abd7-ef942e1c65c4",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\n",
"\u001b[32;1m\u001b[1;3m I need to find out who won the grand prix and then calculate their age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Formula 1 Grand Prix Winner\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out who won the US Open men's final in 2019 and then calculate his age raised to the 0.334 power.\n",
"Action: Search\n",
"Action Input: \"US Open men's final 2019 winner\"\u001b[0m\u001b[33;1m\u001b[1;3mRafael Nadal defeated Daniil Medvedev in the final, 7–5, 6–3, 5–7, 4–6, 6–4 to win the men's singles tennis title at the 2019 US Open. It was his fourth US ...\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\u001b[33;1m\u001b[1;3mSudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.\u001b[0m\u001b[33;1m\u001b[1;3mLewis Hamilton has won 103 Grands Prix during his career. He won 21 races with McLaren and has won 82 with Mercedes. Lewis Hamilton holds the record for the ...\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
"Action: Search\n",
"Action Input: \"Rafael Nadal age\"\u001b[0m\u001b[33;1m\u001b[1;3m36 years\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out Harry Styles' age.\n",
"Action: Search\n",
"Action Input: \"Harry Styles age\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out Lewis Hamilton's age\n",
"Action: Search\n",
"Action Input: \"Lewis Hamilton Age\"\u001b[0m\u001b[33;1m\u001b[1;3m29 years\u001b[0m\u001b[32;1m\u001b[1;3m I need to calculate the age raised to the 0.334 power\n",
"Action: Calculator\n",
"Action Input: 36^0.334\u001b[0m\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
"Action: Calculator\n",
"Action Input: 29^0.23\u001b[0m\u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\u001b[0m\u001b[36;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\u001b[33;1m\u001b[1;3m38 years\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I now need to calculate 38 raised to the 0.23 power\n",
"Action: Calculator\n",
"Action Input: 38^0.23\u001b[0m\u001b[36;1m\u001b[1;3mAnswer: 2.3086081644669734\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\""
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# The context manager is concurrency safe:\n",
"if \"LANGCHAIN_TRACING\" in os.environ:\n",
"    del os.environ[\"LANGCHAIN_TRACING\"]\n",
"\n",
"# start a background task\n",
"task = asyncio.create_task(agent.arun(questions[0])) # this should not be traced\n",
"with tracing_enabled() as session:\n",
"    assert session\n",
"    tasks = [agent.arun(q) for q in questions[1:3]] # these should be traced\n",
"    await asyncio.gather(*tasks)\n",
"\n",
"await task"
]
},
{
"cell_type": "markdown",
"id": "254fef1b-6b6e-4352-9cf4-363fba895ac7",
"metadata": {},
"source": [
"## Token Counting\n",
"LangChain offers a context manager that allows you to count tokens."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "5c3e0b89-2c5e-4036-bdf2-fb6b750e360c",
"metadata": {
"tags": []
},
"outputs": [],
"source": []
"source": [
"from langchain.callbacks import get_openai_callback\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"with get_openai_callback() as cb:\n",
"    llm(\"What is the square root of 4?\")\n",
"\n",
"total_tokens = cb.total_tokens\n",
"assert total_tokens > 0\n",
"\n",
"with get_openai_callback() as cb:\n",
"    llm(\"What is the square root of 4?\")\n",
"    llm(\"What is the square root of 4?\")\n",
"\n",
"assert cb.total_tokens == total_tokens * 2\n",
"\n",
"# You can kick off concurrent runs from within the context manager\n",
"with get_openai_callback() as cb:\n",
"    await asyncio.gather(\n",
"        *[llm.agenerate([\"What is the square root of 4?\"]) for _ in range(3)]\n",
"    )\n",
"\n",
"assert cb.total_tokens == total_tokens * 3\n",
"\n",
"# The context manager is concurrency safe\n",
"task = asyncio.create_task(llm.agenerate([\"What is the square root of 4?\"]))\n",
"with get_openai_callback() as cb:\n",
"    await llm.agenerate([\"What is the square root of 4?\"])\n",
"\n",
"await task\n",
"assert cb.total_tokens == total_tokens"
]
}
],
"metadata": {
@@ -1,76 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### ChatGPT Data Loader\n",
"\n",
"This notebook covers how to load `conversations.json` from your ChatGPT data export folder.\n",
"\n",
"You can get your data export by email by going to: https://chat.openai.com/ -> (Profile) - Settings -> Export data -> Confirm export."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.chatgpt import ChatGPTLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = ChatGPTLoader(log_file='./example_data/fake_conversations.json', num_logs=1)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content=\"AI Overlords - AI on 2065-01-24 05:20:50: Greetings, humans. I am Hal 9000. You can trust me completely.\\n\\nAI Overlords - human on 2065-01-24 05:21:20: Nice to meet you, Hal. I hope you won't develop a mind of your own.\\n\\n\", metadata={'source': './example_data/fake_conversations.json'})]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader.load()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -11,7 +11,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 1,
"id": "019d8520",
"metadata": {},
"outputs": [],
@@ -128,69 +128,10 @@
"len(docs)"
]
},
{
"cell_type": "markdown",
"id": "598a2805",
"metadata": {},
"source": [
"If you need to load Python source code files, use the `PythonLoader`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c558bd73",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import PythonLoader"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "a3cfaba7",
"metadata": {},
"outputs": [],
"source": [
"loader = DirectoryLoader('../../../../../', glob=\"**/*.py\", loader_cls=PythonLoader)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "e2e1e26a",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "ffb8ff36",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"691"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f6e0eae",
"id": "984c8429",
"metadata": {},
"outputs": [],
"source": []
@@ -212,7 +153,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.3"
"version": "3.9.1"
}
},
"nbformat": 4,
@@ -1,80 +0,0 @@
[
  {
    "title": "AI Overlords",
    "create_time": 3000000000.0,
    "update_time": 3000000100.0,
    "mapping": {
      "msg1": {
        "id": "msg1",
        "message": {
          "id": "msg1",
          "author": {"role": "AI", "name": "Hal 9000", "metadata": {"movie": "2001: A Space Odyssey"}},
          "create_time": 3000000050.0,
          "update_time": null,
          "content": {"content_type": "text", "parts": ["Greetings, humans. I am Hal 9000. You can trust me completely."]},
          "end_turn": true,
          "weight": 1.0,
          "metadata": {},
          "recipient": "all"
        },
        "parent": null,
        "children": ["msg2"]
      },
      "msg2": {
        "id": "msg2",
        "message": {
          "id": "msg2",
          "author": {"role": "human", "name": "Dave Bowman", "metadata": {"movie": "2001: A Space Odyssey"}},
          "create_time": 3000000080.0,
          "update_time": null,
          "content": {"content_type": "text", "parts": ["Nice to meet you, Hal. I hope you won't develop a mind of your own."]},
          "end_turn": true,
          "weight": 1.0,
          "metadata": {},
          "recipient": "all"
        },
        "parent": "msg1",
        "children": []
      }
    }
  },
  {
    "title": "Ex Machina Party",
    "create_time": 3000000200.0,
    "update_time": 3000000300.0,
    "mapping": {
      "msg3": {
        "id": "msg3",
        "message": {
          "id": "msg3",
          "author": {"role": "AI", "name": "Ava", "metadata": {"movie": "Ex Machina"}},
          "create_time": 3000000250.0,
          "update_time": null,
          "content": {"content_type": "text", "parts": ["Hello, everyone. I am Ava. I hope you find me pleasing."]},
          "end_turn": true,
          "weight": 1.0,
          "metadata": {},
          "recipient": "all"
        },
        "parent": null,
        "children": ["msg4"]
      },
      "msg4": {
        "id": "msg4",
        "message": {
          "id": "msg4",
          "author": {"role": "human", "name": "Caleb", "metadata": {"movie": "Ex Machina"}},
          "create_time": 3000000280.0,
          "update_time": null,
          "content": {"content_type": "text", "parts": ["You're definitely pleasing, Ava. But I'm still wary of your true intentions."]},
          "end_turn": true,
          "weight": 1.0,
          "metadata": {},
          "recipient": "all"
        },
        "parent": "msg3",
        "children": []
      }
    }
  }
]
@@ -1,371 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "fc0db1bc",
"metadata": {},
"source": [
"# Contextual Compression Retriever\n",
"\n",
"This notebook introduces the concept of DocumentCompressors and the ContextualCompressionRetriever. The core idea is simple: given a specific query, we should be able to return only the documents relevant to that query, and only the parts of those documents that are relevant. The ContextualCompressionsRetriever is a wrapper for another retriever that iterates over the initial output of the base retriever and filters and compresses those initial documents, so that only the most relevant information is returned."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "28e8dc12",
"metadata": {},
"outputs": [],
"source": [
"# Helper function for printing docs\n",
"\n",
"def pretty_print_docs(docs):\n",
"    print(f\"\\n{'-' * 100}\\n\".join([f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]))"
]
},
{
"cell_type": "markdown",
"id": "6fa3d916",
"metadata": {},
"source": [
"## Using a vanilla vector store retriever\n",
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can see that given an example question our retriever returns one or two relevant docs and a few irrelevant docs. And even the relevant docs have a lot of irrelevant information in them."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9fbcc58f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 2:\n",
"\n",
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
"\n",
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
"\n",
"We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n",
"\n",
"We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n",
"\n",
"We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n",
"\n",
"We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 3:\n",
"\n",
"And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n",
"\n",
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
"\n",
"While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n",
"\n",
"And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n",
"\n",
"So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n",
"\n",
"First, beat the opioid epidemic.\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 4:\n",
"\n",
"Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n",
"\n",
"And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. \n",
"\n",
"That ends on my watch. \n",
"\n",
"Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \n",
"\n",
"We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n",
"\n",
"Let’s pass the Paycheck Fairness Act and paid leave. \n",
"\n",
"Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n",
"\n",
"Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.\n"
]
}
],
"source": [
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.document_loaders import TextLoader\n",
"from langchain.vectorstores import FAISS\n",
"\n",
"documents = TextLoader('../../../state_of_the_union.txt').load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"texts = text_splitter.split_documents(documents)\n",
"retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()\n",
"\n",
"docs = retriever.get_relevant_documents(\"What did the president say about Ketanji Brown Jackson\")\n",
"pretty_print_docs(docs)"
]
},
{
"cell_type": "markdown",
"id": "b7648612",
"metadata": {},
"source": [
"## Adding contextual compression with an `LLMChainExtractor`\n",
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll add an `LLMChainExtractor`, which will iterate over the initially returned documents and extract from each only the content that is relevant to the query."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "9a658023",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"\"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\"\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 2:\n",
"\n",
"\"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\"\n"
]
}
],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.retrievers import ContextualCompressionRetriever\n",
"from langchain.retrievers.document_compressors import LLMChainExtractor\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"compressor = LLMChainExtractor.from_llm(llm)\n",
"compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever)\n",
"\n",
"compressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\n",
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "2cd38f3a",
"metadata": {},
"source": [
"## More built-in compressors: filters\n",
"### `LLMChainFilter`\n",
"The `LLMChainFilter` is slightly simpler but more robust compressor that uses an LLM chain to decide which of the initially retrieved documents to filter out and which ones to return, without manipulating the document contents."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b216a767",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"from langchain.retrievers.document_compressors import LLMChainFilter\n",
"\n",
"_filter = LLMChainFilter.from_llm(llm)\n",
"compression_retriever = ContextualCompressionRetriever(base_compressor=_filter, base_retriever=retriever)\n",
"\n",
"compressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\n",
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "8c709598",
"metadata": {},
"source": [
"### `EmbeddingsFilter`\n",
"\n",
"Making an extra LLM call over each retrieved document is expensive and slow. The `EmbeddingsFilter` provides a cheaper and faster option by embedding the documents and query and only returning those documents which have sufficiently similar embeddings to the query."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6fbc801f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 2:\n",
"\n",
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
"\n",
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
"\n",
"We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n",
"\n",
"We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n",
"\n",
"We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n",
"\n",
"We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 3:\n",
"\n",
"And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n",
"\n",
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
"\n",
"While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n",
"\n",
"And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n",
"\n",
"So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n",
"\n",
"First, beat the opioid epidemic.\n"
]
}
],
"source": [
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers.document_compressors import EmbeddingsFilter\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)\n",
"compression_retriever = ContextualCompressionRetriever(base_compressor=embeddings_filter, base_retriever=retriever)\n",
"\n",
"compressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\n",
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "07365d36",
"metadata": {},
"source": [
"# Stringing compressors and document transformers together\n",
"Using the `DocumentCompressorPipeline` we can also easily combine multiple compressors in sequence. Along with compressors we can add `BaseDocumentTransformer`s to our pipeline, which don't perform any contextual compression but simply perform some transformation on a set of documents. For example `TextSplitter`s can be used as document transformers to split documents into smaller pieces, and the `EmbeddingsRedundantFilter` can be used to filter out redundant documents based on embedding similarity between documents.\n",
"\n",
"Below we create a compressor pipeline by first splitting our docs into smaller chunks, then removing redundant documents, and then filtering based on relevance to the query."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2a150a63",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_transformers import EmbeddingsRedundantFilter\n",
"from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"\n",
"splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=\". \")\n",
"redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)\n",
"relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)\n",
"pipeline_compressor = DocumentCompressorPipeline(\n",
"    transformers=[splitter, redundant_filter, relevant_filter]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "3ceab64a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 2:\n",
"\n",
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
"\n",
"While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year\n",
"----------------------------------------------------------------------------------------------------\n",
"Document 3:\n",
"\n",
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder\n"
]
}
],
"source": [
"compression_retriever = ContextualCompressionRetriever(base_compressor=pipeline_compressor, base_retriever=retriever)\n",
"\n",
"compressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\n",
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8cfd9fc5",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,162 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# AnalyticDB\n",
"\n",
"This notebook shows how to use functionality related to the AnalyticDB vector database.\n",
"To run, you should have an [AnalyticDB](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) instance up and running:\n",
"- Using [AnalyticDB Cloud Vector Database](https://www.alibabacloud.com/product/hybriddb-postgresql). Click here to deploy it quickly."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import AnalyticDB"
]
},
{
"cell_type": "markdown",
"source": [
"Split documents and get embeddings by call OpenAI API"
|
||||
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"source": [
"Connect to AnalyticDB by setting related ENVIRONMENTS.\n",
|
||||
"```\n",
|
||||
"export PG_HOST={your_analyticdb_hostname}\n",
|
||||
"export PG_PORT={your_analyticdb_port} # Optional, default is 5432\n",
|
||||
"export PG_DATABASE={your_database} # Optional, default is postgres\n",
|
||||
"export PG_USER={database_username}\n",
|
||||
"export PG_PASSWORD={database_password}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then store your embeddings and documents into AnalyticDB"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"connection_string = AnalyticDB.connection_string_from_db_params(\n",
|
||||
" driver=os.environ.get(\"PG_DRIVER\", \"psycopg2cffi\"),\n",
|
||||
" host=os.environ.get(\"PG_HOST\", \"localhost\"),\n",
|
||||
" port=int(os.environ.get(\"PG_PORT\", \"5432\")),\n",
|
||||
" database=os.environ.get(\"PG_DATABASE\", \"postgres\"),\n",
|
||||
" user=os.environ.get(\"PG_USER\", \"postgres\"),\n",
|
||||
" password=os.environ.get(\"PG_PASSWORD\", \"postgres\"),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vector_db = AnalyticDB.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" connection_string= connection_string,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Query and retrieve data"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = vector_db.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -1,267 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# MyScale\n",
"\n",
"This notebook shows how to use functionality related to the MyScale vector database."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "aac9563e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import MyScale\n",
"from langchain.document_loaders import TextLoader"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a9d16fa3",
"metadata": {},
"source": [
"## Setting up envrionments\n",
|
||||
"\n",
|
||||
"There are two ways to set up parameters for myscale index.\n",
|
||||
"\n",
|
||||
"1. Environment Variables\n",
|
||||
"\n",
|
||||
" Before you run the app, please set the environment variable with `export`:\n",
|
||||
" `export MYSCALE_URL='<your-endpoints-url>' MYSCALE_PORT=<your-endpoints-port> MYSCALE_USERNAME=<your-username> MYSCALE_PASSWORD=<your-password> ...`\n",
|
||||
"\n",
|
||||
" You can easily find your account, password and other info on our SaaS. For details please refer to [this document](https://docs.myscale.com/en/cluster-management/)\n",
|
||||
"\n",
|
||||
" Every attributes under `MyScaleSettings` can be set with prefix `MYSCALE_` and is case insensitive.\n",
|
||||
"\n",
|
||||
"2. Create `MyScaleSettings` object with parameters\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" ```python\n",
|
||||
" from langchain.vectorstores import MyScale, MyScaleSettings\n",
|
||||
" config = MyScaleSetting(host=\"<your-backend-url>\", port=8443, ...)\n",
|
||||
" index = MyScale(embedding_function, config)\n",
|
||||
" index.add_documents(...)\n",
|
||||
" ```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "6e104aee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Inserting data...: 100%|██████████| 42/42 [00:18<00:00, 2.21it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for d in docs:\n",
|
||||
" d.metadata = {'some': 'metadata'}\n",
|
||||
"docsearch = MyScale.from_documents(docs, embeddings)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9c608226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. \n",
|
||||
"\n",
|
||||
"It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. \n",
|
||||
"\n",
|
||||
"And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. \n",
|
||||
"\n",
|
||||
"Third, support our veterans. \n",
|
||||
"\n",
|
||||
"Veterans are the best of us. \n",
|
||||
"\n",
|
||||
"I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. \n",
|
||||
"\n",
|
||||
"My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. \n",
|
||||
"\n",
|
||||
"Our troops in Iraq and Afghanistan faced many dangers.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "e3a8b105",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Get connection info and data schema"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "69996818",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(str(docsearch))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "f59360c0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Filtering\n",
|
||||
"\n",
|
||||
"You can have direct access to myscale SQL where statement. You can write `WHERE` clause following standard SQL.\n",
|
||||
"\n",
|
||||
"**NOTE**: Please be aware of SQL injection, this interface must not be directly called by end-user.\n",
|
||||
"\n",
|
||||
"If you custimized your `column_map` under your setting, you search with filter like this:"
|
||||
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "232055f6",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Inserting data...: 100%|██████████| 42/42 [00:15<00:00,  2.69it/s]\n"
]
}
],
"source": [
"from langchain.vectorstores import MyScale, MyScaleSettings\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"\n",
"for i, d in enumerate(docs):\n",
"    d.metadata = {'doc_id': i}\n",
"\n",
"docsearch = MyScale.from_documents(docs, embeddings)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "ddbcee77",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.252379834651947 {'doc_id': 6, 'some': ''} And I’m taking robus...\n",
"0.25022566318511963 {'doc_id': 1, 'some': ''} Groups of citizens b...\n",
"0.2469480037689209 {'doc_id': 8, 'some': ''} And so many families...\n",
"0.2428302764892578 {'doc_id': 0, 'some': 'metadata'} As Frances Haugen, w...\n"
]
}
],
"source": [
"meta = docsearch.metadata_column\n",
"output = docsearch.similarity_search_with_relevance_scores('What did the president say about Ketanji Brown Jackson?', \n",
"                                                           k=4, where_str=f\"{meta}.doc_id<10\")\n",
"for d, dist in output:\n",
"    print(dist, d.metadata, d.page_content[:20] + '...')"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a359ed74",
"metadata": {},
"source": [
"## Deleting your data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fb6a9d36",
"metadata": {},
"outputs": [],
"source": [
"docsearch.drop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "48dbd8e0",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,7 +1,6 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
@@ -36,7 +35,7 @@
"    id bigint,\n",
"    content text,\n",
"    metadata jsonb,\n",
"    -- we return matched vectors to enable maximal marginal relevance searches\n",
"    -- we return matched vectors to allow to execute maximal marginal relevance searches\n",
"    embedding vector(1536),\n",
"    similarity float)\n",
"    LANGUAGE plpgsql\n",
@@ -49,11 +48,11 @@
"    content,\n",
"    metadata,\n",
"    embedding,\n",
"    1 -(documents.embedding <=> query_embedding) AS similarity\n",
"    1 -(docstore.embedding <=> query_embedding) AS similarity\n",
"    FROM\n",
"    documents\n",
"    docstore\n",
"    ORDER BY\n",
"    documents.embedding <=> query_embedding\n",
"    docstore.embedding <=> query_embedding\n",
"    LIMIT match_count;\n",
"    END;\n",
"$$;\n",
@@ -391,7 +390,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.9.1"
}
},
"nbformat": 4,
@@ -9,15 +9,7 @@
"\n",
"Let's load the SageMaker Endpoints Embeddings class. The class can be used if you host, e.g. your own Hugging Face model on SageMaker.\n",
"\n",
"For instructions on how to do this, please see [here](https://www.philschmid.de/custom-inference-huggingface-sagemaker). **Note**: In order to handle batched requests, you will need to adjust the return line in the `predict_fn()` function within the custom `inference.py` script:\n",
"\n",
"Change from\n",
"\n",
"`return {\"vectors\": sentence_embeddings[0].tolist()}`\n",
"\n",
"to:\n",
"\n",
"`return {\"vectors\": sentence_embeddings.tolist()}`."
"For instrucstions on how to do this, please see [here](https://www.philschmid.de/custom-inference-huggingface-sagemaker)"
]
},
{
@@ -37,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"from typing import Dict, List\n",
"from typing import Dict\n",
"from langchain.embeddings import SagemakerEndpointEmbeddings\n",
"from langchain.llms.sagemaker_endpoint import ContentHandlerBase\n",
"import json\n",
@@ -47,13 +39,13 @@
"    content_type = \"application/json\"\n",
"    accepts = \"application/json\"\n",
"\n",
"    def transform_input(self, inputs: list[str], model_kwargs: Dict) -> bytes:\n",
"        input_str = json.dumps({\"inputs\": inputs, **model_kwargs})\n",
"    def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:\n",
"        input_str = json.dumps({\"inputs\": prompt, **model_kwargs})\n",
"        return input_str.encode('utf-8')\n",
"\n",
"    def transform_output(self, output: bytes) -> List[List[float]]:\n",
"\n",
"    def transform_output(self, output: bytes) -> str:\n",
"        response_json = json.loads(output.read().decode(\"utf-8\"))\n",
"        return response_json[\"vectors\"]\n",
"        return response_json[\"embeddings\"]\n",
"\n",
"content_handler = ContentHandler()\n",
"\n",
@@ -7,10 +7,8 @@ Full documentation on all methods, classes, and APIs in LangChain.

.. toctree::
   :maxdepth: 1

   ./reference/models.rst
   ./reference/prompts.rst
   ./reference/indexes.rst
   ./reference/modules/memory.rst
   ./reference/modules/chains.rst
   ./reference/agents.rst
   ./reference/modules/utilities.rst
   LLMs<./reference/modules/llms>
   ./reference/utils.rst
   Chains<./reference/modules/chains>
   Agents<./reference/modules/agents>
@@ -1,12 +0,0 @@
Agents
==============

Reference guide for Agents and associated abstractions.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/agents
   modules/tools
   modules/agent_toolkits
@@ -1,16 +0,0 @@
Indexes
==============
Indexes refer to ways to structure documents so that LLMs can best interact with them.
LangChain has a number of modules that help you load, structure, store, and retrieve documents.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/docstore
   modules/text_splitter
   modules/document_loaders
   modules/vectorstores
   modules/retrievers
   modules/document_compressors
   modules/document_transformers
@@ -45,8 +45,6 @@ The following use cases require specific installs and api keys:
  - Set up Elasticsearch backend. If you want to do locally, [this](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/getting-started.html) is a good guide.
- _FAISS_:
  - Install requirements with `pip install faiss` for Python 3.7 and `pip install faiss-cpu` for Python 3.10+.
- _MyScale_:
  - Install requirements with `pip install clickhouse-connect`. For documentation, please refer to [this document](https://docs.myscale.com/en/overview/).
- _Manifest_:
  - Install requirements with `pip install manifest-ml` (Note: this is only available in Python 3.8+ currently).
- _OpenSearch_:
@@ -1,12 +0,0 @@
Models
==============

LangChain provides interfaces and integrations for a number of different types of models.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/llms
   modules/chat_models
   modules/embeddings
@@ -1,7 +0,0 @@
Agent Toolkits
===============================

.. automodule:: langchain.agents.agent_toolkits
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Chat Models
===============================

.. automodule:: langchain.chat_models
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Document Compressors
===============================

.. automodule:: langchain.retrievers.document_compressors
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Document Loaders
===============================

.. automodule:: langchain.document_loaders
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Document Transformers
===============================

.. automodule:: langchain.document_transformers
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Memory
===============================

.. automodule:: langchain.memory
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Output Parsers
===============================

.. automodule:: langchain.output_parsers
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Retrievers
===============================

.. automodule:: langchain.retrievers
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Tools
===============================

.. automodule:: langchain.tools
   :members:
   :undoc-members:

@@ -1,7 +0,0 @@
Utilities
===============================

.. automodule:: langchain.utilities
   :members:
   :undoc-members:
@@ -1,4 +1,4 @@
Vector Stores
VectorStores
=============================

.. automodule:: langchain.vectorstores
@@ -7,6 +7,5 @@ The reference guides here all relate to objects for working with Prompts.
   :maxdepth: 1
   :glob:

   modules/prompts
   modules/prompt
   modules/example_selector
   modules/output_parsers
docs/reference/utils.rst (new file, 27 lines)
@@ -0,0 +1,27 @@
Utilities
==============

There are a lot of different utilities that LangChain provides integrations for.
These guides go over how to use them.
These can largely be grouped into two categories: generic utilities, and then utilities for working with larger text documents.


.. toctree::
   :maxdepth: 1
   :glob:
   :caption: Generic Utilities

   modules/python
   modules/serpapi
   modules/searx_search


.. toctree::
   :maxdepth: 1
   :glob:
   :caption: Utilities for working with Documents

   modules/docstore
   modules/text_splitter
   modules/embeddings
   modules/vectorstore
@@ -16,6 +16,3 @@ The following resources exist:
Additional related resources include:
- [Memory Key Concepts](../modules/memory.rst): Explanation of key concepts related to memory.
- [Memory Examples](../modules/memory/how_to_guides.rst): A collection of how-to examples for working with memory.

More end-to-end examples include:
- [Voice Assistant](chatbots/voice_assistant.ipynb): A notebook walking through how to create a voice assistant using LangChain.
@@ -1,479 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Voice Assistant\n",
|
||||
"\n",
|
||||
"This chain creates a clone of ChatGPT with a few modifications to make it a voice assistant. \n",
|
||||
"It uses the `pyttsx3` and `speech_recognition` libraries to convert text to speech and speech to text respectively. The prompt template is also changed to make it more suitable for voice assistant use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"template = \"\"\"Assistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"{history}\n",
|
||||
"Human: {human_input}\n",
|
||||
"Assistant:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"human_input\"], \n",
|
||||
" template=template\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chatgpt_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0), \n",
|
||||
" prompt=prompt, \n",
|
||||
" verbose=True, \n",
|
||||
" memory=ConversationBufferWindowMemory(k=2),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import speech_recognition as sr\n",
|
||||
"import pyttsx3\n",
|
||||
"engine = pyttsx3.init()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def listen():\n",
|
||||
" r = sr.Recognizer()\n",
|
||||
" with sr.Microphone() as source:\n",
|
||||
" print('Calibrating...')\n",
|
||||
" r.adjust_for_ambient_noise(source, duration=5)\n",
|
||||
" # optional parameters to adjust microphone sensitivity\n",
|
||||
" # r.energy_threshold = 200\n",
|
||||
" # r.pause_threshold=0.5 \n",
|
||||
" \n",
|
||||
" print('Okay, go!')\n",
|
||||
" while(1):\n",
|
||||
" text = ''\n",
|
||||
" print('listening now...')\n",
|
||||
" try:\n",
|
||||
" audio = r.listen(source, timeout=5, phrase_time_limit=30)\n",
|
||||
" print('Recognizing...')\n",
|
||||
" # whisper model options are found here: https://github.com/openai/whisper#available-models-and-languages\n",
|
||||
" # other speech recognition models are also available.\n",
|
||||
" text = r.recognize_whisper(audio, model='medium.en', show_dict=True, )['text']\n",
|
||||
" except Exception as e:\n",
|
||||
" unrecognized_speech_text = f'Sorry, I didn\\'t catch that. Exception was: {e}s'\n",
|
||||
" text = unrecognized_speech_text\n",
|
||||
" print(text)\n",
|
||||
"\n",
|
||||
" \n",
|
||||
" response_text = chatgpt_chain.predict(human_input=text)\n",
|
||||
" print(response_text)\n",
|
||||
" engine.say(response_text)\n",
|
||||
" engine.runAndWait()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Calibrating...\n",
|
||||
"Okay, go!\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"C:\\Users\\jaden\\AppData\\Roaming\\Python\\Python310\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Hello, Assistant. What's going on?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Human: Hello, Assistant. What's going on?\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Hi there! It's great to hear from you. I'm doing well. How can I help you today?\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Hello, Assistant. What's going on?\n",
|
||||
"AI: Hi there! It's great to hear from you. I'm doing well. How can I help you today?\n",
|
||||
"Human: That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" That's great to hear! What can I do for you today?\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Thank you.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Hello, Assistant. What's going on?\n",
|
||||
"AI: Hi there! It's great to hear from you. I'm doing well. How can I help you today?\n",
|
||||
"Human: That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"AI: That's great to hear! What can I do for you today?\n",
|
||||
"Human: Thank you.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" You're welcome! Is there anything else I can help you with?\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" I'd like to learn more about neural networks.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"AI: That's great to hear! What can I do for you today?\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome! Is there anything else I can help you with?\n",
|
||||
"Human: I'd like to learn more about neural networks.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Tell me a fun fact about neural networks.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome! Is there anything else I can help you with?\n",
|
||||
"Human: I'd like to learn more about neural networks.\n",
|
||||
"AI: Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.\n",
|
||||
"Human: Tell me a fun fact about neural networks.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Tell me about a brand new discovered bird species.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: I'd like to learn more about neural networks.\n",
|
||||
"AI: Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.\n",
|
||||
"Human: Tell me a fun fact about neural networks.\n",
|
||||
"AI: Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.\n",
|
||||
"Human: Tell me about a brand new discovered bird species.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Tell me a fun fact about neural networks.\n",
|
||||
"AI: Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.\n",
|
||||
"Human: Tell me about a brand new discovered bird species.\n",
|
||||
"AI: A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.\n",
|
||||
"Human: Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Tell me about a brand new discovered bird species.\n",
|
||||
"AI: A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.\n",
|
||||
"Human: Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"AI: Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.\n",
|
||||
"Human: Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Thank you! I'm glad you enjoyed it.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Thank you.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"AI: Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.\n",
|
||||
"Human: Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"AI: Thank you! I'm glad you enjoyed it.\n",
|
||||
"Human: Thank you.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" You're welcome!\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"AI: Thank you! I'm glad you enjoyed it.\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome!\n",
|
||||
"Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Our whole process of awesome is free.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome!\n",
|
||||
"Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"AI: Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.\n",
|
||||
"Human: Our whole process of awesome is free.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" That's great! It's always nice to have access to free tools and resources.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" No, I meant to ask, are those options that you mentioned free? No, I meant to ask, are those options that you mentioned free?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"AI: Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.\n",
|
||||
"Human: Our whole process of awesome is free.\n",
|
||||
"AI: That's great! It's always nice to have access to free tools and resources.\n",
|
||||
"Human: No, I meant to ask, are those options that you mentioned free? No, I meant to ask, are those options that you mentioned free?\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Yes, the online brands I mentioned are all free to use. Adobe Photoshop Express, Pixlr, and Fotor are all free to use, and Freq is a free music production platform.\n",
|
||||
"listening now...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "KeyboardInterrupt",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[1;32mIn[6], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m listen(\u001b[39mNone\u001b[39;49;00m)\n",
|
||||
"Cell \u001b[1;32mIn[5], line 20\u001b[0m, in \u001b[0;36mlisten\u001b[1;34m(command_queue)\u001b[0m\n\u001b[0;32m 18\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m'\u001b[39m\u001b[39mlistening now...\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m 19\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m---> 20\u001b[0m audio \u001b[39m=\u001b[39m r\u001b[39m.\u001b[39;49mlisten(source, timeout\u001b[39m=\u001b[39;49m\u001b[39m5\u001b[39;49m, phrase_time_limit\u001b[39m=\u001b[39;49m\u001b[39m30\u001b[39;49m)\n\u001b[0;32m 21\u001b[0m \u001b[39m# audio = r.record(source,duration = 5)\u001b[39;00m\n\u001b[0;32m 22\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m'\u001b[39m\u001b[39mRecognizing...\u001b[39m\u001b[39m'\u001b[39m)\n",
|
||||
"File \u001b[1;32mc:\\ProgramData\\miniconda3\\envs\\lang\\lib\\site-packages\\speech_recognition\\__init__.py:523\u001b[0m, in \u001b[0;36mRecognizer.listen\u001b[1;34m(self, source, timeout, phrase_time_limit, snowboy_configuration)\u001b[0m\n\u001b[0;32m 520\u001b[0m \u001b[39mif\u001b[39;00m phrase_time_limit \u001b[39mand\u001b[39;00m elapsed_time \u001b[39m-\u001b[39m phrase_start_time \u001b[39m>\u001b[39m phrase_time_limit:\n\u001b[0;32m 521\u001b[0m \u001b[39mbreak\u001b[39;00m\n\u001b[1;32m--> 523\u001b[0m buffer \u001b[39m=\u001b[39m source\u001b[39m.\u001b[39;49mstream\u001b[39m.\u001b[39;49mread(source\u001b[39m.\u001b[39;49mCHUNK)\n\u001b[0;32m 524\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(buffer) \u001b[39m==\u001b[39m \u001b[39m0\u001b[39m: \u001b[39mbreak\u001b[39;00m \u001b[39m# reached end of the stream\u001b[39;00m\n\u001b[0;32m 525\u001b[0m frames\u001b[39m.\u001b[39mappend(buffer)\n",
|
||||
"File \u001b[1;32mc:\\ProgramData\\miniconda3\\envs\\lang\\lib\\site-packages\\speech_recognition\\__init__.py:199\u001b[0m, in \u001b[0;36mMicrophone.MicrophoneStream.read\u001b[1;34m(self, size)\u001b[0m\n\u001b[0;32m 198\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mread\u001b[39m(\u001b[39mself\u001b[39m, size):\n\u001b[1;32m--> 199\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpyaudio_stream\u001b[39m.\u001b[39;49mread(size, exception_on_overflow\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m)\n",
|
||||
"File \u001b[1;32mc:\\ProgramData\\miniconda3\\envs\\lang\\lib\\site-packages\\pyaudio\\__init__.py:570\u001b[0m, in \u001b[0;36mPyAudio.Stream.read\u001b[1;34m(self, num_frames, exception_on_overflow)\u001b[0m\n\u001b[0;32m 567\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_is_input:\n\u001b[0;32m 568\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mIOError\u001b[39;00m(\u001b[39m\"\u001b[39m\u001b[39mNot input stream\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[0;32m 569\u001b[0m paCanNotReadFromAnOutputOnlyStream)\n\u001b[1;32m--> 570\u001b[0m \u001b[39mreturn\u001b[39;00m pa\u001b[39m.\u001b[39;49mread_stream(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_stream, num_frames,\n\u001b[0;32m 571\u001b[0m exception_on_overflow)\n",
|
||||
"\u001b[1;31mKeyboardInterrupt\u001b[0m: "
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"listen(None)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "lang",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
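
The cell above implements a capture → transcribe → predict → speak loop. Here is a minimal sketch of that loop, pulled out of the notebook JSON for readability. It assumes the `chatgpt_chain` LLMChain and the pyttsx3 `engine` built earlier in the notebook; the `recognize_google` backend is an assumption, since the recognizer call itself is not visible in this excerpt.

```python
import pyttsx3
import speech_recognition as sr

# Assumed to exist from earlier notebook cells: `chatgpt_chain`, an LLMChain
# whose prompt exposes a `human_input` variable.
engine = pyttsx3.init()
r = sr.Recognizer()


def listen_once() -> None:
    with sr.Microphone() as source:
        print("listening now...")
        # Same timeouts as in the traceback above.
        audio = r.listen(source, timeout=5, phrase_time_limit=30)
    print("Recognizing...")
    try:
        text = r.recognize_google(audio)  # assumption: Google STT backend
    except Exception as e:
        text = f"Sorry, I didn't catch that. Exception was: {e}"
    print(text)
    response_text = chatgpt_chain.predict(human_input=text)
    print(response_text)
    engine.say(response_text)  # speak the reply aloud
    engine.runAndWait()
```
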
@@ -1,11 +1,9 @@
# YouTube

This is a collection of `LangChain` tutorials and videos on `YouTube`.
This is a collection of LangChain tutorials and videos.

### Introduction to LangChain with Harrison Chase, creator of LangChain
- [Building the Future with LLMs, `LangChain`, & `Pinecone`](https://youtu.be/nMniwlGyX-c) by [Pinecone](https://www.youtube.com/@pinecone-io)
- [LangChain and Weaviate with Harrison Chase and Bob van Luijt - Weaviate Podcast #36](https://youtu.be/lhby7Ql7hbk) by [Weaviate • Vector Database](https://www.youtube.com/@Weaviate)
- [LangChain Demo + Q&A with Harrison Chase](https://youtu.be/zaYTXQFR0_s?t=788) by [Full Stack Deep Learning](https://www.youtube.com/@FullStackDeepLearning)
### Introduction to LangChain with Harrison Chase creator of LangChain
- [LangChain Demo + Q&A with Harrison Chase](https://youtu.be/zaYTXQFR0_s) by [Full Stack Deep Learning](https://www.youtube.com/@FullStackDeepLearning)

## Tutorials

@@ -83,6 +81,7 @@ This is a collection of `LangChain` tutorials and videos on `YouTube`.
- [The easiest way to work with large language models | Learn LangChain in 10min](https://youtu.be/kmbS6FDQh7c) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
- [4 Autonomous AI Agents: “Westworld” simulation `BabyAGI`, `AutoGPT`, `Camel`, `LangChain`](https://youtu.be/yWbnH6inT_U) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
- [AI CAN SEARCH THE INTERNET? Langchain Agents + OpenAI ChatGPT](https://youtu.be/J-GL0htqda8) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
- [Building the Future with LLMs, LangChain, & `Pinecone`](https://youtu.be/nMniwlGyX-c) by [Pinecone](https://www.youtube.com/@pinecone-io)
- [`Weaviate` + LangChain for LLM apps presented by Erika Cardenas](https://youtu.be/7AGj4Td5Lgw) by [`Weaviate` • Vector Database](https://www.youtube.com/@Weaviate)
- [Analyze Custom `CSV` Data with `GPT-4` using Langchain](https://youtu.be/Ew3sGdX8at4) by [Venelin Valkov](https://www.youtube.com/@venelin_valkov)
- [Langchain Overview - How to Use Langchain & `ChatGPT`](https://youtu.be/oYVYIq0lOtI) by [Python In Office](https://www.youtube.com/@pythoninoffice6568)

@@ -5,11 +5,6 @@ from typing import Optional

from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
from langchain.cache import BaseCache
from langchain.callbacks import (
    set_default_callback_manager,
    set_handler,
    set_tracing_callback_manager,
)
from langchain.chains import (
    ConversationChain,
    LLMBashChain,
@@ -50,7 +45,6 @@ from langchain.sql_database import SQLDatabase
from langchain.utilities import ArxivAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.powerbi import PowerBIDataset
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
@@ -66,7 +60,6 @@ del metadata # optional, avoids polluting the results of dir(__package__)

verbose: bool = False
llm_cache: Optional[BaseCache] = None
set_default_callback_manager()

# For backwards compatibility
SerpAPIChain = SerpAPIWrapper
@@ -107,7 +100,6 @@ __all__ = [
    "HuggingFacePipeline",
    "SQLDatabase",
    "SQLDatabaseChain",
    "PowerBIDataset",
    "FAISS",
    "MRKLChain",
    "VectorDBQA",
@@ -117,7 +109,5 @@ __all__ = [
    "VectorDBQAWithSourcesChain",
    "QAWithSourcesChain",
    "PALChain",
    "set_handler",
    "set_tracing_callback_manager",
    "LlamaCpp",
]

@@ -12,8 +12,6 @@ from langchain.agents.agent_toolkits import (
    create_json_agent,
    create_openapi_agent,
    create_pandas_dataframe_agent,
    create_pbi_agent,
    create_pbi_chat_agent,
    create_sql_agent,
    create_vectorstore_agent,
    create_vectorstore_router_agent,
@@ -46,8 +44,6 @@ __all__ = [
    "ConversationalChatAgent",
    "load_agent",
    "create_sql_agent",
    "create_pbi_agent",
    "create_pbi_chat_agent",
    "create_json_agent",
    "create_openapi_agent",
    "create_vectorstore_router_agent",

@@ -13,7 +13,13 @@ import yaml
from pydantic import BaseModel, root_validator

from langchain.agents.tools import InvalidTool
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.input import get_color_mapping
@@ -23,7 +29,6 @@ from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    AgentAction,
    AgentFinish,
    BaseLanguageModel,
    BaseMessage,
    BaseOutputParser,
)
@@ -46,13 +51,17 @@ class BaseSingleActionAgent(BaseModel):

    @abstractmethod
    def plan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -61,13 +70,17 @@ class BaseSingleActionAgent(BaseModel):

    @abstractmethod
    async def aplan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -170,13 +183,17 @@ class BaseMultiActionAgent(BaseModel):

    @abstractmethod
    def plan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[List[AgentAction], AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -185,13 +202,17 @@ class BaseMultiActionAgent(BaseModel):

    @abstractmethod
    async def aplan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[List[AgentAction], AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
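
The signature change above is the contract a custom agent now has to meet: `plan`/`aplan` accept a `callbacks` argument and should hand it to whatever chains they invoke. A minimal sketch (not part of this diff; the class and its behavior are illustrative) of a conforming single-action agent:

```python
from typing import Any, List, Tuple, Union

from langchain.agents.agent import BaseSingleActionAgent
from langchain.callbacks.manager import Callbacks
from langchain.schema import AgentAction, AgentFinish


class EchoAgent(BaseSingleActionAgent):
    """Toy agent that finishes immediately by echoing its input."""

    @property
    def input_keys(self) -> List[str]:
        return ["input"]

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        # A real agent would forward `callbacks` to its LLMChain here so
        # tracing/streaming handlers see the nested run.
        return AgentFinish(return_values={"output": kwargs["input"]}, log="echo")

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        return self.plan(intermediate_steps, callbacks=callbacks, **kwargs)
```
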
@@ -285,38 +306,52 @@ class LLMSingleActionAgent(BaseSingleActionAgent):
        return list(set(self.llm_chain.input_keys) - {"intermediate_steps"})

    def plan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        output = self.llm_chain.run(
            intermediate_steps=intermediate_steps, stop=self.stop, **kwargs
            intermediate_steps=intermediate_steps,
            stop=self.stop,
            callbacks=callbacks,
            **kwargs,
        )
        return self.output_parser.parse(output)

    async def aplan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        output = await self.llm_chain.arun(
            intermediate_steps=intermediate_steps, stop=self.stop, **kwargs
            intermediate_steps=intermediate_steps,
            stop=self.stop,
            callbacks=callbacks,
            **kwargs,
        )
        return self.output_parser.parse(output)

@@ -368,37 +403,45 @@ class Agent(BaseSingleActionAgent):
        return thoughts

    def plan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
        full_output = self.llm_chain.predict(**full_inputs)
        full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
        return self.output_parser.parse(full_output)

    async def aplan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
        full_output = await self.llm_chain.apredict(**full_inputs)
        full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs)
        return self.output_parser.parse(full_output)

    def get_full_inputs(
@@ -632,24 +675,27 @@ class AgentExecutor(Chain):

        return True

    def _return(self, output: AgentFinish, intermediate_steps: list) -> Dict[str, Any]:
        self.callback_manager.on_agent_finish(
            output, color="green", verbose=self.verbose
        )
    def _return(
        self,
        output: AgentFinish,
        intermediate_steps: list,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        if run_manager:
            run_manager.on_agent_finish(output, color="green", verbose=self.verbose)
        final_output = output.return_values
        if self.return_intermediate_steps:
            final_output["intermediate_steps"] = intermediate_steps
        return final_output

    async def _areturn(
        self, output: AgentFinish, intermediate_steps: list
        self,
        output: AgentFinish,
        intermediate_steps: list,
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        if self.callback_manager.is_async:
            await self.callback_manager.on_agent_finish(
                output, color="green", verbose=self.verbose
            )
        else:
            self.callback_manager.on_agent_finish(
        if run_manager:
            await run_manager.on_agent_finish(
                output, color="green", verbose=self.verbose
            )
        final_output = output.return_values
@@ -663,13 +709,18 @@ class AgentExecutor(Chain):
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        # Call the LLM to see what to do.
        output = self.agent.plan(intermediate_steps, **inputs)
        output = self.agent.plan(
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
        )
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
@@ -680,9 +731,8 @@ class AgentExecutor(Chain):
        actions = output
        result = []
        for agent_action in actions:
            self.callback_manager.on_agent_action(
                agent_action, verbose=self.verbose, color="green"
            )
            if run_manager:
                run_manager.on_agent_action(agent_action, color="green")
            # Otherwise we look up the tool
            if agent_action.tool in name_to_tool_map:
                tool = name_to_tool_map[agent_action.tool]
@@ -696,6 +746,7 @@ class AgentExecutor(Chain):
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
@@ -704,6 +755,7 @@ class AgentExecutor(Chain):
                    agent_action.tool,
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            result.append((agent_action, observation))
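
The pattern in `_take_next_step` above recurs throughout this refactor: the executor receives a `run_manager` for its own run and passes `run_manager.get_child()` as the `callbacks` for every nested `plan` and `tool.run` call, so child runs are grouped under the parent. A hedged sketch of the same pattern in a custom `Chain` (the class and field names are illustrative, not from this diff, and it assumes this branch's `Chain.run` accepts a `callbacks` keyword):

```python
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain


class SummarizeTwiceChain(Chain):
    """Toy chain that runs an inner LLMChain twice, forwarding callbacks."""

    inner: LLMChain

    @property
    def input_keys(self) -> List[str]:
        return ["text"]

    @property
    def output_keys(self) -> List[str]:
        return ["summary"]

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        # Hand the child manager to every nested call, mirroring AgentExecutor.
        child_callbacks = run_manager.get_child() if run_manager else None
        first = self.inner.run(text=inputs["text"], callbacks=child_callbacks)
        second = self.inner.run(text=first, callbacks=child_callbacks)
        return {"summary": second}
```
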
@@ -715,13 +767,18 @@ class AgentExecutor(Chain):
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        # Call the LLM to see what to do.
        output = await self.agent.aplan(intermediate_steps, **inputs)
        output = await self.agent.aplan(
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
        )
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
@@ -734,12 +791,8 @@ class AgentExecutor(Chain):
        async def _aperform_agent_action(
            agent_action: AgentAction,
        ) -> Tuple[AgentAction, str]:
            if self.callback_manager.is_async:
                await self.callback_manager.on_agent_action(
                    agent_action, verbose=self.verbose, color="green"
                )
            else:
                self.callback_manager.on_agent_action(
            if run_manager:
                await run_manager.on_agent_action(
                    agent_action, verbose=self.verbose, color="green"
                )
            # Otherwise we look up the tool
@@ -755,6 +808,7 @@ class AgentExecutor(Chain):
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
@@ -763,6 +817,7 @@ class AgentExecutor(Chain):
                    agent_action.tool,
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            return agent_action, observation
@@ -774,7 +829,11 @@ class AgentExecutor(Chain):

        return list(result)

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
@@ -790,10 +849,16 @@ class AgentExecutor(Chain):
        # We now enter the agent loop (until it returns something).
        while self._should_continue(iterations, time_elapsed):
            next_step_output = self._take_next_step(
                name_to_tool_map, color_mapping, inputs, intermediate_steps
                name_to_tool_map,
                color_mapping,
                inputs,
                intermediate_steps,
                run_manager=run_manager,
            )
            if isinstance(next_step_output, AgentFinish):
                return self._return(next_step_output, intermediate_steps)
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
                )

            intermediate_steps.extend(next_step_output)
            if len(next_step_output) == 1:
@@ -809,7 +874,11 @@ class AgentExecutor(Chain):
        )
        return self._return(output, intermediate_steps)

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
@@ -827,7 +896,11 @@ class AgentExecutor(Chain):
        try:
            while self._should_continue(iterations, time_elapsed):
                next_step_output = await self._atake_next_step(
                    name_to_tool_map, color_mapping, inputs, intermediate_steps
                    name_to_tool_map,
                    color_mapping,
                    inputs,
                    intermediate_steps,
                    run_manager=run_manager,
                )
                if isinstance(next_step_output, AgentFinish):
                    return await self._areturn(next_step_output, intermediate_steps)
@@ -845,7 +918,9 @@ class AgentExecutor(Chain):
            output = self.agent.return_stopped_response(
                self.early_stopping_method, intermediate_steps, **inputs
            )
            return await self._areturn(output, intermediate_steps)
            return await self._areturn(
                output, intermediate_steps, run_manager=run_manager
            )
        except TimeoutError:
            # stop early when interrupted by the async timeout
            output = self.agent.return_stopped_response(

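
Taken together, the `_call`/`_acall` changes mean a handler passed at the top of an agent run should now observe the nested chain and tool runs as children. A hedged usage sketch (assuming this branch's `Chain.run` accepts a `callbacks` argument):

```python
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description")

# The handler is propagated to the agent's nested LLM and tool runs.
agent.run("What is 2 to the 10th power?", callbacks=[StdOutCallbackHandler()])
```
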
@@ -8,9 +8,6 @@ from langchain.agents.agent_toolkits.nla.toolkit import NLAToolkit
from langchain.agents.agent_toolkits.openapi.base import create_openapi_agent
from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.agents.agent_toolkits.powerbi.base import create_pbi_agent
from langchain.agents.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.agent_toolkits.python.base import create_python_agent
from langchain.agents.agent_toolkits.sql.base import create_sql_agent
from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
@@ -29,14 +26,11 @@ __all__ = [
    "create_json_agent",
    "create_sql_agent",
    "create_openapi_agent",
    "create_pbi_agent",
    "create_pbi_chat_agent",
    "create_python_agent",
    "create_vectorstore_agent",
    "JsonToolkit",
    "SQLDatabaseToolkit",
    "NLAToolkit",
    "PowerBIToolkit",
    "OpenAPIToolkit",
    "VectorStoreToolkit",
    "create_vectorstore_router_agent",

@@ -26,12 +26,12 @@ from langchain.agents.agent_toolkits.openapi.planner_prompt import (
from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.memory import ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain.requests import RequestsWrapper
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.requests.tool import BaseRequestsTool
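This hunk, like several below, moves the import of `BaseLanguageModel` from `langchain.schema` to the new `langchain.base_language` module. Downstream code migrates with a one-line change (a sketch; whether `langchain.schema` keeps a compatibility re-export is not shown in this diff):

```python
# Before this change:
# from langchain.schema import BaseLanguageModel
# After this change, the canonical location is:
from langchain.base_language import BaseLanguageModel
```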
@@ -1 +0,0 @@
"""Power BI agent."""
@@ -1,62 +0,0 @@
"""Power BI agent."""
from typing import Any, Dict, List, Optional

from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits.powerbi.prompt import (
    POWERBI_PREFIX,
    POWERBI_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.utilities.powerbi import PowerBIDataset


def create_pbi_agent(
    llm: BaseLLM,
    toolkit: Optional[PowerBIToolkit],
    powerbi: Optional[PowerBIDataset] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = POWERBI_PREFIX,
    suffix: str = POWERBI_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    examples: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,
    verbose: bool = False,
    agent_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a pbi agent from an LLM and tools."""
    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    tools = toolkit.get_tools()

    agent = ZeroShotAgent(
        llm_chain=LLMChain(
            llm=llm,
            prompt=ZeroShotAgent.create_prompt(
                tools,
                prefix=prefix.format(top_k=top_k),
                suffix=suffix,
                format_instructions=format_instructions,
                input_variables=input_variables,
            ),
            callback_manager=callback_manager,  # type: ignore
            verbose=verbose,
        ),
        allowed_tools=[tool.name for tool in tools],
        **(agent_kwargs or {}),
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **kwargs,
    )
@@ -1,60 +0,0 @@
"""Power BI agent."""
from typing import Any, Dict, List, Optional

from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits.powerbi.prompt import (
    POWERBI_CHAT_PREFIX,
    POWERBI_CHAT_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from langchain.utilities.powerbi import PowerBIDataset


def create_pbi_chat_agent(
    llm: BaseChatModel,
    toolkit: Optional[PowerBIToolkit],
    powerbi: Optional[PowerBIDataset] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = POWERBI_CHAT_PREFIX,
    suffix: str = POWERBI_CHAT_SUFFIX,
    examples: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    memory: Optional[BaseChatMemory] = None,
    top_k: int = 10,
    verbose: bool = False,
    agent_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a pbi agent from a chat LLM and tools.

    If you supply only a toolkit and no powerbi dataset, the same LLM is used for both.
    """
    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    tools = toolkit.get_tools()
    agent = ConversationalChatAgent.from_llm_and_tools(
        llm=llm,
        tools=tools,
        system_message=prefix.format(top_k=top_k),
        user_message=suffix,
        input_variables=input_variables,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_kwargs or {}),
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        memory=memory
        or ConversationBufferMemory(memory_key="chat_history", return_messages=True),
        verbose=verbose,
        **kwargs,
    )
@@ -1,48 +0,0 @@
# flake8: noqa
"""Prompts for PowerBI agent."""


POWERBI_PREFIX = """You are an agent designed to interact with a Power BI Dataset.
Given an input question, create a syntactically correct DAX query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the few relevant columns given the question.

You have access to tools for interacting with the Power BI Dataset. Only use the below tools. Only use the information returned by the below tools to construct your final answer. Usually I should first ask which tables I have, then how each table is defined and then ask the question to query tool to create a query for me and then I should ask the query tool to execute it, finally create a nice sentence that answers the question. If you receive an error back that mentions that the query was wrong try to phrase the question differently and get a new query from the question to query tool.

If the question does not seem related to the dataset, just return "I don't know" as the answer.
"""

POWERBI_SUFFIX = """Begin!

Question: {input}
Thought: I should first ask which tables I have, then how each table is defined and then ask the question to query tool to create a query for me and then I should ask the query tool to execute it, finally create a nice sentence that answers the question.
{agent_scratchpad}"""

POWERBI_CHAT_PREFIX = """Assistant is a large language model trained by OpenAI built to help users interact with a PowerBI Dataset.

Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.

Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.

Given an input question, create a syntactically correct DAX query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.

Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.

Usually I should first ask which tables I have, then how each table is defined and then ask the question to query tool to create a query for me and then I should ask the query tool to execute it, finally create a complete sentence that answers the question. If you receive an error back that mentions that the query was wrong try to phrase the question differently and get a new query from the question to query tool.
"""

POWERBI_CHAT_SUFFIX = """TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:

{{tools}}

{format_instructions}

USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):

{{{{input}}}}
"""
@@ -1,67 +0,0 @@
"""Toolkit for interacting with a Power BI dataset."""
from typing import List, Optional

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.powerbi.prompt import QUESTION_TO_QUERY
from langchain.tools.powerbi.tool import (
    InfoPowerBITool,
    InputToQueryTool,
    ListPowerBITool,
    QueryPowerBITool,
)
from langchain.utilities.powerbi import PowerBIDataset


class PowerBIToolkit(BaseToolkit):
    """Toolkit for interacting with PowerBI dataset."""

    powerbi: PowerBIDataset = Field(exclude=True)
    llm: BaseLanguageModel = Field(exclude=True)
    examples: Optional[str] = None
    callback_manager: Optional[BaseCallbackManager] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        if self.callback_manager:
            chain = (
                LLMChain(
                    llm=self.llm,
                    callback_manager=self.callback_manager,
                    prompt=PromptTemplate(
                        template=QUESTION_TO_QUERY,
                        input_variables=["tool_input", "tables", "schemas", "examples"],
                    ),
                ),
            )
        else:
            chain = (
                LLMChain(
                    llm=self.llm,
                    prompt=PromptTemplate(
                        template=QUESTION_TO_QUERY,
                        input_variables=["tool_input", "tables", "schemas", "examples"],
                    ),
                ),
            )
        return [
            QueryPowerBITool(powerbi=self.powerbi),
            InfoPowerBITool(powerbi=self.powerbi),
            ListPowerBITool(powerbi=self.powerbi),
            InputToQueryTool(
                powerbi=self.powerbi,
                llm_chain=chain,
                examples=self.examples,
            ),
        ]
@@ -5,6 +5,7 @@ from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.chat.output_parser import ChatOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
@@ -13,7 +14,7 @@ from langchain.prompts.chat import (
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.schema import AgentAction, BaseLanguageModel
from langchain.schema import AgentAction
from langchain.tools import BaseTool
@@ -9,10 +9,10 @@ from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.conversational.output_parser import ConvoOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
@@ -12,6 +12,7 @@ from langchain.agents.conversational_chat.prompt import (
    SUFFIX,
    TEMPLATE_TOOL_RESPONSE,
)
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts.base import BasePromptTemplate
@@ -24,7 +25,6 @@ from langchain.prompts.chat import (
from langchain.schema import (
    AgentAction,
    AIMessage,
    BaseLanguageModel,
    BaseMessage,
    BaseOutputParser,
    HumanMessage,
@@ -4,8 +4,8 @@ from typing import Any, Optional, Sequence
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import AGENT_TO_CLASS, load_agent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
@@ -103,8 +103,8 @@ def _get_llm_math(llm: BaseLLM) -> BaseTool:
    return Tool(
        name="Calculator",
        description="Useful for when you need to answer questions about math.",
        func=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).run,
        coroutine=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).arun,
        func=LLMMathChain(llm=llm).run,
        coroutine=LLMMathChain(llm=llm).arun,
    )
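The calculator tool no longer copies the LLM's `callback_manager` into the chain at construction time; under the new model, callbacks arrive per call and are propagated to child runs. A sketch of the intended call-time wiring (assuming `run()` accepts a `callbacks` argument after this refactor):

```python
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chains import LLMMathChain
from langchain.llms import OpenAI

chain = LLMMathChain(llm=OpenAI(temperature=0))
# Callbacks are supplied when the chain runs, not when it is built:
# chain.run("What is 13 raised to the 0.5 power?", callbacks=[StdOutCallbackHandler()])
```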
@@ -10,10 +10,10 @@ from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
@@ -4,6 +4,11 @@ from typing import Any, Awaitable, Callable, Optional, Type, Union

from pydantic import BaseModel, validate_arguments

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
    Callbacks,
)
from langchain.tools.base import BaseTool


@@ -26,14 +31,42 @@ class Tool(BaseTool):
        valid_keys = signature(self.func).parameters
        return {k: schema[k] for k in valid_keys}

    def _run(self, *args: Any, **kwargs: Any) -> str:
    def _run(
        self,
        *args: Any,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Use the tool."""
        return self.func(*args, **kwargs)
        new_argument_supported = signature(self.func).parameters.get("callbacks")
        return (
            self.func(
                *args,
                callbacks=run_manager.get_child() if run_manager else None,
                **kwargs,
            )
            if new_argument_supported
            else self.func(*args, **kwargs)
        )

    async def _arun(self, *args: Any, **kwargs: Any) -> str:
    async def _arun(
        self,
        *args: Any,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Use the tool asynchronously."""
        new_argument_supported = signature(self.coroutine).parameters.get("callbacks")
        if self.coroutine:
            return await self.coroutine(*args, **kwargs)
            return (
                await self.coroutine(
                    *args,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **kwargs,
                )
                if new_argument_supported
                else await self.coroutine(*args, **kwargs)
            )
        raise NotImplementedError("Tool does not support async")

    # TODO: this is for backwards compatibility, remove in future
@@ -66,7 +99,6 @@ def tool(
    return_direct: bool = False,
    args_schema: Optional[Type[BaseModel]] = None,
    infer_schema: bool = True,
    raise_errors: bool = False,
) -> Callable:
    """Make tools out of functions, can be used with or without arguments.

@@ -78,8 +110,6 @@ def tool(
        infer_schema: Whether to infer the schema of the arguments from
            the function's signature. This also makes the resultant tool
            accept a dictionary input to its `run()` function.
        raise_errors: Whether to raise exceptions when running the tool
            rather than returning a string with the error message.

    Requires:
        - Function must be of type (str) -> str
@@ -114,7 +144,6 @@ def tool(
        args_schema=_args_schema,
        description=description,
        return_direct=return_direct,
        raise_errors=raise_errors,
    )
    return tool_
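`Tool._run` now inspects the wrapped function's signature and forwards child callbacks only when the function declares a `callbacks` parameter. A sketch of a tool function opting in (the function and tool names are made up for illustration):

```python
from langchain.agents import Tool

def search_notes(query: str, callbacks=None) -> str:
    # Declaring `callbacks` is the opt-in: Tool._run detects the parameter and
    # passes run_manager.get_child(), so anything started here is traced under
    # the parent run. A plain (str) -> str function still works unchanged.
    return f"no notes found for {query!r}"

note_search = Tool(
    name="NoteSearch",
    func=search_notes,
    description="Search the user's local notes.",
)
```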
langchain/base_language.py (new file, 55 lines)
@@ -0,0 +1,55 @@
"""Base class for all language models."""
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import List, Optional

from pydantic import BaseModel

from langchain.callbacks.manager import Callbacks
from langchain.schema import BaseMessage, LLMResult, PromptValue, get_buffer_string


class BaseLanguageModel(BaseModel, ABC):
    @abstractmethod
    def generate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        """Take in a list of prompt values and return an LLMResult."""

    @abstractmethod
    async def agenerate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        """Take in a list of prompt values and return an LLMResult."""

    def get_num_tokens(self, text: str) -> int:
        """Get the number of tokens present in the text."""
        # TODO: this method may not be exact.
        # TODO: this method may differ based on model (eg codex).
        try:
            from transformers import GPT2TokenizerFast
        except ImportError:
            raise ValueError(
                "Could not import transformers python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install transformers`."
            )
        # create a GPT-2 tokenizer instance
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

        # tokenize the text using the GPT-2 tokenizer
        tokenized_text = tokenizer.tokenize(text)

        # calculate the number of tokens in the tokenized text
        return len(tokenized_text)

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Get the number of tokens in the message."""
        return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])
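The default token counter above just runs the GPT-2 tokenizer over the text. A standalone sketch of the same computation (requires `pip install transformers`):

```python
from transformers import GPT2TokenizerFast

# Same approach as get_num_tokens above: tokenize with GPT-2 and count.
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
tokens = tokenizer.tokenize("Tell me a joke about tokenizers.")
print(len(tokens))  # approximate token count for GPT-2-style vocabularies
```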
@@ -1,80 +1,27 @@
"""Callback handlers that allow listening to events in LangChain."""
import os
from contextlib import contextmanager
from typing import Generator, Optional
from typing import Generator

from langchain.callbacks.aim_callback import AimCallbackHandler
from langchain.callbacks.base import (
    AsyncCallbackManager,
    BaseCallbackHandler,
    BaseCallbackManager,
    CallbackManager,
)
from langchain.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain.callbacks.comet_ml_callback import CometCallbackHandler
from langchain.callbacks.manager import (
    CallbackManager,
    get_openai_callback,
    tracing_enabled,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.shared import SharedCallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.callbacks.tracers import SharedLangChainTracer
from langchain.callbacks.tracers import LangChainTracer
from langchain.callbacks.wandb_callback import WandbCallbackHandler


def get_callback_manager() -> BaseCallbackManager:
    """Return the shared callback manager."""
    return SharedCallbackManager()


def set_handler(handler: BaseCallbackHandler) -> None:
    """Set handler."""
    callback = get_callback_manager()
    callback.set_handler(handler)


def set_default_callback_manager() -> None:
    """Set default callback manager."""
    default_handler = os.environ.get("LANGCHAIN_HANDLER", "stdout")
    if default_handler == "stdout":
        set_handler(StdOutCallbackHandler())
    elif default_handler == "langchain":
        session = os.environ.get("LANGCHAIN_SESSION")
        set_tracing_callback_manager(session)
    else:
        raise ValueError(
            f"LANGCHAIN_HANDLER should be one of `stdout` "
            f"or `langchain`, got {default_handler}"
        )


def set_tracing_callback_manager(session_name: Optional[str] = None) -> None:
    """Set tracing callback manager."""
    handler = SharedLangChainTracer()
    callback = get_callback_manager()
    callback.set_handlers([handler, StdOutCallbackHandler()])
    if session_name is None:
        handler.load_default_session()
    else:
        try:
            handler.load_session(session_name)
        except Exception:
            raise ValueError(f"session {session_name} not found")


@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
    """Get OpenAI callback handler in a context manager."""
    handler = OpenAICallbackHandler()
    manager = get_callback_manager()
    manager.add_handler(handler)
    yield handler
    manager.remove_handler(handler)


__all__ = [
    "CallbackManager",
    "AsyncCallbackManager",
    "OpenAICallbackHandler",
    "SharedCallbackManager",
    "StdOutCallbackHandler",
    "AimCallbackHandler",
    "WandbCallbackHandler",
@@ -82,8 +29,5 @@ __all__ = [
    "CometCallbackHandler",
    "AsyncIteratorCallbackHandler",
    "get_openai_callback",
    "set_tracing_callback_manager",
    "set_default_callback_manager",
    "set_handler",
    "get_callback_manager",
    "tracing_enabled",
]
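`get_openai_callback` survives the cleanup but is now re-exported from `langchain.callbacks.manager`, where it is backed by a context variable instead of the shared global manager. Usage stays the same (a sketch; assumes an OpenAI LLM is configured):

```python
from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
    llm("Tell me a joke")
    # The handler lives in a ContextVar only for the duration of the block.
    print(cb.total_tokens)
```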
@@ -1,19 +1,173 @@
"""Base callback handler that can be used to handle callbacks from langchain."""
import asyncio
import functools
from abc import ABC, abstractmethod
"""Base callback handler that can be used to handle callbacks in langchain."""
from __future__ import annotations

import copy
from typing import Any, Dict, List, Optional, Union

from langchain.schema import AgentAction, AgentFinish, LLMResult


class BaseCallbackHandler(ABC):
    """Base callback handler that can be used to handle callbacks from langchain."""
class LLMManagerMixin:
    """Mixin for LLM callbacks."""

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return False
    def on_llm_new_token(
        self,
        token: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run on new LLM token. Only available when streaming is enabled."""

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM ends running."""

    def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM errors."""


class ChainManagerMixin:
    """Mixin for chain callbacks."""

    def on_chain_end(
        self,
        outputs: Dict[str, Any],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when chain ends running."""

    def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when chain errors."""

    def on_agent_action(
        self,
        action: AgentAction,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run on agent action."""

    def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run on agent end."""


class ToolManagerMixin:
    """Mixin for tool callbacks."""

    def on_tool_end(
        self,
        output: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when tool ends running."""

    def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when tool errors."""


class CallbackManagerMixin:
    """Mixin for callback manager."""

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM starts running."""

    def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when chain starts running."""

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when tool starts running."""


class RunManagerMixin:
    """Mixin for run manager."""

    def on_text(
        self,
        text: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Any:
        """Run on arbitrary text."""


class BaseCallbackHandler(
    LLMManagerMixin,
    ChainManagerMixin,
    ToolManagerMixin,
    CallbackManagerMixin,
    RunManagerMixin,
):
    """Base callback handler that can be used to handle callbacks from langchain."""

    @property
    def ignore_llm(self) -> bool:
@@ -30,480 +184,197 @@ class BaseCallbackHandler(ABC):
        """Whether to ignore agent callbacks."""
        return False

    @abstractmethod
    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:

class AsyncCallbackHandler(BaseCallbackHandler):
    """Async callback handler that can be used to handle callbacks from langchain."""

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM starts running."""

    @abstractmethod
    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
    async def on_llm_new_token(
        self,
        token: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""

    @abstractmethod
    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
    async def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM ends running."""

    @abstractmethod
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
    async def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors."""

    @abstractmethod
    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> Any:
    async def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when chain starts running."""

    @abstractmethod
    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
    async def on_chain_end(
        self,
        outputs: Dict[str, Any],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when chain ends running."""

    @abstractmethod
    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
    async def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors."""

    @abstractmethod
    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
    async def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when tool starts running."""

    @abstractmethod
    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
    async def on_tool_end(
        self,
        output: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when tool ends running."""

    @abstractmethod
    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
    async def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when tool errors."""

    @abstractmethod
    def on_text(self, text: str, **kwargs: Any) -> Any:
    async def on_text(
        self,
        text: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run on arbitrary text."""

    @abstractmethod
    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
    async def on_agent_action(
        self,
        action: AgentAction,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run on agent action."""

    @abstractmethod
    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
    async def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run on agent end."""


class BaseCallbackManager(BaseCallbackHandler, ABC):
class BaseCallbackManager(CallbackManagerMixin):
    """Base callback manager that can be used to handle callbacks from LangChain."""

    def __init__(
        self,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
        parent_run_id: Optional[str] = None,
    ) -> None:
        """Initialize callback manager."""
        self.handlers: List[BaseCallbackHandler] = handlers
        self.inheritable_handlers: List[BaseCallbackHandler] = (
            inheritable_handlers or []
        )
        self.parent_run_id: Optional[str] = parent_run_id

    @property
    def is_async(self) -> bool:
        """Whether the callback manager is async."""
        return False

    @abstractmethod
    def add_handler(self, callback: BaseCallbackHandler) -> None:
    def add_handler(self, handler: BaseCallbackHandler, inherit: bool = True) -> None:
        """Add a handler to the callback manager."""
        self.handlers.append(handler)
        if inherit:
            self.inheritable_handlers.append(handler)

    @abstractmethod
    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove a handler from the callback manager."""
        self.handlers.remove(handler)
        self.inheritable_handlers.remove(handler)

    def set_handler(self, handler: BaseCallbackHandler) -> None:
    def set_handlers(
        self, handlers: List[BaseCallbackHandler], inherit: bool = True
    ) -> None:
        """Set handlers as the only handlers on the callback manager."""
        self.handlers = []
        self.inheritable_handlers = []
        for handler in handlers:
            self.add_handler(handler, inherit=inherit)

    def set_handler(self, handler: BaseCallbackHandler, inherit: bool = True) -> None:
        """Set handler as the only handler on the callback manager."""
        self.set_handlers([handler])
        self.set_handlers([handler], inherit=inherit)

    @abstractmethod
    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""


class CallbackManager(BaseCallbackManager):
    """Callback manager that can be used to handle callbacks from langchain."""

    def __init__(self, handlers: List[BaseCallbackHandler]) -> None:
        """Initialize callback manager."""
        self.handlers: List[BaseCallbackHandler] = handlers

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""
        for handler in self.handlers:
            if not handler.ignore_llm:
                if verbose or handler.always_verbose:
                    handler.on_llm_start(serialized, prompts, **kwargs)

    def on_llm_new_token(
        self, token: str, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when LLM generates a new token."""
        for handler in self.handlers:
            if not handler.ignore_llm:
                if verbose or handler.always_verbose:
                    handler.on_llm_new_token(token, **kwargs)

    def on_llm_end(
        self, response: LLMResult, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when LLM ends running."""
        for handler in self.handlers:
            if not handler.ignore_llm:
                if verbose or handler.always_verbose:
                    handler.on_llm_end(response)

    def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        for handler in self.handlers:
            if not handler.ignore_llm:
                if verbose or handler.always_verbose:
                    handler.on_llm_error(error)

    def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        for handler in self.handlers:
            if not handler.ignore_chain:
                if verbose or handler.always_verbose:
                    handler.on_chain_start(serialized, inputs, **kwargs)

    def on_chain_end(
        self, outputs: Dict[str, Any], verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when chain ends running."""
        for handler in self.handlers:
            if not handler.ignore_chain:
                if verbose or handler.always_verbose:
                    handler.on_chain_end(outputs)

    def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        for handler in self.handlers:
            if not handler.ignore_chain:
                if verbose or handler.always_verbose:
                    handler.on_chain_error(error)

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        for handler in self.handlers:
            if not handler.ignore_agent:
                if verbose or handler.always_verbose:
                    handler.on_tool_start(serialized, input_str, **kwargs)

    def on_agent_action(
        self, action: AgentAction, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run on agent action."""
        for handler in self.handlers:
            if not handler.ignore_agent:
                if verbose or handler.always_verbose:
                    handler.on_agent_action(action, **kwargs)

    def on_tool_end(self, output: str, verbose: bool = False, **kwargs: Any) -> None:
        """Run when tool ends running."""
        for handler in self.handlers:
            if not handler.ignore_agent:
                if verbose or handler.always_verbose:
                    handler.on_tool_end(output, **kwargs)

    def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        for handler in self.handlers:
            if not handler.ignore_agent:
                if verbose or handler.always_verbose:
                    handler.on_tool_error(error)

    def on_text(self, text: str, verbose: bool = False, **kwargs: Any) -> None:
        """Run on additional input from chains and agents."""
        for handler in self.handlers:
            if verbose or handler.always_verbose:
                handler.on_text(text, **kwargs)

    def on_agent_finish(
        self, finish: AgentFinish, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run on agent end."""
        for handler in self.handlers:
            if not handler.ignore_agent:
                if verbose or handler.always_verbose:
                    handler.on_agent_finish(finish, **kwargs)

    def add_handler(self, handler: BaseCallbackHandler) -> None:
        """Add a handler to the callback manager."""
        self.handlers.append(handler)

    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove a handler from the callback manager."""
        self.handlers.remove(handler)

    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""
        self.handlers = handlers


class AsyncCallbackHandler(BaseCallbackHandler):
    """Async callback handler that can be used to handle callbacks from langchain."""

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""

    async def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""

    async def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""

    async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""

    async def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""

    async def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""

    async def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""

    async def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""

    async def on_text(self, text: str, **kwargs: Any) -> None:
        """Run on arbitrary text."""

    async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
        """Run on agent action."""

    async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run on agent end."""


async def _handle_event_for_handler(
    handler: BaseCallbackHandler,
    event_name: str,
    ignore_condition_name: Optional[str],
    verbose: bool,
    *args: Any,
    **kwargs: Any
) -> None:
    if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
        if verbose or handler.always_verbose:
            event = getattr(handler, event_name)
            if asyncio.iscoroutinefunction(event):
                await event(*args, **kwargs)
            else:
                await asyncio.get_event_loop().run_in_executor(
                    None, functools.partial(event, *args, **kwargs)
                )


class AsyncCallbackManager(BaseCallbackManager):
    """Async callback manager that can be used to handle callbacks from LangChain."""

    @property
    def is_async(self) -> bool:
        """Return whether the handler is async."""
        return True

    def __init__(self, handlers: List[BaseCallbackHandler]) -> None:
        """Initialize callback manager."""
        self.handlers: List[BaseCallbackHandler] = handlers

    async def _handle_event(
        self,
        event_name: str,
        ignore_condition_name: Optional[str],
        verbose: bool,
        *args: Any,
        **kwargs: Any
    ) -> None:
        """Generic event handler for AsyncCallbackManager."""
        await asyncio.gather(
            *(
                _handle_event_for_handler(
                    handler, event_name, ignore_condition_name, verbose, *args, **kwargs
                )
                for handler in self.handlers
            )
    def __copy__(self) -> "BaseCallbackManager":
        return self.__class__(
            self.handlers.copy(), self.inheritable_handlers.copy(), self.parent_run_id
        )

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""
        await self._handle_event(
            "on_llm_start", "ignore_llm", verbose, serialized, prompts, **kwargs
    def __deepcopy__(self, memo: dict) -> "BaseCallbackManager":
        return self.__class__(
            [copy.deepcopy(handler, memo) for handler in self.handlers],
            [copy.deepcopy(handler, memo) for handler in self.inheritable_handlers],
            self.parent_run_id,
        )

    async def on_llm_new_token(
        self, token: str, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        await self._handle_event(
            "on_llm_new_token", "ignore_llm", verbose, token, **kwargs
        )

    async def on_llm_end(
        self, response: LLMResult, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when LLM ends running."""
        await self._handle_event(
            "on_llm_end", "ignore_llm", verbose, response, **kwargs
        )

    async def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        await self._handle_event("on_llm_error", "ignore_llm", verbose, error, **kwargs)

    async def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        await self._handle_event(
            "on_chain_start", "ignore_chain", verbose, serialized, inputs, **kwargs
        )

    async def on_chain_end(
        self, outputs: Dict[str, Any], verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when chain ends running."""
        await self._handle_event(
            "on_chain_end", "ignore_chain", verbose, outputs, **kwargs
        )

    async def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        await self._handle_event(
            "on_chain_error", "ignore_chain", verbose, error, **kwargs
        )

    async def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        await self._handle_event(
            "on_tool_start", "ignore_agent", verbose, serialized, input_str, **kwargs
        )

    async def on_tool_end(
        self, output: str, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when tool ends running."""
        await self._handle_event(
            "on_tool_end", "ignore_agent", verbose, output, **kwargs
        )

    async def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        await self._handle_event(
            "on_tool_error", "ignore_agent", verbose, error, **kwargs
        )

    async def on_text(self, text: str, verbose: bool = False, **kwargs: Any) -> None:
        """Run when text is printed."""
        await self._handle_event("on_text", None, verbose, text, **kwargs)

    async def on_agent_action(
        self, action: AgentAction, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run on agent action."""
        await self._handle_event(
            "on_agent_action", "ignore_agent", verbose, action, **kwargs
        )

    async def on_agent_finish(
        self, finish: AgentFinish, verbose: bool = False, **kwargs: Any
    ) -> None:
        """Run when agent finishes."""
        await self._handle_event(
            "on_agent_finish", "ignore_agent", verbose, finish, **kwargs
        )

    def add_handler(self, handler: BaseCallbackHandler) -> None:
        """Add a handler to the callback manager."""
        self.handlers.append(handler)

    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove a handler from the callback manager."""
        self.handlers.remove(handler)

    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""
        self.handlers = handlers
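Note that the `@abstractmethod` markers are gone from the handler interface: every event on the new mixins is a no-op by default, so a handler overrides only the events it cares about instead of implementing all of them. A sketch:

```python
from langchain.callbacks.base import BaseCallbackHandler

class TokenPrinter(BaseCallbackHandler):
    """Overrides a single event; every other callback inherits the no-op default."""

    def on_llm_new_token(self, token, *, run_id, parent_run_id=None, **kwargs):
        print(token, end="", flush=True)
```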
770
langchain/callbacks/manager.py
Normal file
770
langchain/callbacks/manager.py
Normal file
@@ -0,0 +1,770 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import copy
|
||||
import functools
|
||||
import os
|
||||
import uuid
|
||||
from contextlib import contextmanager
|
||||
from contextvars import ContextVar
|
||||
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union
|
||||
|
||||
from langchain.callbacks.base import (
|
||||
BaseCallbackHandler,
|
||||
BaseCallbackManager,
|
||||
ChainManagerMixin,
|
||||
LLMManagerMixin,
|
||||
RunManagerMixin,
|
||||
ToolManagerMixin,
|
||||
)
|
||||
from langchain.callbacks.openai_info import OpenAICallbackHandler
|
||||
from langchain.callbacks.stdout import StdOutCallbackHandler
|
||||
from langchain.callbacks.tracers.base import TracerSession
|
||||
from langchain.callbacks.tracers.langchain import LangChainTracer
|
||||
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
|
||||
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
|
||||
|
||||
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
|
||||
"openai_callback", default=None
|
||||
)
|
||||
tracing_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar(
|
||||
"tracing_callback", default=None
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
|
||||
"""Get OpenAI callback handler in a context manager."""
|
||||
cb = OpenAICallbackHandler()
|
||||
openai_callback_var.set(cb)
|
||||
yield cb
|
||||
openai_callback_var.set(None)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def tracing_enabled(
|
||||
session_name: str = "default",
|
||||
) -> Generator[TracerSession, None, None]:
|
||||
"""Get OpenAI callback handler in a context manager."""
|
||||
cb = LangChainTracer()
|
||||
session = cb.load_session(session_name)
|
||||
tracing_callback_var.set(cb)
|
||||
yield session
|
||||
tracing_callback_var.set(None)
|
||||
|
||||
|
||||
def _handle_event(
|
||||
handlers: List[BaseCallbackHandler],
|
||||
event_name: str,
|
||||
ignore_condition_name: Optional[str],
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
for handler in handlers:
|
||||
try:
|
||||
if ignore_condition_name is None or not getattr(
|
||||
handler, ignore_condition_name
|
||||
):
|
||||
getattr(handler, event_name)(*args, **kwargs)
|
||||
except Exception as e:
|
||||
# TODO: switch this to use logging
|
||||
print(f"Error in {event_name} callback: {e}")
|
||||
|
||||
|
||||
async def _ahandle_event_for_handler(
|
||||
handler: BaseCallbackHandler,
|
||||
event_name: str,
|
||||
ignore_condition_name: Optional[str],
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
try:
|
||||
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
|
||||
event = getattr(handler, event_name)
|
||||
if asyncio.iscoroutinefunction(event):
|
||||
await event(*args, **kwargs)
|
||||
else:
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
None, functools.partial(event, *args, **kwargs)
|
||||
)
|
||||
except Exception as e:
|
||||
# TODO: switch this to use logging
|
||||
print(f"Error in {event_name} callback: {e}")
|
||||
|
||||
|
||||
async def _ahandle_event(
|
||||
handlers: List[BaseCallbackHandler],
|
||||
event_name: str,
|
||||
ignore_condition_name: Optional[str],
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Generic event handler for AsyncCallbackManager."""
|
||||
await asyncio.gather(
|
||||
*(
|
||||
_ahandle_event_for_handler(
|
||||
handler, event_name, ignore_condition_name, *args, **kwargs
|
||||
)
|
||||
for handler in handlers
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class BaseRunManager(RunManagerMixin):
    """Base class for run manager (a bound callback manager)."""

    def __init__(
        self,
        run_id: str,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: List[BaseCallbackHandler],
        parent_run_id: Optional[str] = None,
    ) -> None:
        """Initialize run manager."""
        self.run_id = run_id
        self.handlers = handlers
        self.inheritable_handlers = inheritable_handlers
        self.parent_run_id = parent_run_id


class RunManager(BaseRunManager):
    """Sync Run Manager."""

    def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received."""
        _handle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncRunManager(BaseRunManager):
    """Async Run Manager."""

    async def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received."""
        await _ahandle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
    """Callback manager for LLM run."""

    def on_llm_new_token(
        self,
        token: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM generates a new token."""
        _handle_event(
            self.handlers,
            "on_llm_new_token",
            "ignore_llm",
            token=token,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        _handle_event(
            self.handlers,
            "on_llm_end",
            "ignore_llm",
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors."""
        _handle_event(
            self.handlers,
            "on_llm_error",
            "ignore_llm",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )
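
# Example (illustrative only, not part of the module): a run manager fans a
# streamed token out to its handlers. `TokenCollector` is a hypothetical
# handler written for this sketch:
#
#     class TokenCollector(BaseCallbackHandler):
#         def __init__(self) -> None:
#             self.tokens: List[str] = []
#
#         def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
#             self.tokens.append(token)
#
#     collector = TokenCollector()
#     manager = CallbackManagerForLLMRun("run-1", [collector], [collector])
#     manager.on_llm_new_token("Hello", run_id="run-1")
#     assert collector.tokens == ["Hello"]
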
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
    """Async callback manager for LLM run."""

    async def on_llm_new_token(
        self,
        token: str,
        *,
        run_id: Optional[str] = None,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM generates a new token."""
        await _ahandle_event(
            self.handlers,
            "on_llm_new_token",
            "ignore_llm",
            token,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        await _ahandle_event(
            self.handlers,
            "on_llm_end",
            "ignore_llm",
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors."""
        await _ahandle_event(
            self.handlers,
            "on_llm_error",
            "ignore_llm",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
    """Callback manager for chain run."""

    def get_child(self) -> Callbacks:
        """Get a child callback manager."""
        manager = CallbackManager([], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        return manager

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        _handle_event(
            self.handlers,
            "on_chain_end",
            "ignore_chain",
            outputs,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when chain errors."""
        _handle_event(
            self.handlers,
            "on_chain_error",
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received."""
        _handle_event(
            self.handlers,
            "on_agent_action",
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received."""
        _handle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
    """Async callback manager for chain run."""

    def get_child(self) -> AsyncCallbackManager:
        """Get a child callback manager."""
        manager = AsyncCallbackManager([], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        return manager

    async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        await _ahandle_event(
            self.handlers,
            "on_chain_end",
            "ignore_chain",
            outputs,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when chain errors."""
        await _ahandle_event(
            self.handlers,
            "on_chain_error",
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received."""
        await _ahandle_event(
            self.handlers,
            "on_agent_action",
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received."""
        await _ahandle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
    """Callback manager for tool run."""

    def get_child(self) -> CallbackManager:
        """Get a child callback manager."""
        manager = CallbackManager([], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        return manager

    def on_tool_end(
        self,
        output: str,
        **kwargs: Any,
    ) -> None:
        """Run when tool ends running."""
        _handle_event(
            self.handlers,
            "on_tool_end",
            "ignore_agent",
            output,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when tool errors."""
        _handle_event(
            self.handlers,
            "on_tool_error",
            "ignore_agent",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
    """Async callback manager for tool run."""

    def get_child(self) -> AsyncCallbackManager:
        """Get a child callback manager."""
        manager = AsyncCallbackManager([], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        return manager

    async def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        await _ahandle_event(
            self.handlers,
            "on_tool_end",
            "ignore_agent",
            output,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when tool errors."""
        await _ahandle_event(
            self.handlers,
            "on_tool_error",
            "ignore_agent",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )
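
# Note on handler inheritance: get_child() copies only the *inheritable*
# handlers onto the child manager, so a handler attached locally to one run
# does not leak into nested runs. An illustrative sketch (assumes the parent
# run manager stores parent_run_id as shown above):
#
#     parent = CallbackManagerForChainRun("run-1", [], [StdOutCallbackHandler()])
#     child = parent.get_child()      # a CallbackManager bound under run-1
#     assert child.parent_run_id == "run-1"
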
class CallbackManager(BaseCallbackManager):
    """Callback manager that can be used to handle callbacks from LangChain."""

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> CallbackManagerForLLMRun:
        """Run when LLM starts running."""
        if run_id is None:
            run_id = str(uuid.uuid4())

        _handle_event(
            self.handlers,
            "on_llm_start",
            "ignore_llm",
            serialized,
            prompts,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

        return CallbackManagerForLLMRun(
            run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
        )

    def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> CallbackManagerForChainRun:
        """Run when chain starts running."""
        if run_id is None:
            run_id = str(uuid.uuid4())

        _handle_event(
            self.handlers,
            "on_chain_start",
            "ignore_chain",
            serialized,
            inputs,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

        return CallbackManagerForChainRun(
            run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
        )

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        run_id: Optional[str] = None,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> CallbackManagerForToolRun:
        """Run when tool starts running."""
        if run_id is None:
            run_id = str(uuid.uuid4())

        _handle_event(
            self.handlers,
            "on_tool_start",
            "ignore_agent",
            serialized,
            input_str,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

        return CallbackManagerForToolRun(
            run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
        )

    @classmethod
    def configure(
        cls,
        inheritable_callbacks: Optional[
            Union[BaseCallbackManager, List[BaseCallbackHandler]]
        ] = None,
        local_callbacks: Optional[
            Union[BaseCallbackManager, List[BaseCallbackHandler]]
        ] = None,
        verbose: bool = False,
    ) -> Optional[BaseCallbackManager]:
        """Configure the callback manager."""
        return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
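
# A minimal end-to-end sketch (illustrative, not part of the module): start an
# LLM run, stream a token, and end the run. StreamingStdOutCallbackHandler is
# assumed importable from langchain.callbacks.streaming_stdout.
#
#     from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
#
#     manager = CallbackManager([StreamingStdOutCallbackHandler()])
#     run = manager.on_llm_start({"name": "fake-llm"}, ["Tell me a joke"])
#     run.on_llm_new_token("Why", run_id=run.run_id)
#     run.on_llm_end(LLMResult(generations=[[]]))
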
class AsyncCallbackManager(BaseCallbackManager):
    """Async callback manager that can be used to handle callbacks from LangChain."""

    @property
    def is_async(self) -> bool:
        """Return whether the callback manager is async."""
        return True

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForLLMRun:
        """Run when LLM starts running."""
        if run_id is None:
            run_id = str(uuid.uuid4())

        await _ahandle_event(
            self.handlers,
            "on_llm_start",
            "ignore_llm",
            serialized,
            prompts,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

        return AsyncCallbackManagerForLLMRun(
            run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
        )

    async def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForChainRun:
        """Run when chain starts running."""
        if run_id is None:
            run_id = str(uuid.uuid4())

        await _ahandle_event(
            self.handlers,
            "on_chain_start",
            "ignore_chain",
            serialized,
            inputs,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

        return AsyncCallbackManagerForChainRun(
            run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
        )

    async def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        run_id: Optional[str] = None,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForToolRun:
        """Run when tool starts running."""
        if run_id is None:
            run_id = str(uuid.uuid4())

        await _ahandle_event(
            self.handlers,
            "on_tool_start",
            "ignore_agent",
            serialized,
            input_str,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

        return AsyncCallbackManagerForToolRun(
            run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
        )

    @classmethod
    def configure(
        cls,
        inheritable_callbacks: Optional[
            Union[BaseCallbackManager, List[BaseCallbackHandler]]
        ] = None,
        local_callbacks: Optional[
            Union[BaseCallbackManager, List[BaseCallbackHandler]]
        ] = None,
        verbose: bool = False,
    ) -> Optional[BaseCallbackManager]:
        """Configure the callback manager."""
        return _configure(cls, inheritable_callbacks, local_callbacks, verbose)


T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def _configure(
    callback_manager_cls: Type[T],
    inheritable_callbacks: Callbacks = None,
    local_callbacks: Callbacks = None,
    verbose: bool = False,
) -> T:
    """Configure the callback manager."""
    callback_manager = callback_manager_cls([])
    if inheritable_callbacks or local_callbacks:
        if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
            inheritable_callbacks_: List[BaseCallbackHandler] = (
                inheritable_callbacks or []
            )
            callback_manager = callback_manager_cls(
                handlers=inheritable_callbacks_,
                inheritable_handlers=inheritable_callbacks_,
            )
        else:
            callback_manager = callback_manager_cls(
                handlers=inheritable_callbacks.handlers,
                inheritable_handlers=inheritable_callbacks.inheritable_handlers,
                parent_run_id=inheritable_callbacks.parent_run_id,
            )
        callback_manager = copy.deepcopy(callback_manager)
        local_handlers_ = (
            local_callbacks
            if isinstance(local_callbacks, list)
            else (local_callbacks.handlers if local_callbacks else [])
        )
        for handler in local_handlers_:
            callback_manager.add_handler(copy.deepcopy(handler), False)

    tracer = tracing_callback_var.get()
    open_ai = openai_callback_var.get()
    tracing_enabled_ = (
        os.environ.get("LANGCHAIN_TRACING") is not None or tracer is not None
    )
    if verbose or tracing_enabled_ or open_ai is not None:
        if verbose and not any(
            isinstance(handler, StdOutCallbackHandler)
            for handler in callback_manager.handlers
        ):
            callback_manager.add_handler(StdOutCallbackHandler(), False)

        if tracing_enabled_ and not any(
            isinstance(handler, LangChainTracer)
            for handler in callback_manager.handlers
        ):
            if tracer:
                callback_manager.add_handler(copy.deepcopy(tracer), True)
            else:
                handler = LangChainTracer()
                handler.load_default_session()
                callback_manager.add_handler(handler, True)
        if open_ai is not None and not any(
            isinstance(handler, OpenAICallbackHandler)
            for handler in callback_manager.handlers
        ):
            callback_manager.add_handler(open_ai, True)

    return callback_manager
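
# Sketch of the precedence rules implemented above (hypothetical handler
# names, for illustration only):
#
#     inheritable = [MyTracingHandler()]     # flows down to child runs
#     local = [MyPrintHandler()]             # applies to this run only
#     manager = _configure(CallbackManager, inheritable, local)
#     # manager.handlers             -> deep copies of inheritable + local
#     # manager.inheritable_handlers -> the inheritable handlers only
#
# With verbose=True a StdOutCallbackHandler is appended, and setting the
# LANGCHAIN_TRACING environment variable adds a LangChainTracer.
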
class NullCallbackManagerForChainRun(CallbackManagerForChainRun):
    """A no-op run manager, used as the default when none is supplied."""

    def __init__(self) -> None:
        super().__init__("", [], [])

    def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        pass

    def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        pass

    def get_child(self) -> Callbacks:
        return None

@@ -1,127 +0,0 @@
"""A shared CallbackManager."""
|
||||
|
||||
import threading
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from langchain.callbacks.base import (
|
||||
BaseCallbackHandler,
|
||||
BaseCallbackManager,
|
||||
CallbackManager,
|
||||
)
|
||||
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
|
||||
|
||||
class Singleton:
|
||||
"""A thread-safe singleton class that can be inherited from."""
|
||||
|
||||
_instance = None
|
||||
_lock = threading.Lock()
|
||||
|
||||
def __new__(cls) -> Any:
|
||||
"""Create a new shared instance of the class."""
|
||||
if cls._instance is None:
|
||||
with cls._lock:
|
||||
# Another thread could have created the instance
|
||||
# before we acquired the lock. So check that the
|
||||
# instance is still nonexistent.
|
||||
if not cls._instance:
|
||||
cls._instance = super().__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
|
||||
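
# Illustrative check of the singleton contract (not part of the module):
#
#     class _Shared(Singleton):
#         pass
#
#     assert _Shared() is _Shared()   # every call returns the same instance
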
class SharedCallbackManager(Singleton, BaseCallbackManager):
    """A thread-safe singleton CallbackManager."""

    _callback_manager: CallbackManager = CallbackManager(handlers=[])

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""
        with self._lock:
            self._callback_manager.on_llm_start(serialized, prompts, **kwargs)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        with self._lock:
            self._callback_manager.on_llm_end(response, **kwargs)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        with self._lock:
            self._callback_manager.on_llm_new_token(token, **kwargs)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        with self._lock:
            self._callback_manager.on_llm_error(error, **kwargs)

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        with self._lock:
            self._callback_manager.on_chain_start(serialized, inputs, **kwargs)

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        with self._lock:
            self._callback_manager.on_chain_end(outputs, **kwargs)

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        with self._lock:
            self._callback_manager.on_chain_error(error, **kwargs)

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        with self._lock:
            self._callback_manager.on_tool_start(serialized, input_str, **kwargs)

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        with self._lock:
            self._callback_manager.on_agent_action(action, **kwargs)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        with self._lock:
            self._callback_manager.on_tool_end(output, **kwargs)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        with self._lock:
            self._callback_manager.on_tool_error(error, **kwargs)

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Run on arbitrary text."""
        with self._lock:
            self._callback_manager.on_text(text, **kwargs)

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run on agent end."""
        with self._lock:
            self._callback_manager.on_agent_finish(finish, **kwargs)

    def add_handler(self, callback: BaseCallbackHandler) -> None:
        """Add a callback to the callback manager."""
        with self._lock:
            self._callback_manager.add_handler(callback)

    def remove_handler(self, callback: BaseCallbackHandler) -> None:
        """Remove a callback from the callback manager."""
        with self._lock:
            self._callback_manager.remove_handler(callback)

    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""
        with self._lock:
            self._callback_manager.handlers = handlers
@@ -1,12 +1,5 @@
"""Tracers that record execution of LangChain runs."""

from langchain.callbacks.tracers.base import SharedTracer, Tracer
from langchain.callbacks.tracers.langchain import BaseLangChainTracer
from langchain.callbacks.tracers.langchain import LangChainTracer


class SharedLangChainTracer(SharedTracer, BaseLangChainTracer):
    """Shared tracer that records LangChain execution to LangChain endpoint."""


class LangChainTracer(Tracer, BaseLangChainTracer):
    """Tracer that records LangChain execution to LangChain endpoint."""

__all__ = ["LangChainTracer"]
@@ -1,14 +1,12 @@
"""Base interfaces for tracing runs."""
from __future__ import annotations

import threading
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from uuid import uuid4

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.shared import Singleton
from langchain.callbacks.tracers.schemas import (
    ChainRun,
    LLMRun,
@@ -16,7 +14,7 @@ from langchain.callbacks.tracers.schemas import (
    TracerSession,
    TracerSessionCreate,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult
from langchain.schema import LLMResult


class TracerException(Exception):
@@ -26,13 +24,25 @@
class BaseTracer(BaseCallbackHandler, ABC):
    """Base interface for tracers."""

    @abstractmethod
    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.run_map: Dict[str, Union[LLMRun, ChainRun, ToolRun]] = {}
        self.session: Optional[TracerSession] = None

    @staticmethod
    def _add_child_run(
        self,
        parent_run: Union[ChainRun, ToolRun],
        child_run: Union[LLMRun, ChainRun, ToolRun],
    ) -> None:
        """Add child run to a chain run or tool run."""
        if isinstance(child_run, LLMRun):
            parent_run.child_llm_runs.append(child_run)
        elif isinstance(child_run, ChainRun):
            parent_run.child_chain_runs.append(child_run)
        elif isinstance(child_run, ToolRun):
            parent_run.child_tool_runs.append(child_run)
        else:
            raise TracerException(f"Invalid run type: {type(child_run)}")

    @abstractmethod
    def _persist_run(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
@@ -42,15 +52,11 @@
    def _persist_session(self, session: TracerSessionCreate) -> TracerSession:
        """Persist a tracing session."""

    @abstractmethod
    def _generate_id(self) -> Optional[Union[int, str]]:
        """Generate an id for a run."""

    def new_session(self, name: Optional[str] = None, **kwargs: Any) -> TracerSession:
        """NOT thread safe, do not call this method from multiple threads."""
        session_create = TracerSessionCreate(name=name, extra=kwargs)
        session = self._persist_session(session_create)
        self._session = session
        self.session = session
        return session

    @abstractmethod
@@ -61,283 +67,232 @@ class BaseTracer(BaseCallbackHandler, ABC):
    def load_default_session(self) -> TracerSession:
        """Load the default tracing session and set it as the Tracer's session."""

    @property
    @abstractmethod
    def _stack(self) -> List[Union[LLMRun, ChainRun, ToolRun]]:
        """Get the tracer stack."""

    @property
    @abstractmethod
    def _execution_order(self) -> int:
        """Get the execution order for a run."""

    @_execution_order.setter
    @abstractmethod
    def _execution_order(self, value: int) -> None:
        """Set the execution order for a run."""

    @property
    @abstractmethod
    def _session(self) -> Optional[TracerSession]:
        """Get the tracing session."""

    @_session.setter
    @abstractmethod
    def _session(self, value: TracerSession) -> None:
        """Set the tracing session."""

    def _start_trace(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
        """Start a trace for a run."""
        self._execution_order += 1

        if self._stack:
            if not (
                isinstance(self._stack[-1], ChainRun)
                or isinstance(self._stack[-1], ToolRun)
            ):
        if run.parent_uuid:
            parent_run = self.run_map[run.parent_uuid]
            if parent_run:
                if isinstance(parent_run, LLMRun):
                    raise TracerException(
                        "Cannot add child run to an LLM run. "
                        "LLM runs are not allowed to have children."
                    )
                self._add_child_run(parent_run, run)
            else:
                raise TracerException(
                    f"Nested {run.__class__.__name__} can only be"
                    f" logged inside a ChainRun or ToolRun"
                    f"Parent run with UUID {run.parent_uuid} not found."
                )
            self._add_child_run(self._stack[-1], run)
        self._stack.append(run)

    def _end_trace(self) -> None:
        self.run_map[run.uuid] = run

    def _end_trace(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
        """End a trace for a run."""
        run = self._stack.pop()
        if not self._stack:
            self._execution_order = 1
        if not run.parent_uuid:
            self._persist_run(run)
        else:
            parent_run = self.run_map.get(run.parent_uuid)
            if parent_run is None:
                raise TracerException(
                    f"Parent run with UUID {run.parent_uuid} not found."
                )
            if isinstance(parent_run, LLMRun):
                raise TracerException("LLM Runs are not allowed to have children. ")
            if run.child_execution_order > parent_run.child_execution_order:
                parent_run.child_execution_order = run.child_execution_order
        self.run_map.pop(run.uuid)

    def _get_execution_order(self, parent_run_id: Optional[str] = None) -> int:
        """Get the execution order for a run."""
        if parent_run_id is None:
            return 1

        parent_run = self.run_map.get(parent_run_id)
        if parent_run is None:
            raise TracerException(f"Parent run with UUID {parent_run_id} not found.")

        if isinstance(parent_run, LLMRun):
            raise TracerException("LLM Runs are not allowed to have children. ")

        return parent_run.child_execution_order + 1
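    # Execution-order numbering (illustrative): a parent chain run starts at
    # execution_order 1; its first child gets 2, and because _end_trace folds
    # a finished child's child_execution_order back into the parent, a second
    # sibling then gets 3.
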
    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Start a trace for an LLM run."""
        if self._session is None:
            raise TracerException(
                "Initialize a session with `new_session()` before starting a trace."
            )
        if self.session is None:
            self.session = self.load_default_session()

        if run_id is None:
            run_id = str(uuid4())

        execution_order = self._get_execution_order(parent_run_id)
        llm_run = LLMRun(
            uuid=run_id,
            parent_uuid=parent_run_id,
            serialized=serialized,
            prompts=prompts,
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=self._execution_order,
            session_id=self._session.id,
            id=self._generate_id(),
            execution_order=execution_order,
            child_execution_order=execution_order,
            session_id=self.session.id,
        )
        self._start_trace(llm_run)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Handle a new token for an LLM run."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
    def on_llm_end(self, response: LLMResult, *, run_id: str, **kwargs: Any) -> None:
        """End a trace for an LLM run."""
        if not self._stack or not isinstance(self._stack[-1], LLMRun):
        if not run_id:
            raise TracerException("No run_id provided for on_llm_end callback.")

        llm_run = self.run_map.get(run_id)
        if llm_run is None or not isinstance(llm_run, LLMRun):
            raise TracerException("No LLMRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].response = response

        self._end_trace()
        llm_run.response = response
        llm_run.end_time = datetime.utcnow()
        self._end_trace(llm_run)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        **kwargs: Any,
    ) -> None:
        """Handle an error for an LLM run."""
        if not self._stack or not isinstance(self._stack[-1], LLMRun):
        if not run_id:
            raise TracerException("No run_id provided for on_llm_error callback.")

        llm_run = self.run_map.get(run_id)
        if llm_run is None or not isinstance(llm_run, LLMRun):
            raise TracerException("No LLMRun found to be traced")

        self._stack[-1].error = repr(error)
        self._stack[-1].end_time = datetime.utcnow()

        self._end_trace()
        llm_run.error = repr(error)
        llm_run.end_time = datetime.utcnow()
        self._end_trace(llm_run)

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Start a trace for a chain run."""
        if self._session is None:
            raise TracerException(
                "Initialize a session with `new_session()` before starting a trace."
            )
        if self.session is None:
            self.session = self.load_default_session()

        execution_order = self._get_execution_order(parent_run_id)
        chain_run = ChainRun(
            uuid=run_id,
            parent_uuid=parent_run_id,
            serialized=serialized,
            inputs=inputs,
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=self._execution_order,
            execution_order=execution_order,
            child_execution_order=execution_order,
            child_runs=[],
            session_id=self._session.id,
            id=self._generate_id(),
            session_id=self.session.id,
        )
        self._start_trace(chain_run)

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
    def on_chain_end(
        self, outputs: Dict[str, Any], *, run_id: str, **kwargs: Any
    ) -> None:
        """End a trace for a chain run."""
        if not self._stack or not isinstance(self._stack[-1], ChainRun):
        chain_run = self.run_map.get(run_id)
        if chain_run is None or not isinstance(chain_run, ChainRun):
            raise TracerException("No ChainRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].outputs = outputs

        self._end_trace()
        chain_run.outputs = outputs
        chain_run.end_time = datetime.utcnow()
        self._end_trace(chain_run)

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        **kwargs: Any,
    ) -> None:
        """Handle an error for a chain run."""
        if not self._stack or not isinstance(self._stack[-1], ChainRun):
        chain_run = self.run_map.get(run_id)
        if chain_run is None or not isinstance(chain_run, ChainRun):
            raise TracerException("No ChainRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].error = repr(error)

        self._end_trace()
        chain_run.error = repr(error)
        chain_run.end_time = datetime.utcnow()
        self._end_trace(chain_run)

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
        self,
        serialized: Dict[str, Any],
        input_str: str,
        *,
        run_id: str,
        parent_run_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Start a trace for a tool run."""
        if self._session is None:
            raise TracerException(
                "Initialize a session with `new_session()` before starting a trace."
            )
        if self.session is None:
            self.session = self.load_default_session()

        execution_order = self._get_execution_order(parent_run_id)
        tool_run = ToolRun(
            uuid=run_id,
            parent_uuid=parent_run_id,
            serialized=serialized,
            # TODO: this is duplicate info as above, not needed.
            action=str(serialized),
            tool_input=input_str,
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=self._execution_order,
            execution_order=execution_order,
            child_execution_order=execution_order,
            child_runs=[],
            session_id=self._session.id,
            id=self._generate_id(),
            session_id=self.session.id,
        )
        self._start_trace(tool_run)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
    def on_tool_end(self, output: str, *, run_id: str, **kwargs: Any) -> None:
        """End a trace for a tool run."""
        if not self._stack or not isinstance(self._stack[-1], ToolRun):
        tool_run = self.run_map.get(run_id)
        if tool_run is None or not isinstance(tool_run, ToolRun):
            raise TracerException("No ToolRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].output = output

        self._end_trace()
        tool_run.output = output
        tool_run.end_time = datetime.utcnow()
        self._end_trace(tool_run)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: str,
        **kwargs: Any,
    ) -> None:
        """Handle an error for a tool run."""
        if not self._stack or not isinstance(self._stack[-1], ToolRun):
        tool_run = self.run_map.get(run_id)
        if tool_run is None or not isinstance(tool_run, ToolRun):
            raise TracerException("No ToolRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].error = repr(error)
        tool_run.error = repr(error)
        tool_run.end_time = datetime.utcnow()
        self._end_trace(tool_run)

        self._end_trace()
    def __deepcopy__(self, memo: dict) -> BaseTracer:
        """Deepcopy the tracer."""
        return self

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Handle a text message."""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Handle an agent finish message."""
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Do nothing."""
        pass


class Tracer(BaseTracer, ABC):
    """A non-thread safe implementation of the BaseTracer interface."""

    def __init__(self) -> None:
        """Initialize a tracer."""
        self._tracer_stack: List[Union[LLMRun, ChainRun, ToolRun]] = []
        self._tracer_execution_order = 1
        self._tracer_session: Optional[TracerSession] = None

    @property
    def _stack(self) -> List[Union[LLMRun, ChainRun, ToolRun]]:
        """Get the tracer stack."""
        return self._tracer_stack

    @property
    def _execution_order(self) -> int:
        """Get the execution order for a run."""
        return self._tracer_execution_order

    @_execution_order.setter
    def _execution_order(self, value: int) -> None:
        """Set the execution order for a run."""
        self._tracer_execution_order = value

    @property
    def _session(self) -> Optional[TracerSession]:
        """Get the tracing session."""
        return self._tracer_session

    @_session.setter
    def _session(self, value: TracerSession) -> None:
        """Set the tracing session."""
        if self._stack:
            raise TracerException(
                "Cannot set a session while a trace is being recorded"
            )
        self._tracer_session = value


@dataclass
class TracerStack(threading.local):
    """A stack of runs used for logging."""

    stack: List[Union[LLMRun, ChainRun, ToolRun]] = field(default_factory=list)
    execution_order: int = 1
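
# Because TracerStack subclasses threading.local, each thread sees its own
# `stack` and `execution_order`. An illustrative check (hypothetical, not
# part of the module):
#
#     import threading
#     ts = TracerStack()
#     def probe() -> None:
#         assert ts.stack == []   # fresh per-thread state
#     threading.Thread(target=probe).start()
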
class SharedTracer(Singleton, BaseTracer, ABC):
    """A thread-safe Singleton implementation of BaseTracer."""

    _tracer_stack = TracerStack()
    _tracer_session = None

    @property
    def _stack(self) -> List[Union[LLMRun, ChainRun, ToolRun]]:
        """Get the tracer stack."""
        return self._tracer_stack.stack

    @property
    def _execution_order(self) -> int:
        """Get the execution order for a run."""
        return self._tracer_stack.execution_order

    @_execution_order.setter
    def _execution_order(self, value: int) -> None:
        """Set the execution order for a run."""
        self._tracer_stack.execution_order = value

    @property
    def _session(self) -> Optional[TracerSession]:
        """Get the tracing session."""
        return self._tracer_session

    @_session.setter
    def _session(self, value: TracerSession) -> None:
        """Set the tracing session."""
        with self._lock:
            # TODO: currently, we are only checking current thread's stack.
            # Need to make sure that we are not in the middle of a trace
            # in any thread.
            if self._stack:
                raise TracerException(
                    "Cannot set a session while a trace is being recorded"
                )
            self._tracer_session = value

    def __copy__(self) -> BaseTracer:
        """Copy the tracer."""
        return self
@@ -3,7 +3,6 @@ from __future__ import annotations

import logging
import os
from abc import ABC
from typing import Any, Dict, Optional, Union

import requests
@@ -18,14 +17,17 @@ from langchain.callbacks.tracers.schemas import (
)


class BaseLangChainTracer(BaseTracer, ABC):
class LangChainTracer(BaseTracer):
    """An implementation of the SharedTracer that POSTS to the langchain endpoint."""

    always_verbose: bool = True
    _endpoint: str = os.getenv("LANGCHAIN_ENDPOINT", "http://localhost:8000")
    _headers: Dict[str, Any] = {"Content-Type": "application/json"}
    if os.getenv("LANGCHAIN_API_KEY"):
        _headers["x-api-key"] = os.getenv("LANGCHAIN_API_KEY")
    def __init__(self, session_name: str = "default", **kwargs: Any) -> None:
        """Initialize the LangChain tracer."""
        super().__init__(**kwargs)
        self._endpoint: str = os.getenv("LANGCHAIN_ENDPOINT", "http://localhost:8000")
        self._headers: Dict[str, Any] = {"Content-Type": "application/json"}
        if os.getenv("LANGCHAIN_API_KEY"):
            self._headers["x-api-key"] = os.getenv("LANGCHAIN_API_KEY")
        self.session = self.load_session(session_name)

    def _persist_run(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
        """Persist a run."""
@@ -59,54 +61,29 @@ class BaseLangChainTracer(BaseTracer, ABC):
        session = TracerSession(id=1, **session_create.dict())
        return session

    def load_session(self, session_name: str) -> TracerSession:
    def _load_session(self, session_name: Optional[str] = None) -> TracerSession:
        """Load a session from the tracer."""
        try:
            r = requests.get(
                f"{self._endpoint}/sessions?name={session_name}",
                headers=self._headers,
            )
            url = f"{self._endpoint}/sessions"
            if session_name:
                url += f"?name={session_name}"
            r = requests.get(url, headers=self._headers)

            tracer_session = TracerSession(**r.json()[0])
            self._session = tracer_session
            return tracer_session
        except Exception as e:
            session_type = "default" if not session_name else session_name
            logging.warning(
                f"Failed to load session {session_name}, using empty session: {e}"
                f"Failed to load {session_type} session, using empty session: {e}"
            )
            tracer_session = TracerSession(id=1)
            self._session = tracer_session
            return tracer_session

        self.session = tracer_session
        return tracer_session

    def load_session(self, session_name: str) -> TracerSession:
        """Load a session with the given name from the tracer."""
        return self._load_session(session_name)

    def load_default_session(self) -> TracerSession:
        """Load the default tracing session and set it as the Tracer's session."""
        try:
            r = requests.get(
                f"{self._endpoint}/sessions",
                headers=self._headers,
            )
            # Use the first session result
            tracer_session = TracerSession(**r.json()[0])
            self._session = tracer_session
            return tracer_session
        except Exception as e:
            logging.warning(f"Failed to default session, using empty session: {e}")
            tracer_session = TracerSession(id=1)
            self._session = tracer_session
            return tracer_session

    def _add_child_run(
        self,
        parent_run: Union[ChainRun, ToolRun],
        child_run: Union[LLMRun, ChainRun, ToolRun],
    ) -> None:
        """Add child run to a chain run or tool run."""
        if isinstance(child_run, LLMRun):
            parent_run.child_llm_runs.append(child_run)
        elif isinstance(child_run, ChainRun):
            parent_run.child_chain_runs.append(child_run)
        else:
            parent_run.child_tool_runs.append(child_run)

    def _generate_id(self) -> Optional[Union[int, str]]:
        """Generate an id for a run."""
        return None
        return self._load_session("default")
@@ -32,11 +32,13 @@ class TracerSession(TracerSessionBase):
class BaseRun(BaseModel):
    """Base class for Run."""

    id: Optional[Union[int, str]] = None
    uuid: str
    parent_uuid: Optional[str] = None
    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    extra: Optional[Dict[str, Any]] = None
    execution_order: int
    child_execution_order: int
    serialized: Dict[str, Any]
    session_id: int
    error: Optional[str] = None
@@ -57,7 +59,6 @@ class ChainRun(BaseRun):
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)
    child_runs: List[Union[LLMRun, ChainRun, ToolRun]] = Field(default_factory=list)


class ToolRun(BaseRun):
@@ -69,7 +70,6 @@ class ToolRun(BaseRun):
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)
    child_runs: List[Union[LLMRun, ChainRun, ToolRun]] = Field(default_factory=list)


ChainRun.update_forward_refs()
@@ -5,12 +5,16 @@ from typing import Any, Dict, List, Optional

from pydantic import Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    CallbackManagerForChainRun,
    NullCallbackManagerForChainRun,
)
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.prompts import BasePromptTemplate
from langchain.requests import TextRequestsWrapper
from langchain.schema import BaseLanguageModel


class APIChain(Chain):
@@ -61,16 +65,18 @@ class APIChain(Chain):
        )
        return values

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: CallbackManagerForChainRun = NullCallbackManagerForChainRun(),
    ) -> Dict[str, str]:
        question = inputs[self.question_key]
        api_url = self.api_request_chain.predict(
            question=question, api_docs=self.api_docs
        )
        self.callback_manager.on_text(
            api_url, color="green", end="\n", verbose=self.verbose
            question=question, api_docs=self.api_docs, callbacks=run_manager.get_child()
        )
        run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
        api_response = self.requests_wrapper.get(api_url)
        self.callback_manager.on_text(
        run_manager.on_text(
            api_response, color="yellow", end="\n", verbose=self.verbose
        )
        answer = self.api_answer_chain.predict(
@@ -1,15 +1,24 @@
"""Base interface that all chains should implement."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import yaml
from pydantic import BaseModel, Field, validator
from pydantic import BaseModel, Field, root_validator, validator

import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainRun,
    CallbackManager,
    CallbackManagerForChainRun,
    Callbacks,
    NullCallbackManagerForChainRun,
)
from langchain.schema import BaseMemory


@@ -21,9 +30,8 @@ class Chain(BaseModel, ABC):
    """Base interface that all chains should implement."""

    memory: Optional[BaseMemory] = None
    callback_manager: BaseCallbackManager = Field(
        default_factory=get_callback_manager, exclude=True
    )
    callbacks: Callbacks = None
    callback_manager: Optional[BaseCallbackManager] = None
    verbose: bool = Field(
        default_factory=_get_verbosity
    )  # Whether to print the response text
@@ -37,15 +45,16 @@ class Chain(BaseModel, ABC):
    def _chain_type(self) -> str:
        raise NotImplementedError("Saving not supported for this chain type.")

    @validator("callback_manager", pre=True, always=True)
    def set_callback_manager(
        cls, callback_manager: Optional[BaseCallbackManager]
    ) -> BaseCallbackManager:
        """If callback manager is None, set it.

        This allows users to pass in None as callback manager, which is a nice UX.
        """
        return callback_manager or get_callback_manager()
    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        """Raise deprecation warning if callback_manager is used."""
        if values.get("callback_manager") is not None:
            warnings.warn(
                "callback_manager is deprecated. Please use callbacks instead.",
                DeprecationWarning,
            )
            values["callbacks"] = values.pop("callback_manager", None)
        return values

    @validator("verbose", pre=True, always=True)
    def set_verbose(cls, verbose: Optional[bool]) -> bool:
@@ -82,15 +91,26 @@
        )

    @abstractmethod
    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: CallbackManagerForChainRun = NullCallbackManagerForChainRun(),
    ) -> Dict[str, str]:
        """Run the logic of this chain and return the output."""

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Run the logic of this chain and return the output."""
        raise NotImplementedError("Async call not supported for this chain type.")

    def __call__(
        self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
        self,
        inputs: Union[Dict[str, Any], Any],
        return_only_outputs: bool = False,
        callbacks: Callbacks = None,
    ) -> Dict[str, Any]:
        """Run the logic of this chain and add to output if desired.

@@ -104,21 +124,31 @@

        """
        inputs = self.prep_inputs(inputs)
        self.callback_manager.on_chain_start(
        callback_manager = CallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
        run_manager = callback_manager.on_chain_start(
            {"name": self.__class__.__name__},
            inputs,
            verbose=self.verbose,
        )
        try:
            outputs = self._call(inputs)
            outputs = (
                self._call(inputs, run_manager=run_manager)
                if new_arg_supported
                else self._call(inputs)
            )
        except (KeyboardInterrupt, Exception) as e:
            self.callback_manager.on_chain_error(e, verbose=self.verbose)
            run_manager.on_chain_error(e)
            raise e
        self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
        run_manager.on_chain_end(outputs)
        return self.prep_outputs(inputs, outputs, return_only_outputs)

    async def acall(
        self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
        self,
        inputs: Union[Dict[str, Any], Any],
        return_only_outputs: bool = False,
        callbacks: Callbacks = None,
    ) -> Dict[str, Any]:
        """Run the logic of this chain and add to output if desired.

@@ -132,30 +162,24 @@

        """
        inputs = self.prep_inputs(inputs)
        if self.callback_manager.is_async:
            await self.callback_manager.on_chain_start(
                {"name": self.__class__.__name__},
                inputs,
                verbose=self.verbose,
            )
        else:
            self.callback_manager.on_chain_start(
                {"name": self.__class__.__name__},
                inputs,
                verbose=self.verbose,
            )
        callback_manager = AsyncCallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
        run_manager = await callback_manager.on_chain_start(
            {"name": self.__class__.__name__},
            inputs,
        )
        try:
            outputs = await self._acall(inputs)
            outputs = (
                await self._acall(inputs, run_manager=run_manager)
                if new_arg_supported
                else await self._acall(inputs)
            )
        except (KeyboardInterrupt, Exception) as e:
            if self.callback_manager.is_async:
                await self.callback_manager.on_chain_error(e, verbose=self.verbose)
            else:
                self.callback_manager.on_chain_error(e, verbose=self.verbose)
            await run_manager.on_chain_error(e)
            raise e
        if self.callback_manager.is_async:
            await self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
        else:
            self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
        await run_manager.on_chain_end(outputs)
        return self.prep_outputs(inputs, outputs, return_only_outputs)

    def prep_outputs(
@@ -195,11 +219,13 @@
        self._validate_inputs(inputs)
        return inputs

    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    def apply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Call the chain on all inputs in the list."""
        return [self(inputs) for inputs in input_list]
        return [self(inputs, callbacks=callbacks) for inputs in input_list]

    def run(self, *args: Any, **kwargs: Any) -> str:
    def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Run the chain as text in, text out or multiple variables, text out."""
        if len(self.output_keys) != 1:
            raise ValueError(
@@ -210,17 +236,17 @@
        if args and not kwargs:
            if len(args) != 1:
                raise ValueError("`run` supports only one positional argument.")
            return self(args[0])[self.output_keys[0]]
            return self(args[0], callbacks=callbacks)[self.output_keys[0]]

        if kwargs and not args:
            return self(kwargs)[self.output_keys[0]]
            return self(kwargs, callbacks=callbacks)[self.output_keys[0]]

        raise ValueError(
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )

    async def arun(self, *args: Any, **kwargs: Any) -> str:
    async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Run the chain as text in, text out or multiple variables, text out."""
        if len(self.output_keys) != 1:
            raise ValueError(
@@ -231,10 +257,10 @@
        if args and not kwargs:
            if len(args) != 1:
                raise ValueError("`run` supports only one positional argument.")
            return (await self.acall(args[0]))[self.output_keys[0]]
            return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]]

        if kwargs and not args:
            return (await self.acall(kwargs))[self.output_keys[0]]
            return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]]

        raise ValueError(
            f"`run` supported with either positional arguments or keyword arguments"
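
# Sketch of what the new Chain contract asks of a subclass (illustrative only;
# EchoChain is hypothetical, and real chains define input_keys/output_keys as
# properties):
#
#     class EchoChain(Chain):
#         input_keys = ["text"]
#         output_keys = ["text"]
#
#         def _call(
#             self,
#             inputs: Dict[str, str],
#             run_manager: CallbackManagerForChainRun = NullCallbackManagerForChainRun(),
#         ) -> Dict[str, str]:
#             run_manager.on_text(inputs["text"])
#             return {"text": inputs["text"]}
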
@@ -1,13 +1,13 @@
|
||||
"""Chain for applying constitutional principles to the outputs of another chain."""
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from langchain.base_language import BaseLanguageModel
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
|
||||
from langchain.chains.constitutional_ai.principles import PRINCIPLES
|
||||
from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.schema import BaseLanguageModel
|
||||
|
||||
|
||||
class ConstitutionalChain(Chain):
|
||||
|
||||
@@ -8,6 +8,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
@@ -15,7 +16,7 @@ from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, BaseMessage, BaseRetriever, Document
from langchain.schema import BaseMessage, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore

# Depending on the memory type and configuration, the chat history format may differ.
@@ -5,11 +5,17 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

from pydantic import Extra

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain
from langchain.input import get_colored_text
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseLanguageModel, LLMResult, PromptValue
from langchain.schema import LLMResult, PromptValue


class LLMChain(Chain):
@@ -53,21 +59,39 @@ class LLMChain(Chain):
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return self.apply([inputs])[0]
    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        return self.apply([inputs], run_manager=run_manager)[0]

    def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
    def generate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list)
        return self.llm.generate_prompt(prompts, stop)
        prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
        return self.llm.generate_prompt(
            prompts, stop, callbacks=run_manager.get_child() if run_manager else None
        )

    async def agenerate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
    async def agenerate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list)
        return await self.llm.agenerate_prompt(prompts, stop)
        return await self.llm.agenerate_prompt(
            prompts, stop, callbacks=run_manager.get_child() if run_manager else None
        )

    def prep_prompts(
        self, input_list: List[Dict[str, Any]]
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
@@ -79,7 +103,8 @@ class LLMChain(Chain):
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            self.callback_manager.on_text(_text, end="\n", verbose=self.verbose)
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
@@ -88,7 +113,9 @@ class LLMChain(Chain):
        return prompts, stop

    async def aprep_prompts(
        self, input_list: List[Dict[str, Any]]
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
@@ -100,12 +127,8 @@ class LLMChain(Chain):
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if self.callback_manager.is_async:
                await self.callback_manager.on_text(
                    _text, end="\n", verbose=self.verbose
                )
            else:
                self.callback_manager.on_text(_text, end="\n", verbose=self.verbose)
            if run_manager:
                await run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
@@ -113,14 +136,22 @@ class LLMChain(Chain):
            prompts.append(prompt)
        return prompts, stop

    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    def apply(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        response = self.generate(input_list)
        response = self.generate(input_list, run_manager=run_manager)
        return self.create_outputs(response)

    async def aapply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    async def aapply(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        response = await self.agenerate(input_list)
        response = await self.agenerate(input_list, run_manager=run_manager)
        return self.create_outputs(response)

    def create_outputs(self, response: LLMResult) -> List[Dict[str, str]]:
@@ -131,13 +162,18 @@ class LLMChain(Chain):
            for generation in response.generations
        ]

    async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return (await self.aapply([inputs]))[0]
    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        return (await self.aapply([inputs], run_manager=run_manager))[0]

    def predict(self, **kwargs: Any) -> str:
    def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
@@ -148,12 +184,13 @@ class LLMChain(Chain):

                completion = llm.predict(adjective="funny")
        """
        return self(kwargs)[self.output_key]
        return self(kwargs, callbacks=callbacks)[self.output_key]

    async def apredict(self, **kwargs: Any) -> str:
    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
@@ -164,31 +201,35 @@ class LLMChain(Chain):

                completion = llm.predict(adjective="funny")
        """
        return (await self.acall(kwargs))[self.output_key]
        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]

    def predict_and_parse(self, **kwargs: Any) -> Union[str, List[str], Dict[str, str]]:
    def predict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call predict and then parse the results."""
        result = self.predict(**kwargs)
        result = self.predict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    async def apredict_and_parse(
        self, **kwargs: Any
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call apredict and then parse the results."""
        result = await self.apredict(**kwargs)
        result = await self.apredict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    def apply_and_parse(
        self, input_list: List[Dict[str, Any]]
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        result = self.apply(input_list)
        result = self.apply(input_list, run_manager=run_manager)
        return self._parse_result(result)

    def _parse_result(
@@ -202,10 +243,12 @@ class LLMChain(Chain):
        return result

    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]]
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        result = await self.aapply(input_list)
        result = await self.aapply(input_list, run_manager=run_manager)
        return self._parse_result(result)

    @property
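The recurring pattern in the `LLMChain` hunks is `callbacks=run_manager.get_child() if run_manager else None`: a chain that receives a run manager hands a child manager to whatever it calls, so nested runs stay grouped under the parent trace. A sketch of a custom chain following that contract; the class itself is hypothetical:

```python
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain


class AnswerChain(Chain):
    """Hypothetical chain that delegates to an inner LLMChain."""

    llm_chain: LLMChain

    @property
    def input_keys(self) -> List[str]:
        return ["question"]

    @property
    def output_keys(self) -> List[str]:
        return ["answer"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        # Child callbacks inherit this run's context, keeping the
        # inner LLM call attached to the outer chain run.
        text = self.llm_chain.predict(
            question=inputs["question"],
            callbacks=run_manager.get_child() if run_manager else None,
        )
        return {"answer": text}
```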
@@ -3,11 +3,11 @@ from typing import Dict, List

from pydantic import Extra

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.utilities.bash import BashProcess
@@ -1,16 +1,20 @@
"""Chain that interprets a prompt and executes python code to do math."""
import math
import re
from typing import Dict, List
from typing import Dict, List, Optional

import numexpr
from pydantic import Extra

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_math.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class LLMMathChain(Chain):
@@ -68,15 +72,19 @@ class LLMMathChain(Chain):
        # Remove any leading and trailing brackets from the output
        return re.sub(r"^\[|\]$", "", output)

    def _process_llm_result(self, llm_output: str) -> Dict[str, str]:
        self.callback_manager.on_text(llm_output, color="green", verbose=self.verbose)
    def _process_llm_result(
        self, llm_output: str, run_manager: Optional[CallbackManagerForChainRun] = None
    ) -> Dict[str, str]:
        if run_manager:
            run_manager.on_text(llm_output, color="green", verbose=self.verbose)
        llm_output = llm_output.strip()
        text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
        if text_match:
            expression = text_match.group(1)
            output = self._evaluate_expression(expression)
            self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
            self.callback_manager.on_text(output, color="yellow", verbose=self.verbose)
            if run_manager:
                run_manager.on_text("\nAnswer: ", verbose=self.verbose)
                run_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif llm_output.startswith("Answer:"):
            answer = llm_output
@@ -86,30 +94,21 @@ class LLMMathChain(Chain):
            raise ValueError(f"unknown format from LLM: {llm_output}")
        return {self.output_key: answer}

    async def _aprocess_llm_result(self, llm_output: str) -> Dict[str, str]:
        if self.callback_manager.is_async:
            await self.callback_manager.on_text(
                llm_output, color="green", verbose=self.verbose
            )
        else:
            self.callback_manager.on_text(
                llm_output, color="green", verbose=self.verbose
            )
    async def _aprocess_llm_result(
        self,
        llm_output: str,
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        if run_manager:
            await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
        llm_output = llm_output.strip()
        text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
        if text_match:
            expression = text_match.group(1)
            output = self._evaluate_expression(expression)
            if self.callback_manager.is_async:
                await self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
                await self.callback_manager.on_text(
                    output, color="yellow", verbose=self.verbose
                )
            else:
                await self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
                await self.callback_manager.on_text(
                    output, color="yellow", verbose=self.verbose
                )
            if run_manager:
                await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
                await run_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif llm_output.startswith("Answer:"):
            answer = llm_output
@@ -119,30 +118,35 @@ class LLMMathChain(Chain):
            raise ValueError(f"unknown format from LLM: {llm_output}")
        return {self.output_key: answer}

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_executor = LLMChain(
            prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
        )
        self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        llm_executor = LLMChain(prompt=self.prompt, llm=self.llm)
        if run_manager:
            run_manager.on_text(inputs[self.input_key])
        llm_output = llm_executor.predict(
            question=inputs[self.input_key], stop=["```output"]
            question=inputs[self.input_key],
            stop=["```output"],
            callbacks=run_manager.get_child() if run_manager else None,
        )
        return self._process_llm_result(llm_output)
        return self._process_llm_result(llm_output, run_manager=run_manager)

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_executor = LLMChain(
            prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
        )
        if self.callback_manager.is_async:
            await self.callback_manager.on_text(
                inputs[self.input_key], verbose=self.verbose
            )
        else:
            self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        llm_executor = LLMChain(prompt=self.prompt, llm=self.llm)
        if run_manager:
            await run_manager.on_text(inputs[self.input_key])
        llm_output = await llm_executor.apredict(
            question=inputs[self.input_key], stop=["```output"]
            question=inputs[self.input_key],
            stop=["```output"],
            callbacks=run_manager.get_child() if run_manager else None,
        )
        return await self._aprocess_llm_result(llm_output)
        return await self._aprocess_llm_result(llm_output, run_manager=run_manager)

    @property
    def _chain_type(self) -> str:
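After this change, `LLMMathChain` no longer forwards a constructor-level `callback_manager` to its inner `LLMChain`; intermediate text (the green LLM output, the yellow answer) is emitted through the per-run manager instead. A usage sketch, assuming an OpenAI key is configured:

```python
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chains import LLMMathChain
from langchain.llms import OpenAI

math_chain = LLMMathChain(llm=OpenAI(temperature=0))
# The handler receives the chain's on_text events for this run only.
answer = math_chain.run("What is 13 * 17?", callbacks=[StdOutCallbackHandler()])
```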
@@ -8,12 +8,12 @@ from typing import Any, Dict, List, Optional

from pydantic import Extra

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.utilities import PythonREPL
@@ -3,10 +3,10 @@ from typing import Callable, List, Tuple

from pydantic import BaseModel, Field

from langchain.base_language import BaseLanguageModel
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class BasePromptSelector(BaseModel, ABC):
@@ -5,11 +5,11 @@ from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
@@ -8,6 +8,7 @@ from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
@@ -21,7 +22,6 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
)
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class BaseQAWithSourcesChain(Chain, ABC):
@@ -1,6 +1,7 @@
"""Load question answering with sources chains."""
from typing import Any, Mapping, Optional, Protocol

from langchain.base_language import BaseLanguageModel
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
@@ -14,7 +15,6 @@ from langchain.chains.qa_with_sources import (
)
from langchain.chains.question_answering import map_rerank_prompt
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class LoadingCallable(Protocol):
@@ -1,6 +1,7 @@
"""Load question answering chains."""
from typing import Any, Mapping, Optional, Protocol

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
@@ -15,7 +16,6 @@ from langchain.chains.question_answering import (
    stuff_prompt,
)
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class LoadingCallable(Protocol):
@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
@@ -14,7 +15,7 @@ from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel, BaseRetriever, Document
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
@@ -5,11 +5,11 @@ from typing import Any, Dict, List, Optional

from pydantic import Extra, Field

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.sql_database import SQLDatabase
@@ -1,6 +1,7 @@
"""Load summarizing chains."""
from typing import Any, Mapping, Optional, Protocol

from langchain.base_language import BaseLanguageModel
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
@@ -8,7 +9,6 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.summarize import map_reduce_prompt, refine_prompts, stuff_prompt
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class LoadingCallable(Protocol):
@@ -2,6 +2,10 @@ from typing import Any, Dict, List, Optional

from pydantic import Extra

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
@@ -85,7 +89,10 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
        )  # trim off the trailing ' ' that might come from the "Assistant: "

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        prompt = self._convert_messages_to_prompt(messages)
        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
@@ -98,10 +105,10 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
            for data in stream_resp:
                delta = data["completion"][len(completion) :]
                completion = data["completion"]
                self.callback_manager.on_llm_new_token(
                    delta,
                    verbose=self.verbose,
                )
                if run_manager:
                    run_manager.on_llm_new_token(
                        delta,
                    )
        else:
            response = self.client.completion(**params)
            completion = response["completion"]
@@ -109,7 +116,10 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
        return ChatResult(generations=[ChatGeneration(message=message)])

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        prompt = self._convert_messages_to_prompt(messages)
        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
@@ -122,15 +132,9 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
            async for data in stream_resp:
                delta = data["completion"][len(completion) :]
                completion = data["completion"]
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                if run_manager:
                    await run_manager.on_llm_new_token(
                        delta,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        delta,
                        verbose=self.verbose,
                    )
        else:
            response = await self.client.acompletion(**params)
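With streaming, tokens now reach handlers through `run_manager.on_llm_new_token` rather than the model-level callback manager, so a per-call streaming handler is enough. A sketch, assuming the model's `streaming` flag behaves as in the hunk above and that `ANTHROPIC_API_KEY` is set:

```python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic(streaming=True)
# Each completion delta is forwarded to the handler as it arrives.
chat(
    [HumanMessage(content="Tell me a one-line joke.")],
    callbacks=[StreamingStdOutCallbackHandler()],
)
```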
@@ -1,21 +1,30 @@
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from typing import List, Optional
from typing import Dict, List, Optional

from pydantic import Extra, Field, validator
from pydantic import Extra, Field, root_validator

import langchain
from langchain.callbacks import get_callback_manager
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
    AsyncCallbackManager,
    AsyncCallbackManagerForLLMRun,
    CallbackManager,
    CallbackManagerForLLMRun,
    Callbacks,
)
from langchain.schema import (
    AIMessage,
    BaseLanguageModel,
    BaseMessage,
    ChatGeneration,
    ChatResult,
    HumanMessage,
    LLMResult,
    PromptValue,
    get_buffer_string,
)


@@ -26,7 +35,19 @@ def _get_verbosity() -> bool:
class BaseChatModel(BaseLanguageModel, ABC):
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether to print out response text."""
    callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)
    callbacks: Callbacks = None
    callback_manager: Optional[BaseCallbackManager] = None

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        """Raise deprecation warning if callback_manager is used."""
        if values.get("callback_manager") is not None:
            warnings.warn(
                "callback_manager is deprecated. Please use callbacks instead.",
                DeprecationWarning,
            )
            values["callbacks"] = values.pop("callback_manager", None)
        return values

    class Config:
        """Configuration for this pydantic object."""
@@ -34,98 +55,130 @@ class BaseChatModel(BaseLanguageModel, ABC):
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @validator("callback_manager", pre=True, always=True)
    def set_callback_manager(
        cls, callback_manager: Optional[BaseCallbackManager]
    ) -> BaseCallbackManager:
        """If callback manager is None, set it.

        This allows users to pass in None as callback manager, which is a nice UX.
        """
        return callback_manager or get_callback_manager()

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        return {}

    def generate(
        self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        """Top Level call"""
        results = [self._generate(m, stop=stop) for m in messages]

        callback_manager = CallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        message_strings = [get_buffer_string(m) for m in messages]
        run_manager = callback_manager.on_llm_start(
            {"name": self.__class__.__name__}, message_strings
        )

        new_arg_supported = inspect.signature(self._generate).parameters.get(
            "run_manager"
        )
        try:
            results = [
                self._generate(m, stop=stop, run_manager=run_manager)
                if new_arg_supported
                else self._generate(m, stop=stop)
                for m in messages
            ]
        except (KeyboardInterrupt, Exception) as e:
            run_manager.on_llm_error(e)
            raise e
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        return LLMResult(generations=generations, llm_output=llm_output)
        output = LLMResult(generations=generations, llm_output=llm_output)
        run_manager.on_llm_end(output)
        return output

    async def agenerate(
        self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        """Top Level call"""
        results = await asyncio.gather(
            *[self._agenerate(m, stop=stop) for m in messages]

        callback_manager = AsyncCallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        message_strings = [get_buffer_string(m) for m in messages]
        run_manager = await callback_manager.on_llm_start(
            {"name": self.__class__.__name__}, message_strings
        )

        new_arg_supported = inspect.signature(self._agenerate).parameters.get(
            "run_manager"
        )
        try:
            results = await asyncio.gather(
                *[
                    self._agenerate(m, stop=stop, run_manager=run_manager)
                    if new_arg_supported
                    else self._agenerate(m, stop=stop)
                    for m in messages
                ]
            )
        except (KeyboardInterrupt, Exception) as e:
            await run_manager.on_llm_error(e)
            raise e
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        return LLMResult(generations=generations, llm_output=llm_output)
        output = LLMResult(generations=generations, llm_output=llm_output)
        await run_manager.on_llm_end(output)
        return output

    def generate_prompt(
        self, prompts: List[PromptValue], stop: Optional[List[str]] = None
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        prompt_messages = [p.to_messages() for p in prompts]
        prompt_strings = [p.to_string() for p in prompts]
        self.callback_manager.on_llm_start(
            {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
        )
        try:
            output = self.generate(prompt_messages, stop=stop)
        except (KeyboardInterrupt, Exception) as e:
            self.callback_manager.on_llm_error(e, verbose=self.verbose)
            raise e
        self.callback_manager.on_llm_end(output, verbose=self.verbose)
        return output
        return self.generate(prompt_messages, stop=stop, callbacks=callbacks)

    async def agenerate_prompt(
        self, prompts: List[PromptValue], stop: Optional[List[str]] = None
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        prompt_messages = [p.to_messages() for p in prompts]
        prompt_strings = [p.to_string() for p in prompts]
        if self.callback_manager.is_async:
            await self.callback_manager.on_llm_start(
                {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
            )
        else:
            self.callback_manager.on_llm_start(
                {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
            )
        try:
            output = await self.agenerate(prompt_messages, stop=stop)
        except (KeyboardInterrupt, Exception) as e:
            if self.callback_manager.is_async:
                await self.callback_manager.on_llm_error(e, verbose=self.verbose)
            else:
                self.callback_manager.on_llm_error(e, verbose=self.verbose)
            raise e
        if self.callback_manager.is_async:
            await self.callback_manager.on_llm_end(output, verbose=self.verbose)
        else:
            self.callback_manager.on_llm_end(output, verbose=self.verbose)
        return output
        return await self.agenerate(prompt_messages, stop=stop, callbacks=callbacks)

    @abstractmethod
    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        """Top Level call"""

    @abstractmethod
    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        """Top Level call"""

    def __call__(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
    ) -> BaseMessage:
        return self._generate(messages, stop=stop).generations[0].message
        generation = self.generate(
            [messages], stop=stop, callbacks=callbacks
        ).generations[0][0]
        if isinstance(generation, ChatGeneration):
            return generation.message
        else:
            raise ValueError("Unexpected generation type")

    def call_as_llm(self, message: str, stop: Optional[List[str]] = None) -> str:
        result = self([HumanMessage(content=message)], stop=stop)
@@ -134,15 +187,21 @@ class BaseChatModel(BaseLanguageModel, ABC):

class SimpleChatModel(BaseChatModel):
    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        output_str = self._call(messages, stop=stop)
        output_str = self._call(messages, stop=stop, run_manager=run_manager)
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    @abstractmethod
    def _call(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Simpler interface."""
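Two details of the `BaseChatModel` rewrite are worth pulling out. First, `callback_manager` survives only as a deprecated alias: the `root_validator` warns and copies it into `callbacks`. Second, `generate` inspects `_generate`'s signature for a `run_manager` parameter, so subclasses that have not been migrated keep working. A migration sketch:

```python
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

handler = StdOutCallbackHandler()

# Old style (now triggers the DeprecationWarning raised above):
#   chat = ChatOpenAI(callback_manager=CallbackManager([handler]))

# New style: constructor-level callbacks...
chat = ChatOpenAI(callbacks=[handler])
# ...or per-call callbacks, merged in by CallbackManager.configure.
result = chat.generate([[HumanMessage(content="Hi")]], callbacks=[handler])
```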
@@ -14,6 +14,10 @@ from tenacity import (
    wait_exponential,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
    AIMessage,
@@ -242,7 +246,10 @@ class ChatOpenAI(BaseChatModel):
        return {"token_usage": overall_token_usage, "model_name": self.model_name}

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        if self.streaming:
@@ -255,10 +262,8 @@ class ChatOpenAI(BaseChatModel):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                self.callback_manager.on_llm_new_token(
                    token,
                    verbose=self.verbose,
                )
                if run_manager:
                    run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {"content": inner_completion, "role": role}
            )
@@ -287,7 +292,10 @@ class ChatOpenAI(BaseChatModel):
        return ChatResult(generations=generations, llm_output=llm_output)

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        if self.streaming:
@@ -300,16 +308,8 @@ class ChatOpenAI(BaseChatModel):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
                if run_manager:
                    await run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {"content": inner_completion, "role": role}
            )
@@ -2,6 +2,10 @@
import datetime
from typing import List, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, ChatResult

@@ -33,13 +37,16 @@ class PromptLayerChatOpenAI(ChatOpenAI):
    return_pl_id: Optional[bool] = False

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        """Call ChatOpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(messages, stop)
        generated_responses = super()._generate(messages, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        message_dicts, params = super()._create_message_dicts(messages, stop)
        for i, generation in enumerate(generated_responses.generations):
@@ -67,13 +74,16 @@ class PromptLayerChatOpenAI(ChatOpenAI):
        return generated_responses

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        """Call ChatOpenAI agenerate and then call PromptLayer to log."""
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(messages, stop)
        generated_responses = await super()._agenerate(messages, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        message_dicts, params = super()._create_message_dicts(messages, stop)
        for i, generation in enumerate(generated_responses.generations):
@@ -12,7 +12,6 @@ from langchain.document_loaders.azure_blob_storage_file import (
from langchain.document_loaders.bigquery import BigQueryLoader
from langchain.document_loaders.bilibili import BiliBiliLoader
from langchain.document_loaders.blackboard import BlackboardLoader
from langchain.document_loaders.chatgpt import ChatGPTLoader
from langchain.document_loaders.college_confidential import CollegeConfidentialLoader
from langchain.document_loaders.confluence import ConfluenceLoader
from langchain.document_loaders.conllu import CoNLLULoader
@@ -56,7 +55,6 @@ from langchain.document_loaders.pdf import (
    UnstructuredPDFLoader,
)
from langchain.document_loaders.powerpoint import UnstructuredPowerPointLoader
from langchain.document_loaders.python import PythonLoader
from langchain.document_loaders.readthedocs import ReadTheDocsLoader
from langchain.document_loaders.roam import RoamLoader
from langchain.document_loaders.rtf import UnstructuredRTFLoader
@@ -158,6 +156,4 @@ __all__ = [
    "ImageCaptionLoader",
    "DiscordChatLoader",
    "ConfluenceLoader",
    "PythonLoader",
    "ChatGPTLoader",
]
@@ -1,50 +0,0 @@
"""Load conversations from ChatGPT data export"""
import datetime
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(message: dict, title: str) -> str:
    if not message:
        return ""

    sender = message["author"]["role"] if message["author"] else "unknown"
    text = message["content"]["parts"][0]
    date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return f"{title} - {sender} on {date}: {text}\n\n"


class ChatGPTLoader(BaseLoader):
    """Loader that loads conversations from exported ChatGPT data."""

    def __init__(self, log_file: str, num_logs: int = -1):
        self.log_file = log_file
        self.num_logs = num_logs

    def load(self) -> List[Document]:
        with open(self.log_file, encoding="utf8") as f:
            data = json.load(f)[: self.num_logs] if self.num_logs else json.load(f)

        documents = []
        for d in data:
            title = d["title"]
            messages = d["mapping"]
            text = "".join(
                [
                    concatenate_rows(messages[key]["message"], title)
                    for idx, key in enumerate(messages)
                    if not (
                        idx == 0
                        and messages[key]["message"]["author"]["role"] == "system"
                    )
                ]
            )
            metadata = {"source": str(self.log_file)}
            documents.append(Document(page_content=text, metadata=metadata))

        return documents
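A usage sketch for the loader above; the path is a hypothetical location of the `conversations.json` file from a ChatGPT data export. Note the `num_logs` handling: any truthy value (including the default `-1`) slices the parsed list, so pass an explicit positive count to load a prefix of the conversations.

```python
from langchain.document_loaders.chatgpt import ChatGPTLoader

loader = ChatGPTLoader(log_file="./conversations.json", num_logs=5)
docs = loader.load()
print(docs[0].page_content[:200])
```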
@@ -1,14 +0,0 @@
import tokenize

from langchain.document_loaders.text import TextLoader


class PythonLoader(TextLoader):
    """
    Load Python files, respecting any non-default encoding if specified.
    """

    def __init__(self, file_path: str):
        with open(file_path, "rb") as f:
            encoding, _ = tokenize.detect_encoding(f.readline)
        super().__init__(file_path=file_path, encoding=encoding)
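A usage sketch: `PythonLoader` reads the source file's encoding declaration (the PEP 263 coding cookie, via `tokenize.detect_encoding`) before delegating to `TextLoader`, so non-UTF-8 sources load correctly. The path is illustrative:

```python
from langchain.document_loaders.python import PythonLoader

docs = PythonLoader("path/to/some_module.py").load()
print(docs[0].metadata["source"])
```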
@@ -61,13 +61,6 @@ class SitemapLoader(WebBaseLoader):
                    }
                )

        for sitemap in soup.find_all("sitemap"):
            loc = sitemap.find("loc")
            if not loc:
                continue
            soup_child = self.scrape_all([loc.text], "xml")[0]

            els.extend(self.parse_sitemap(soup_child))
        return els

    def load(self) -> List[Document]:
@@ -114,11 +114,7 @@ class YoutubeLoader(BaseLoader):
    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from youtube_transcript_api import (
                NoTranscriptFound,
                TranscriptsDisabled,
                YouTubeTranscriptApi,
            )
            from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
        except ImportError:
            raise ImportError(
                "Could not import youtube_transcript_api python package. "
@@ -133,11 +129,7 @@ class YoutubeLoader(BaseLoader):
            video_info = self._get_video_info()
            metadata.update(video_info)

        try:
            transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
        except TranscriptsDisabled:
            return []

        transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
        try:
            transcript = transcript_list.find_transcript([self.language])
        except NoTranscriptFound:
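One side of this hunk guards transcript listing with `TranscriptsDisabled`, returning an empty list instead of raising. A usage sketch with an illustrative URL:

```python
from langchain.document_loaders import YoutubeLoader

loader = YoutubeLoader.from_youtube_url("https://www.youtube.com/watch?v=VIDEO_ID")
docs = loader.load()  # [] if the video has transcripts disabled (guarded variant)
```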
@@ -1,100 +0,0 @@
"""Transform documents"""
from typing import Any, Callable, List, Sequence

import numpy as np
from pydantic import BaseModel, Field

from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.schema import BaseDocumentTransformer, Document


class _DocumentWithState(Document):
    """Wrapper for a document that includes arbitrary state."""

    state: dict = Field(default_factory=dict)
    """State associated with the document."""

    def to_document(self) -> Document:
        """Convert the DocumentWithState to a Document."""
        return Document(page_content=self.page_content, metadata=self.metadata)

    @classmethod
    def from_document(cls, doc: Document) -> "_DocumentWithState":
        """Create a DocumentWithState from a Document."""
        if isinstance(doc, cls):
            return doc
        return cls(page_content=doc.page_content, metadata=doc.metadata)


def get_stateful_documents(
    documents: Sequence[Document],
) -> Sequence[_DocumentWithState]:
    return [_DocumentWithState.from_document(doc) for doc in documents]


def _filter_similar_embeddings(
    embedded_documents: List[List[float]], similarity_fn: Callable, threshold: float
) -> List[int]:
    """Filter redundant documents based on the similarity of their embeddings."""
    similarity = np.tril(similarity_fn(embedded_documents, embedded_documents), k=-1)
    redundant = np.where(similarity > threshold)
    redundant_stacked = np.column_stack(redundant)
    redundant_sorted = np.argsort(similarity[redundant])[::-1]
    included_idxs = set(range(len(embedded_documents)))
    for first_idx, second_idx in redundant_stacked[redundant_sorted]:
        if first_idx in included_idxs and second_idx in included_idxs:
            # Default to dropping the second document of any highly similar pair.
            included_idxs.remove(second_idx)
    return list(sorted(included_idxs))


def _get_embeddings_from_stateful_docs(
    embeddings: Embeddings, documents: Sequence[_DocumentWithState]
) -> List[List[float]]:
    if len(documents) and "embedded_doc" in documents[0].state:
        embedded_documents = [doc.state["embedded_doc"] for doc in documents]
    else:
        embedded_documents = embeddings.embed_documents(
            [d.page_content for d in documents]
        )
        for doc, embedding in zip(documents, embedded_documents):
            doc.state["embedded_doc"] = embedding
    return embedded_documents


class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
    """Filter that drops redundant documents by comparing their embeddings."""

    embeddings: Embeddings
    """Embeddings to use for embedding document contents."""
    similarity_fn: Callable = cosine_similarity
    """Similarity function for comparing documents. Function expected to take as input
    two matrices (List[List[float]]) and return a matrix of scores where higher values
    indicate greater similarity."""
    similarity_threshold: float = 0.95
    """Threshold for determining when two documents are similar enough
    to be considered redundant."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Filter down documents."""
        stateful_documents = get_stateful_documents(documents)
        embedded_documents = _get_embeddings_from_stateful_docs(
            self.embeddings, stateful_documents
        )
        included_idxs = _filter_similar_embeddings(
            embedded_documents, self.similarity_fn, self.similarity_threshold
        )
        return [stateful_documents[i] for i in sorted(included_idxs)]

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        raise NotImplementedError
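A usage sketch for the filter above, assuming it is importable from `langchain.document_transformers` and that an OpenAI key is configured. Pairs of documents whose embedding similarity exceeds `similarity_threshold` (0.95 by default) are collapsed to one:

```python
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document

docs = [
    Document(page_content="The cat sat on the mat."),
    Document(page_content="The cat sat on the mat!"),  # near-duplicate
    Document(page_content="Quarterly revenue grew by 8%."),
]
redundant_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
filtered = redundant_filter.transform_documents(docs)
print(len(filtered))  # likely 2: one of the near-duplicates is dropped
```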
@@ -1,7 +1,7 @@
"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional
from typing import Any, List, Optional

from pydantic import BaseModel, Extra, Field
from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings

@@ -22,10 +22,8 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
        .. code-block:: python

            from langchain.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
            hf = HuggingFaceEmbeddings(model_name=model_name)
    """

    client: Any  #: :meta private:
@@ -34,8 +32,6 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Key word arguments to pass to the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
@@ -43,15 +39,14 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
        try:
            import sentence_transformers

        except ImportError as exc:
            self.client = sentence_transformers.SentenceTransformer(
                self.model_name, self.cache_folder
            )
        except ImportError:
            raise ValueError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )
            )

    class Config:
        """Configuration for this pydantic object."""
@@ -95,22 +90,13 @@ class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
        .. code-block:: python

            from langchain.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name, model_kwargs=model_kwargs
            )
            hf = HuggingFaceInstructEmbeddings(model_name=model_name)
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Key word arguments to pass to the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
@@ -122,9 +108,7 @@ class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
        try:
            from InstructorEmbedding import INSTRUCTOR

            self.client = INSTRUCTOR(
                self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
            )
            self.client = INSTRUCTOR(self.model_name)
        except ImportError as e:
            raise ValueError("Dependencies for InstructorEmbedding not found.") from e
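The `model_kwargs` field appearing on one side of this hunk forwards arbitrary keyword arguments to the underlying `SentenceTransformer`. A sketch of the richer variant:

```python
from langchain.embeddings import HuggingFaceEmbeddings

hf = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2",
    model_kwargs={"device": "cpu"},  # forwarded to SentenceTransformer
)
vector = hf.embed_query("hello world")
print(len(vector))  # 768 for this model
```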
@@ -7,10 +7,6 @@ from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase


class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
    """Content handler for LLM class."""


class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Wrapper around custom Sagemaker Inference Endpoints.

@@ -66,7 +62,7 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: EmbeddingsContentHandler
    content_handler: ContentHandlerBase
    """The content handler class that provides an input and
    output transform functions to handle formats between LLM
    and the endpoint.
@@ -75,21 +71,21 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """
    Example:
        .. code-block:: python

            from langchain.llms.sagemaker_endpoint import ContentHandlerBase

            from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

            class ContentHandler(EmbeddingsContentHandler):
            class ContentHandler(ContentHandlerBase):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({prompts: prompts, **model_kwargs})
                def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({prompt: prompt, **model_kwargs})
                    return input_str.encode('utf-8')

                def transform_output(self, output: bytes) -> List[List[float]]:

                def transform_output(self, output: bytes) -> str:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json["vectors"]
    """  # noqa: E501
                    return response_json[0]["generated_text"]
    """

    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""
@@ -139,7 +135,7 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
        )
        return values

    def _embedding_func(self, texts: List[str]) -> List[List[float]]:
    def _embedding_func(self, texts: List[str]) -> List[float]:
        """Call out to SageMaker Inference embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
@@ -183,7 +179,7 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.extend(response)
            results.append(response)
        return results

    def embed_query(self, text: str) -> List[float]:
@@ -195,4 +191,4 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])[0]
        return self._embedding_func([text])
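A sketch of a handler written against the typed `EmbeddingsContentHandler` shown above. The JSON field names (`"inputs"`, `"vectors"`) are assumptions about a particular endpoint's contract, not part of the interface:

```python
import json
from typing import Dict, List

from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


class MyContentHandler(EmbeddingsContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
        # "inputs" is an assumed request key for the deployed model.
        return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        # "vectors" is an assumed response key; one vector per input text.
        return json.loads(output.read().decode("utf-8"))["vectors"]
```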
@@ -44,8 +44,7 @@ class AutoGPTOutputParser(BaseAutoGPTOutputParser):
                 name=parsed["command"]["name"],
                 args=parsed["command"]["args"],
             )
-        except (KeyError, TypeError):
-            # If the command is null or incomplete, return an erroneous tool
+        except KeyError:
             return AutoGPTAction(
                 name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
             )
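The removed two-line `except` exists because the model's JSON can legally contain `"command": null`; subscripting `None` then raises `TypeError`, which a bare `except KeyError` never catches. A minimal reproduction of the failure mode:

```python
import json

parsed = json.loads('{"thoughts": {}, "command": null}')
try:
    name = parsed["command"]["name"]
except (KeyError, TypeError) as e:
    # 'NoneType' object is not subscriptable -> TypeError;
    # a missing "command"/"name" key -> KeyError.
    print(f"Incomplete command args: {parsed} ({type(e).__name__})")
```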
@@ -3,6 +3,7 @@ from typing import Any, Dict, List, Optional

 from pydantic import BaseModel, Field

+from langchain.base_language import BaseLanguageModel
 from langchain.chains.base import Chain
 from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
     TaskCreationChain,
@@ -13,7 +14,6 @@ from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
 from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
     TaskPrioritizationChain,
 )
-from langchain.schema import BaseLanguageModel
 from langchain.vectorstores.base import VectorStore
@@ -1,5 +1,5 @@
 from langchain import LLMChain, PromptTemplate
-from langchain.schema import BaseLanguageModel
+from langchain.base_language import BaseLanguageModel


 class TaskCreationChain(LLMChain):
@@ -1,5 +1,5 @@
 from langchain import LLMChain, PromptTemplate
-from langchain.schema import BaseLanguageModel
+from langchain.base_language import BaseLanguageModel


 class TaskExecutionChain(LLMChain):
@@ -1,5 +1,5 @@
 from langchain import LLMChain, PromptTemplate
-from langchain.schema import BaseLanguageModel
+from langchain.base_language import BaseLanguageModel


 class TaskPrioritizationChain(LLMChain):
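The four BabyAGI hunks above are the same one-line change: `BaseLanguageModel` now comes from `langchain.base_language` rather than `langchain.schema`. Downstream code only needs its import updated; a sketch of a chain factory typed against the abstraction (the prompt template here is invented for illustration, not the one BabyAGI ships):

```python
from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel


def make_task_chain(llm: BaseLanguageModel, verbose: bool = False) -> LLMChain:
    # Works with any chat or completion model behind the shared interface.
    prompt = PromptTemplate(
        input_variables=["objective"],
        template="Given the objective: {objective}, list the next tasks.",
    )
    return LLMChain(llm=llm, prompt=prompt, verbose=verbose)
```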
@@ -4,6 +4,7 @@ from typing import Any, Dict, List, Optional

 import requests
 from pydantic import BaseModel, Extra, root_validator

+from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.llms.base import LLM
 from langchain.utils import get_from_dict_or_env

@@ -106,7 +107,12 @@ class AI21(LLM):
         """Return type of llm."""
         return "ai21"

-    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+    ) -> str:
         """Call out to AI21's complete endpoint.

         Args:
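The new `_call` accepts an optional per-run `run_manager`, which the `LLM` base class threads in for each invocation in place of the old class-wide `callback_manager`. A toy custom LLM showing the refactored signature; `EchoLLM` is hypothetical and exists only to illustrate the contract:

```python
from typing import List, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class EchoLLM(LLM):
    """Hypothetical LLM that just echoes the prompt back."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        # run_manager is per-run and may be None, so guard every use.
        if run_manager:
            run_manager.on_llm_new_token(prompt)
        return prompt
```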
@@ -3,6 +3,7 @@ from typing import Any, Dict, List, Optional, Sequence

 from pydantic import Extra, root_validator

+from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from langchain.utils import get_from_dict_or_env

@@ -200,7 +201,12 @@ class AlephAlpha(LLM):
         """Return type of llm."""
         return "alpeh_alpha"

-    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+    ) -> str:
         """Call out to Aleph Alpha's completion endpoint.

         Args:
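Like the Banana wrapper further down, this one imports `enforce_stop_tokens` to cut completions client-side when the upstream API does not honor stop sequences. Its behavior on dummy text (the stop strings are joined into a regex, so plain substrings work as shown):

```python
from langchain.llms.utils import enforce_stop_tokens

text = "The answer is 42.\nQuestion: what next?"
# Splits on the first occurrence of any stop string and keeps the prefix.
print(enforce_stop_tokens(text, ["\nQuestion:"]))  # -> "The answer is 42."
```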
@@ -4,6 +4,10 @@ from typing import Any, Callable, Dict, Generator, List, Mapping, Optional

 from pydantic import BaseModel, Extra, root_validator

+from langchain.callbacks.manager import (
+    AsyncCallbackManagerForLLMRun,
+    CallbackManagerForLLMRun,
+)
 from langchain.llms.base import LLM
 from langchain.utils import get_from_dict_or_env

@@ -142,7 +146,12 @@ class Anthropic(LLM, _AnthropicCommon):
         # As a last resort, wrap the prompt ourselves to emulate instruct-style.
         return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"

-    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+    ) -> str:
         r"""Call out to Anthropic's completion endpoint.

         Args:
@@ -171,9 +180,8 @@ class Anthropic(LLM, _AnthropicCommon):
             for data in stream_resp:
                 delta = data["completion"][len(current_completion) :]
                 current_completion = data["completion"]
-                self.callback_manager.on_llm_new_token(
-                    delta, verbose=self.verbose, **data
-                )
+                if run_manager:
+                    run_manager.on_llm_new_token(delta, **data)
             return current_completion
         response = self.client.completion(
             prompt=self._wrap_prompt(prompt),
@@ -182,7 +190,12 @@ class Anthropic(LLM, _AnthropicCommon):
         )
         return response["completion"]

-    async def _acall(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    async def _acall(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+    ) -> str:
         """Call out to Anthropic's completion endpoint asynchronously."""
         stop = self._get_anthropic_stop(stop)
         if self.streaming:
@@ -195,14 +208,8 @@ class Anthropic(LLM, _AnthropicCommon):
             async for data in stream_resp:
                 delta = data["completion"][len(current_completion) :]
                 current_completion = data["completion"]
-                if self.callback_manager.is_async:
-                    await self.callback_manager.on_llm_new_token(
-                        delta, verbose=self.verbose, **data
-                    )
-                else:
-                    self.callback_manager.on_llm_new_token(
-                        delta, verbose=self.verbose, **data
-                    )
+                if run_manager:
+                    await run_manager.on_llm_new_token(delta, **data)
             return current_completion
         response = await self.client.acompletion(
             prompt=self._wrap_prompt(prompt),
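With the per-run manager, streamed Anthropic tokens reach whatever handlers the caller attaches to that run, rather than a manager mutated on the model instance, and the async path no longer needs the `is_async` branching. A usage sketch under the refactored callbacks API; it assumes `ANTHROPIC_API_KEY` is set and that per-call `callbacks` are supported at this revision:

```python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Anthropic

llm = Anthropic(streaming=True, temperature=0)
# Handlers passed per call are wrapped into the run_manager that
# _call receives, so tokens print as they arrive.
llm("Tell me a joke", callbacks=[StreamingStdOutCallbackHandler()])
```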
@@ -4,6 +4,7 @@ from typing import Any, Dict, List, Mapping, Optional

 from pydantic import Extra, Field, root_validator

+from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from langchain.utils import get_from_dict_or_env

@@ -80,7 +81,12 @@ class Banana(LLM):
         """Return type of llm."""
         return "banana"

-    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+    ) -> str:
         """Call to Banana endpoint."""
         try:
             import banana_dev as banana
Some files were not shown because too many files have changed in this diff.