Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-02-07 09:40:07 +00:00

Compare commits: v0.0.154 ... vwp/align_ (113 commits)
Commits:

9a77540fbe, 5763d26b9e, 54076f21b2, f3d727147a, c825bd45d8, 3b10dabe4d, 21f0719c9e, 0094879504, 396a4b0458, b76f8cd252, 2b7d51706e, 0e7e1e66f9, af302d99f0, 1c2e2e93c8, 0e06e6e34a, 37b819cfa5, ec00fc71a8, ceec14f1bf, 4aa03b3e01, 7e6097964e, 5104f9b08c, 871c295b4c, 07627b57ec, 6f514361be, b7f4a410a3, eb12242495, aa1c3df5cf, f7af565510, 3ec77607dc, 597e87abac, dfdb8279a6, 1999294349, 6732ef9d35, 2ba18a0096, 48997b35c9, 9d7cfbcfcc, 8fb767b8c6, 69db22be32, 5f0248f0fb, 580f1b2a48, be794e0360, 659e94fc9c, 74a95629a3, 1781d611f8, ca0dfd38f8, 4630916e8c, 57a6982007, cdbc4cda37, 4f501e59ec, c850a4d406, aa9cf24a54, ffac033150, 8fc1c43e5d, 1deacb4f0a, 621ab11734, 4bb95ad529, 8f5996a31c, 68c19e1452, df0e1f85da, 99f74ff7d9, fe5db65628, 59a4a8b34b, 73aedeed07, e7d27d52f6, 1c73dc6408, b9d0e88584, 7482cc218c, cc247960a4, 2e2be677c9, dca5772ed9, 5adfda8507, 704e0b98d8, cc6902f817, 2f1ab146d5, 9bcb2af86a, cdc9c6a2fd, d0fa3cf798, d80017f51f, bf0bbc8f2c, 27f1463f4a, f7b05e7348, bf795bffdb, 906488f87e, 7a01742895, cef046ae18, 5e53336c7d, 95ae3c5f4b, fa9c5ac78d, d5ef266842, 3fdfa5d576, e41a70eb59, 71db9c97c6, 042415eee4, 37cc3d2e63, 828c96072c, edbd3c7964, f553d28a11, e8e8ca163b, c9d5525485, 8f4f90cdae, 612f928323, 7c211d2438, 6a0abccf4d, beb0f6fd60, 219b618a5b, fcd174cf43, 74f46262d0, 058273174a, 1a4c4a24f2, f6c98a7c1e, e0cb4c3005, 97cabb40ae, 37b68dc8f2
.gitignore (vendored, 6 changed lines)
```diff
@@ -144,8 +144,4 @@ wandb/
 /.ruff_cache/
 
 *.pkl
+*.bin
-
-# integration test artifacts
-data_map*
-\[('_type', 'fake'), ('stop', None)]
-*.bin
```
```diff
@@ -4,8 +4,6 @@
 
 [](https://github.com/hwchase17/langchain/actions/workflows/lint.yml) [](https://github.com/hwchase17/langchain/actions/workflows/test.yml) [](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml) [](https://pepy.tech/project/langchain) [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai) [](https://discord.gg/6adMQxSpJS)
 
 Looking for the JS/TS version? Check out [LangChain.js](https://github.com/hwchase17/langchainjs).
 
-**Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
-Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
 
```
BIN docs/_static/MetalDash.png (vendored)
Binary file not shown. Before: 3.5 MiB.
```diff
@@ -1,10 +1,14 @@
 # Deployments
 
-So, you've created a really cool chain - now what? How do you deploy it and make it easily shareable with the world?
+So you've made a really cool chain - now what? How do you deploy it and make it easily sharable with the world?
 
-This section covers several options for that. Note that these options are meant for quick deployment of prototypes and demos, not for production systems. If you need help with the deployment of a production system, please contact us directly.
+This section covers several options for that.
+Note that these are meant as quick deployment options for prototypes and demos, and not for production systems.
+If you are looking for help with deployment of a production system, please contact us directly.
 
-What follows is a list of template GitHub repositories designed to be easily forked and modified to use your chain. This list is far from exhaustive, and we are EXTREMELY open to contributions here.
+What follows is a list of template GitHub repositories aimed that are intended to be
+very easy to fork and modify to use your chain.
+This is far from an exhaustive list of options, and we are EXTREMELY open to contributions here.
 
 ## [Streamlit](https://github.com/hwchase17/langchain-streamlit-template)
 
@@ -43,11 +47,12 @@ A minimal example on how to deploy LangChain to Google Cloud Run.
 
 ## [SteamShip](https://github.com/steamship-core/steamship-langchain/)
 
-This repository contains LangChain adapters for Steamship, enabling LangChain developers to rapidly deploy their apps on Steamship. This includes: production-ready endpoints, horizontal scaling across dependencies, persistent storage of app state, multi-tenancy support, etc.
+This repository contains LangChain adapters for Steamship, enabling LangChain developers to rapidly deploy their apps on Steamship.
+This includes: production ready endpoints, horizontal scaling across dependencies, persistant storage of app state, multi-tenancy support, etc.
 
 ## [Langchain-serve](https://github.com/jina-ai/langchain-serve)
 
-This repository allows users to serve local chains and agents as RESTful, gRPC, or WebSocket APIs, thanks to [Jina](https://docs.jina.ai/). Deploy your chains & agents with ease and enjoy independent scaling, serverless and autoscaling APIs, as well as a Streamlit playground on Jina AI Cloud.
+This repository allows users to serve local chains and agents as RESTful, gRPC, or Websocket APIs thanks to [Jina](https://docs.jina.ai/). Deploy your chains & agents with ease and enjoy independent scaling, serverless and autoscaling APIs, as well as a Streamlit playground on Jina AI Cloud.
 
 ## [BentoML](https://github.com/ssheng/BentoChain)
 
@@ -55,4 +60,4 @@ This repository provides an example of how to deploy a LangChain application wit
 
 ## [Databutton](https://databutton.com/home?new-data-app=true)
 
-These templates serve as examples of how to build, deploy, and share LangChain applications using Databutton. You can create user interfaces with Streamlit, automate tasks by scheduling Python code, and store files and data in the built-in store. Examples include a Chatbot interface with conversational memory, a Personal search engine, and a starter template for LangChain apps. Deploying and sharing is just one click away.
+These templates serve as examples of how to build, deploy, and share LangChain applications using Databutton. You can create user interfaces with Streamlit, automate tasks by scheduling Python code, and store files and data in the built-in store. Examples include Chatbot interface with conversational memory, Personal search engine, and a starter template for LangChain apps. Deploying and sharing is one click.
```
```diff
@@ -61,6 +61,7 @@
 "from datetime import datetime\n",
 "\n",
 "from langchain.llms import OpenAI\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler"
 ]
 },
@@ -108,8 +109,8 @@
 " experiment_name=\"scenario 1: OpenAI LLM\",\n",
 ")\n",
 "\n",
-"callbacks = [StdOutCallbackHandler(), aim_callback]\n",
-"llm = OpenAI(temperature=0, callbacks=callbacks)"
+"manager = CallbackManager([StdOutCallbackHandler(), aim_callback])\n",
+"llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)"
 ]
 },
 {
@@ -176,7 +177,7 @@
 "Title: {title}\n",
 "Playwright: This is a synopsis for the above play:\"\"\"\n",
 "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
-"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)\n",
+"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
 "\n",
 "test_prompts = [\n",
 "    {\"title\": \"documentary about good video games that push the boundary of game design\"},\n",
@@ -248,12 +249,13 @@
 ],
 "source": [
 "# scenario 3 - Agent with Tools\n",
-"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n",
+"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callback_manager=manager)\n",
 "agent = initialize_agent(\n",
 "    tools,\n",
 "    llm,\n",
 "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
-"    callbacks=callbacks,\n",
+"    callback_manager=manager,\n",
 "    verbose=True,\n",
 ")\n",
 "agent.run(\n",
 "    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
```
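The Aim notebook hunks above (and the ClearML, Comet, and W&B hunks that follow) all make the same substitution. A minimal sketch of the two wiring styles this diff toggles between, both taken verbatim from the hunks; which one runs depends on which side of the diff your installed langchain version matches:

```python
# A minimal sketch of the two callback wiring styles this diff toggles between.
# Assumption: the list-based `callbacks=` parameter exists on one side of the
# diff, while the other side expects a CallbackManager via `callback_manager=`.
from langchain.llms import OpenAI
from langchain.callbacks import StdOutCallbackHandler
from langchain.callbacks.base import CallbackManager

# Style on the removed lines: pass handlers as a plain list.
llm_list_style = OpenAI(temperature=0, callbacks=[StdOutCallbackHandler()])

# Style on the added lines: wrap handlers in a CallbackManager first.
manager = CallbackManager([StdOutCallbackHandler()])
llm_manager_style = OpenAI(temperature=0, callback_manager=manager, verbose=True)
```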
```diff
@@ -79,6 +79,7 @@
 "source": [
 "from datetime import datetime\n",
 "from langchain.callbacks import ClearMLCallbackHandler, StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.llms import OpenAI\n",
 "\n",
 "# Setup and use the ClearML Callback\n",
@@ -92,9 +93,9 @@
 " complexity_metrics=True,\n",
 " stream_logs=True\n",
 ")\n",
-"callbacks = [StdOutCallbackHandler(), clearml_callback]\n",
+"manager = CallbackManager([StdOutCallbackHandler(), clearml_callback])\n",
 "# Get the OpenAI model ready to go\n",
-"llm = OpenAI(temperature=0, callbacks=callbacks)"
+"llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)"
 ]
 },
 {
@@ -522,12 +523,13 @@
 "from langchain.agents import AgentType\n",
 "\n",
 "# SCENARIO 2 - Agent with Tools\n",
-"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n",
+"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callback_manager=manager)\n",
 "agent = initialize_agent(\n",
 "    tools,\n",
 "    llm,\n",
 "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
-"    callbacks=callbacks,\n",
+"    callback_manager=manager,\n",
 "    verbose=True,\n",
 ")\n",
 "agent.run(\n",
 "    \"Who is the wife of the person who sang summer of 69?\"\n",
```
```diff
@@ -121,6 +121,7 @@
 "from datetime import datetime\n",
 "\n",
 "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.llms import OpenAI\n",
 "\n",
 "comet_callback = CometCallbackHandler(\n",
@@ -130,8 +131,8 @@
 " tags=[\"llm\"],\n",
 " visualizations=[\"dep\"],\n",
 ")\n",
-"callbacks = [StdOutCallbackHandler(), comet_callback]\n",
-"llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)\n",
+"manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
+"llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
 "\n",
 "llm_result = llm.generate([\"Tell me a joke\", \"Tell me a poem\", \"Tell me a fact\"] * 3)\n",
 "print(\"LLM result\", llm_result)\n",
@@ -152,6 +153,7 @@
 "outputs": [],
 "source": [
 "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.chains import LLMChain\n",
 "from langchain.llms import OpenAI\n",
 "from langchain.prompts import PromptTemplate\n",
@@ -162,14 +164,15 @@
 " stream_logs=True,\n",
 " tags=[\"synopsis-chain\"],\n",
 ")\n",
-"callbacks = [StdOutCallbackHandler(), comet_callback]\n",
-"llm = OpenAI(temperature=0.9, callbacks=callbacks)\n",
+"manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
+"\n",
+"llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
 "\n",
 "template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
 "Title: {title}\n",
 "Playwright: This is a synopsis for the above play:\"\"\"\n",
 "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
-"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)\n",
+"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
 "\n",
 "test_prompts = [{\"title\": \"Documentary about Bigfoot in Paris\"}]\n",
 "print(synopsis_chain.apply(test_prompts))\n",
@@ -191,6 +194,7 @@
 "source": [
 "from langchain.agents import initialize_agent, load_tools\n",
 "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.llms import OpenAI\n",
 "\n",
 "comet_callback = CometCallbackHandler(\n",
@@ -199,15 +203,15 @@
 " stream_logs=True,\n",
 " tags=[\"agent\"],\n",
 ")\n",
-"callbacks = [StdOutCallbackHandler(), comet_callback]\n",
-"llm = OpenAI(temperature=0.9, callbacks=callbacks)\n",
+"manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
+"llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
 "\n",
-"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n",
+"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callback_manager=manager)\n",
 "agent = initialize_agent(\n",
 "    tools,\n",
 "    llm,\n",
 "    agent=\"zero-shot-react-description\",\n",
-"    callbacks=callbacks,\n",
+"    callback_manager=manager,\n",
 "    verbose=True,\n",
 ")\n",
 "agent.run(\n",
@@ -251,6 +255,7 @@
 "from rouge_score import rouge_scorer\n",
 "\n",
 "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.chains import LLMChain\n",
 "from langchain.llms import OpenAI\n",
 "from langchain.prompts import PromptTemplate\n",
@@ -293,10 +298,10 @@
 " tags=[\"custom_metrics\"],\n",
 " custom_metrics=rouge_score.compute_metric,\n",
 ")\n",
-"callbacks = [StdOutCallbackHandler(), comet_callback]\n",
-"llm = OpenAI(temperature=0.9)\n",
+"manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
+"llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
 "\n",
-"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
+"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
 "\n",
 "test_prompts = [\n",
 "    {\n",
@@ -318,7 +323,7 @@
 " \"\"\"\n",
 " }\n",
 "]\n",
-"print(synopsis_chain.apply(test_prompts, callbacks=callbacks))\n",
+"print(synopsis_chain.apply(test_prompts))\n",
 "comet_callback.flush_tracker(synopsis_chain, finish=True)"
 ]
 }
```
````diff
@@ -3,7 +3,6 @@
 This page covers how to use the `GPT4All` wrapper within LangChain. The tutorial is divided into two parts: installation and setup, followed by usage with an example.
 
 ## Installation and Setup
 
 - Install the Python package with `pip install pyllamacpp`
 - Download a [GPT4All model](https://github.com/nomic-ai/pyllamacpp#supported-model) and place it in your desired directory
 
@@ -29,16 +28,16 @@ To stream the model's predictions, add in a CallbackManager.
 
 ```python
 from langchain.llms import GPT4All
+from langchain.callbacks.base import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 
 # There are many CallbackHandlers supported, such as
 # from langchain.callbacks.streamlit import StreamlitCallbackHandler
 
-callbacks = [StreamingStdOutCallbackHandler()]
-model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
+callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8, callback_handler=callback_handler, verbose=True)
 
 # Generate text. Tokens are streamed through the callback manager.
-model("Once upon a time, ", callbacks=callbacks)
+model("Once upon a time, ")
 ```
 
 ## Model File
````
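One likely bug in the added line above: it passes `callback_handler=callback_handler`, but the only name the snippet defines is `callback_manager`. A corrected sketch, assuming the `callback_manager`-style GPT4All constructor that the rest of this diff uses:

```python
# Corrected sketch of the streaming example above; `callback_handler` is
# undefined in the original snippet, so this assumes `callback_manager` was meant.
from langchain.llms import GPT4All
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
model = GPT4All(
    model="./models/gpt4all-model.bin",  # placeholder path from the example
    n_ctx=512,
    n_threads=8,
    callback_manager=callback_manager,
    verbose=True,
)
model("Once upon a time, ")  # tokens stream to stdout through the handler
```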
````diff
@@ -1,23 +0,0 @@
-# LanceDB
-
-This page covers how to use [LanceDB](https://github.com/lancedb/lancedb) within LangChain.
-It is broken into two parts: installation and setup, and then references to specific LanceDB wrappers.
-
-## Installation and Setup
-
-- Install the Python SDK with `pip install lancedb`
-
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around LanceDB databases, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-
-```python
-from langchain.vectorstores import LanceDB
-```
-
-For a more detailed walkthrough of the LanceDB wrapper, see [this notebook](../modules/indexes/vectorstores/examples/lancedb.ipynb)
````
````diff
@@ -1,26 +0,0 @@
-# Metal
-
-This page covers how to use [Metal](https://getmetal.io) within LangChain.
-
-## What is Metal?
-
-Metal is a managed retrieval & memory platform built for production. Easily index your data into `Metal` and run semantic search and retrieval on it.
-
-![](../_static/MetalDash.png)
-
-## Quick start
-
-Get started by [creating a Metal account](https://app.getmetal.io/signup).
-
-Then, you can easily take advantage of the `MetalRetriever` class to start retrieving your data for semantic search, prompting context, etc. This class takes a `Metal` instance and a dictionary of parameters to pass to the Metal API.
-
-```python
-from langchain.retrievers import MetalRetriever
-from metal_sdk.metal import Metal
-
-
-metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID");
-retriever = MetalRetriever(metal, params={"limit": 2})
-
-docs = retriever.get_relevant_documents("search term")
-```
````
````diff
@@ -1,19 +0,0 @@
-# PipelineAI
-
-This page covers how to use the PipelineAI ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific PipelineAI wrappers.
-
-## Installation and Setup
-
-- Install with `pip install pipeline-ai`
-- Get a Pipeline Cloud api key and set it as an environment variable (`PIPELINE_API_KEY`)
-
-## Wrappers
-
-### LLM
-
-There exists a PipelineAI LLM wrapper, which you can access with
-
-```python
-from langchain.llms import PipelineAI
-```
````
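The deleted page stops at the import. A hedged usage sketch follows; the `pipeline_key` value and the exact constructor parameter are illustrative assumptions, not something the page specifies:

```python
# Hypothetical usage of the PipelineAI wrapper the deleted page imports.
# The pipeline key below is a placeholder assumption.
import os
from langchain.llms import PipelineAI

os.environ["PIPELINE_API_KEY"] = "<your key>"  # the env var named in the page's setup step
llm = PipelineAI(pipeline_key="<your pipeline key>")  # assumed parameter name
print(llm("Tell me a joke"))
```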
````diff
@@ -1,79 +0,0 @@
-# Redis
-
-This page covers how to use the [Redis](https://redis.com) ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Redis wrappers.
-
-## Installation and Setup
-- Install the Redis Python SDK with `pip install redis`
-
-## Wrappers
-
-### Cache
-
-The Cache wrapper allows for [Redis](https://redis.io) to be used as a remote, low-latency, in-memory cache for LLM prompts and responses.
-
-#### Standard Cache
-The standard cache is the Redis bread & butter of use case in production for both [open source](https://redis.io) and [enterprise](https://redis.com) users globally.
-
-To import this cache:
-```python
-from langchain.cache import RedisCache
-```
-
-To use this cache with your LLMs:
-```python
-import langchain
-import redis
-
-redis_client = redis.Redis.from_url(...)
-langchain.llm_cache = RedisCache(redis_client)
-```
-
-#### Semantic Cache
-Semantic caching allows users to retrieve cached prompts based on semantic similarity between the user input and previously cached results. Under the hood it blends Redis as both a cache and a vectorstore.
-
-To import this cache:
-```python
-from langchain.cache import RedisSemanticCache
-```
-
-To use this cache with your LLMs:
-```python
-import langchain
-import redis
-
-# use any embedding provider...
-from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
-
-redis_url = "redis://localhost:6379"
-
-langchain.llm_cache = RedisSemanticCache(
-    embedding=FakeEmbeddings(),
-    redis_url=redis_url
-)
-```
-
-### VectorStore
-
-The vectorstore wrapper turns Redis into a low-latency [vector database](https://redis.com/solutions/use-cases/vector-database/) for semantic search or LLM content retrieval.
-
-To import this vectorstore:
-```python
-from langchain.vectorstores import Redis
-```
-
-For a more detailed walkthrough of the Redis vectorstore wrapper, see [this notebook](../modules/indexes/vectorstores/examples/redis.ipynb).
-
-### Retriever
-
-The Redis vector store retriever wrapper generalizes the vectorstore class to perform low-latency document retrieval. To create the retriever, simply call `.as_retriever()` on the base vectorstore class.
-
-### Memory
-Redis can be used to persist LLM conversations.
-
-#### Vector Store Retriever Memory
-
-For a more detailed walkthrough of the `VectorStoreRetrieverMemory` wrapper, see [this notebook](../modules/memory/types/vectorstore_retriever_memory.ipynb).
-
-#### Chat Message History Memory
-For a detailed example of Redis to cache conversation message history, see [this notebook](../modules/memory/examples/redis_chat_message_history.ipynb).
````
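The deleted Redis page describes its retriever in prose only. A sketch of the `.as_retriever()` pattern it names; the Redis URL, index name, and sample texts are illustrative assumptions:

```python
# Sketch of the retriever pattern the deleted page describes: build a Redis
# vectorstore, then call .as_retriever(). URL, index name, and texts are
# illustrative assumptions, not values from the page.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Redis

rds = Redis.from_texts(
    ["foo", "bar", "baz"],
    OpenAIEmbeddings(),
    redis_url="redis://localhost:6379",
    index_name="example-index",
)
retriever = rds.as_retriever()
docs = retriever.get_relevant_documents("foo")
```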
````diff
@@ -1,22 +0,0 @@
-# Tair
-
-This page covers how to use the Tair ecosystem within LangChain.
-
-## Installation and Setup
-
-Install Tair Python SDK with `pip install tair`.
-
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around TairVector, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-
-```python
-from langchain.vectorstores import Tair
-```
-
-For a more detailed walkthrough of the Tair wrapper, see [this notebook](../modules/indexes/vectorstores/examples/tair.ipynb)
````
```diff
@@ -50,6 +50,7 @@
 "source": [
 "from datetime import datetime\n",
 "from langchain.callbacks import WandbCallbackHandler, StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.llms import OpenAI"
 ]
 },
@@ -195,8 +196,8 @@
 " name=\"llm\",\n",
 " tags=[\"test\"],\n",
 ")\n",
-"callbacks = [StdOutCallbackHandler(), wandb_callback]\n",
-"llm = OpenAI(temperature=0, callbacks=callbacks)"
+"manager = CallbackManager([StdOutCallbackHandler(), wandb_callback])\n",
+"llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)"
 ]
 },
 {
@@ -483,7 +484,7 @@
 "Title: {title}\n",
 "Playwright: This is a synopsis for the above play:\"\"\"\n",
 "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
-"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)\n",
+"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
 "\n",
 "test_prompts = [\n",
 "    {\n",
@@ -576,15 +577,16 @@
 ],
 "source": [
 "# SCENARIO 3 - Agent with Tools\n",
-"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
+"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callback_manager=manager)\n",
 "agent = initialize_agent(\n",
 "    tools,\n",
 "    llm,\n",
 "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    callback_manager=manager,\n",
 "    verbose=True,\n",
 ")\n",
 "agent.run(\n",
-"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\",\n",
-"    callbacks=callbacks,\n",
+"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
 ")\n",
 "wandb_callback.flush_tracker(agent, reset=False, finish=True)"
 ]
```
```diff
@@ -172,9 +172,9 @@ In order to load agents, you should understand the following concepts:
 
 - LLM: The language model powering the agent.
 - Agent: The agent to use. This should be a string that references a support agent class. Because this notebook focuses on the simplest, highest level API, this only covers using the standard supported agents. If you want to implement a custom agent, see the documentation for custom agents (coming soon).
 
-**Agents**: For a list of supported agents and their specifications, see [here](../modules/agents/getting_started.ipynb).
+**Agents**: For a list of supported agents and their specifications, see [here](../modules/agents/agents.md).
 
-**Tools**: For a list of predefined tools and their specifications, see [here](../modules/agents/tools/getting_started.md).
+**Tools**: For a list of predefined tools and their specifications, see [here](../modules/agents/tools.md).
 
 For this example, you will also need to install the SerpAPI Python package.
 
@@ -361,9 +361,9 @@ from langchain.prompts.chat import (
 
 chat = ChatOpenAI(temperature=0)
 
-template = "You are a helpful assistant that translates {input_language} to {output_language}."
+template="You are a helpful assistant that translates {input_language} to {output_language}."
 system_message_prompt = SystemMessagePromptTemplate.from_template(template)
-human_template = "{text}"
+human_template="{text}"
 human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
 
 chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
@@ -387,9 +387,9 @@ from langchain.prompts.chat import (
 
 chat = ChatOpenAI(temperature=0)
 
-template = "You are a helpful assistant that translates {input_language} to {output_language}."
+template="You are a helpful assistant that translates {input_language} to {output_language}."
 system_message_prompt = SystemMessagePromptTemplate.from_template(template)
-human_template = "{text}"
+human_template="{text}"
 human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
 chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
 
```
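The two chat-prompt hunks above only change whitespace around `=`. For reference, a self-contained version of the snippet they touch; the import list is an assumption, since the diff truncates it after the opening parenthesis:

```python
# Self-contained version of the chat prompt snippet the hunks above reformat.
# The import list is assumed; the diff cuts it off.
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

template = "You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
messages = chat_prompt.format_prompt(
    input_language="English", output_language="French", text="I love programming."
).to_messages()
```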
```diff
@@ -44,8 +44,6 @@ These modules are, in increasing order of complexity:
 
 - `Agents <./modules/agents.html>`_: Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents.
 
-- `Callbacks <./modules/callbacks/getting_started.html>`_: It can be difficult to track all that occurs inside a chain or agent - callbacks help add a level of observability and introspection.
-
 
 .. toctree::
    :maxdepth: 1
@@ -59,7 +57,6 @@ These modules are, in increasing order of complexity:
    ./modules/memory.md
    ./modules/chains.md
    ./modules/agents.md
-   ./modules/callbacks/getting_started.ipynb
 
 Use Cases
 ----------
 
```
```diff
@@ -28,7 +28,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 1,
 "id": "da5df06c-af6f-4572-b9f5-0ab971c16487",
 "metadata": {
 "tags": []
@@ -42,6 +42,7 @@
 "from langchain.agents import AgentType\n",
 "from langchain.llms import OpenAI\n",
 "from langchain.callbacks.stdout import StdOutCallbackHandler\n",
+"from langchain.callbacks.base import CallbackManager\n",
 "from langchain.callbacks.tracers import LangChainTracer\n",
 "from aiohttp import ClientSession\n",
 "\n",
@@ -56,7 +57,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 2,
 "id": "fd4c294e-b1d6-44b8-b32e-2765c017e503",
 "metadata": {
 "tags": []
@@ -72,15 +73,16 @@
 "\u001b[32;1m\u001b[1;3m I need to find out who won the US Open men's final in 2019 and then calculate his age raised to the 0.334 power.\n",
 "Action: Search\n",
 "Action Input: \"US Open men's final 2019 winner\"\u001b[0m\n",
-"Observation: \u001b[33;1m\u001b[1;3mRafael Nadal defeated Daniil Medvedev in the final, 7–5, 6–3, 5–7, 4–6, 6–4 to win the men's singles tennis title at the 2019 US Open. It was his fourth US ...\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
+"Observation: \u001b[33;1m\u001b[1;3mRafael Nadal\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find out Rafael Nadal's age\n",
 "Action: Search\n",
 "Action Input: \"Rafael Nadal age\"\u001b[0m\n",
 "Observation: \u001b[33;1m\u001b[1;3m36 years\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I now need to calculate his age raised to the 0.334 power\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 36 raised to the 0.334 power\n",
 "Action: Calculator\n",
 "Action Input: 36^0.334\u001b[0m\n",
-"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\u001b[0m\n",
+"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\n",
+"\u001b[0m\n",
 "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
 "Final Answer: Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\u001b[0m\n",
 "\n",
@@ -91,17 +93,18 @@
 "\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
 "Action: Search\n",
 "Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\n",
-"Observation: \u001b[33;1m\u001b[1;3mOlivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to find out Harry Styles' age.\n",
+"Observation: \u001b[33;1m\u001b[1;3mJason Sudeikis\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find out Jason Sudeikis' age\n",
 "Action: Search\n",
-"Action Input: \"Harry Styles age\"\u001b[0m\n",
-"Observation: \u001b[33;1m\u001b[1;3m29 years\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
+"Action Input: \"Jason Sudeikis age\"\u001b[0m\n",
+"Observation: \u001b[33;1m\u001b[1;3m47 years\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 47 raised to the 0.23 power\n",
 "Action: Calculator\n",
-"Action Input: 29^0.23\u001b[0m\n",
-"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
-"Final Answer: Harry Styles, Olivia Wilde's boyfriend, is 29 years old and his age raised to the 0.23 power is 2.169459462491557.\u001b[0m\n",
+"Action Input: 47^0.23\u001b[0m\n",
+"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.4242784855673896\n",
+"\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
+"Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
 "\n",
@@ -110,17 +113,17 @@
 "\u001b[32;1m\u001b[1;3m I need to find out who won the grand prix and then calculate their age raised to the 0.23 power.\n",
 "Action: Search\n",
 "Action Input: \"Formula 1 Grand Prix Winner\"\u001b[0m\n",
-"Observation: \u001b[33;1m\u001b[1;3mMichael Schumacher (top left) and Lewis Hamilton (top right) have each won the championship a record seven times during their careers, while Sebastian Vettel ( ...\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
+"Observation: \u001b[33;1m\u001b[1;3mMax Verstappen\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find out Max Verstappen's age\n",
 "Action: Search\n",
-"Action Input: \"Michael Schumacher age\"\u001b[0m\n",
-"Observation: \u001b[33;1m\u001b[1;3m54 years\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to calculate the age raised to the 0.23 power\n",
+"Action Input: \"Max Verstappen Age\"\u001b[0m\n",
+"Observation: \u001b[33;1m\u001b[1;3m25 years\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 25 raised to the 0.23 power\n",
 "Action: Calculator\n",
-"Action Input: 54^0.23\u001b[0m\n",
-"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.502940725307012\u001b[0m\n",
+"Action Input: 25^0.23\u001b[0m\n",
+"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.84599359907945\u001b[0m\n",
 "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
-"Final Answer: Michael Schumacher, aged 54, raised to the 0.23 power is 2.502940725307012.\u001b[0m\n",
+"Final Answer: Max Verstappen, 25 years old, raised to the 0.23 power is 1.84599359907945.\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
 "\n",
@@ -129,17 +132,18 @@
 "\u001b[32;1m\u001b[1;3m I need to find out who won the US Open women's final in 2019 and then calculate her age raised to the 0.34 power.\n",
 "Action: Search\n",
 "Action Input: \"US Open women's final 2019 winner\"\u001b[0m\n",
-"Observation: \u001b[33;1m\u001b[1;3mBianca Andreescu\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to find out her age\n",
+"Observation: \u001b[33;1m\u001b[1;3mBianca Andreescu defeated Serena Williams in the final, 6–3, 7–5 to win the women's singles tennis title at the 2019 US Open. It was her first major title, and she became the first Canadian, as well as the first player born in the 2000s, to win a major singles title.\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find out Bianca Andreescu's age.\n",
 "Action: Search\n",
 "Action Input: \"Bianca Andreescu age\"\u001b[0m\n",
 "Observation: \u001b[33;1m\u001b[1;3m22 years\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to calculate her age raised to the 0.34 power\n",
+"Thought:\u001b[32;1m\u001b[1;3m I now know the age of Bianca Andreescu and can calculate her age raised to the 0.34 power.\n",
 "Action: Calculator\n",
 "Action Input: 22^0.34\u001b[0m\n",
-"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.8603798598506933\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
-"Final Answer: Bianca Andreescu, aged 22, won the US Open women's final in 2019 and her age raised to the 0.34 power is 2.86.\u001b[0m\n",
+"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.8603798598506933\n",
+"\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
+"Final Answer: Bianca Andreescu won the US Open women's final in 2019 and her age raised to the 0.34 power is 2.8603798598506933.\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
 "\n",
@@ -156,32 +160,35 @@
 "Thought:\u001b[32;1m\u001b[1;3m I need to calculate 53 raised to the 0.19 power\n",
 "Action: Calculator\n",
 "Action Input: 53^0.19\u001b[0m\n",
-"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.12624064206896\u001b[0m\n",
+"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.12624064206896\n",
+"\u001b[0m\n",
 "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
 "Final Answer: Jay-Z is Beyonce's husband and his age raised to the 0.19 power is 2.12624064206896.\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
-"Serial executed in 52.47 seconds.\n"
+"Serial executed in 65.11 seconds.\n"
 ]
 }
 ],
 "source": [
-"llm = OpenAI(temperature=0)\n",
-"tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm)\n",
-"agent = initialize_agent(\n",
-"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
-")\n",
+"def generate_serially():\n",
+"    for q in questions:\n",
+"        llm = OpenAI(temperature=0)\n",
+"        tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm)\n",
+"        agent = initialize_agent(\n",
+"            tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
+"        )\n",
+"        agent.run(q)\n",
 "\n",
 "s = time.perf_counter()\n",
-"for q in questions:\n",
-"    agent.run(q)\n",
+"generate_serially()\n",
 "elapsed = time.perf_counter() - s\n",
 "print(f\"Serial executed in {elapsed:0.2f} seconds.\")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 4,
 "id": "076d7b85-45ec-465d-8b31-c2ad119c3438",
 "metadata": {
 "tags": []
```

The two remaining hunks interleave the removed and added concurrent-run output too tightly for the old/new sides to be separated reliably; they are reproduced below as rendered, debris stripped:

```
@@ -195,8 +202,8 @@
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
@@ -206,94 +213,179 @@
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who won the US Open women's final in 2019 and then calculate her age raised to the 0.34 power.\n",
"\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out who Beyonce's husband is and then calculate his age raised to the 0.19 power.\n",
"Action: Search\n",
"Action Input: \"Who is Beyonce's husband?\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mJay-Z\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out who won the grand prix and then calculate their age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Formula 1 Grand Prix Winner\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out who won the US Open women's final in 2019 and then calculate her age raised to the 0.34 power.\n",
"Action: Search\n",
"Action Input: \"US Open women's final 2019 winner\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mBianca Andreescu\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mJason Sudeikis\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[33;1m\u001b[1;3mMax Verstappen\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[33;1m\u001b[1;3mBianca Andreescu defeated Serena Williams in the final, 6–3, 7–5 to win the women's singles tennis title at the 2019 US Open. It was her first major title, and she became the first Canadian, as well as the first player born in the 2000s, to win a major singles title.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Jason Sudeikis' age\n",
"Action: Search\n",
"Action Input: \"Jason Sudeikis age\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out Jay-Z's age\n",
"Action: Search\n",
"Action Input: \"How old is Jay-Z?\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m53 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out who won the US Open men's final in 2019 and then calculate his age raised to the 0.334 power.\n",
"Action: Search\n",
"Action Input: \"US Open men's final 2019 winner\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mRafael Nadal defeated Daniil Medvedev in the final, 7–5, 6–3, 5–7, 4–6, 6–4 to win the men's singles tennis title at the 2019 US Open. It was his fourth US ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out who won the grand prix and then calculate their age raised to the 0.23 power.\n",
"Action: Search\n",
"Action Input: \"Formula 1 Grand Prix Winner\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out who Beyonce's husband is and then calculate his age raised to the 0.19 power.\n",
"Action: Search\n",
"Action Input: \"Who is Beyonce's husband?\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mOlivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[33;1m\u001b[1;3mJay-Z\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[33;1m\u001b[1;3mMichael Schumacher (top left) and Lewis Hamilton (top right) have each won the championship a record seven times during their careers, while Sebastian Vettel ( ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out her age\n",
"Observation: \u001b[33;1m\u001b[1;3m47 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Max Verstappen's age\n",
"Action: Search\n",
"Action Input: \"Bianca Andreescu age\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out Jay-Z's age\n",
"Action Input: \"Max Verstappen Age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m25 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Bianca Andreescu's age.\n",
"Action: Search\n",
"Action Input: \"How old is Jay-Z?\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m53 years\u001b[0m\n",
"Thought:\n",
"Action Input: \"Bianca Andreescu age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m22 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Harry Styles' age.\n",
"Action: Search\n",
"Action Input: \"Harry Styles age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m29 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate her age raised to the 0.34 power\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 53 raised to the 0.19 power\n",
"Action: Calculator\n",
"Action Input: 22^0.34\u001b[0m\u001b[32;1m\u001b[1;3m I need to calculate 53 raised to the 0.19 power\n",
"Action: Calculator\n",
"Action Input: 53^0.19\u001b[0m\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
"Action: Calculator\n",
"Action Input: 29^0.23\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
"Action Input: 53^0.19\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
"Action: Search\n",
"Action Input: \"Rafael Nadal age\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out the age of the winner\n",
"Action: Search\n",
"Action Input: \"Michael Schumacher age\"\u001b[0m\n",
"Observation: \n",
"Action Input: \"Rafael Nadal age\"\u001b[0m\u001b[32;1m\u001b[1;3m I need to calculate 47 raised to the 0.23 power\n",
"Action: Calculator\n",
"Action Input: 47^0.23\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m36 years\u001b[0m\n",
"Thought:\u001b[33;1m\u001b[1;3m54 years\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.8603798598506933\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.12624064206896\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate the age raised to the 0.334 power\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 25 raised to the 0.23 power\n",
"Action: Calculator\n",
"Action Input: 36^0.334\u001b[0m\u001b[32;1m\u001b[1;3m I now need to calculate the age raised to the 0.23 power\n",
"Action Input: 25^0.23\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.12624064206896\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the age of Bianca Andreescu and can calculate her age raised to the 0.34 power.\n",
"Action: Calculator\n",
"Action Input: 54^0.23\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\u001b[0m\n",
"Action Input: 22^0.34\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.84599359907945\u001b[0m\n",
"Thought:\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.502940725307012\u001b[0m\n",
"Thought:\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.4242784855673896\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now need to calculate his age raised to the 0.334 power\n",
"Action: Calculator\n",
"Action Input: 36^0.334\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 2.8603798598506933\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Jay-Z is Beyonce's husband and his age raised to the 0.19 power is 2.12624064206896.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Concurrent executed in 14.49 seconds.\n"
"\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Max Verstappen, 25 years old, raised to the 0.23 power is 1.84599359907945.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: Bianca Andreescu won the US Open women's final in 2019 and her age raised to the 0.34 power is 2.8603798598506933.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Concurrent executed in 12.38 seconds.\n"
]
}
],
"source": [
"llm = OpenAI(temperature=0)\n",
"tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm)\n",
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")\n",
"async def generate_concurrently():\n",
"    agents = []\n",
"    # To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
"    # but you must manually close the client session at the end of your program/event loop\n",
"    aiosession = ClientSession()\n",
"    for _ in questions:\n",
"        manager = CallbackManager([StdOutCallbackHandler()])\n",
"        llm = OpenAI(temperature=0, callback_manager=manager)\n",
"        async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n",
"        agents.append(\n",
"            initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
"        )\n",
"    tasks = [async_agent.arun(q) for async_agent, q in zip(agents, questions)]\n",
"    await asyncio.gather(*tasks)\n",
"    await aiosession.close()\n",
"\n",
"s = time.perf_counter()\n",
"# If running this outside of Jupyter, use asyncio.run or loop.run_until_complete\n",
"tasks = [agent.arun(q) for q in questions]\n",
"await asyncio.gather(*tasks)\n",
"# If running this outside of Jupyter, use asyncio.run(generate_concurrently())\n",
"await generate_concurrently()\n",
"elapsed = time.perf_counter() - s\n",
"print(f\"Concurrent executed in {elapsed:0.2f} seconds.\")"
]
},
{
"cell_type": "markdown",
"id": "97ef285c-4a43-4a4e-9698-cd52a1bc56c9",
"metadata": {},
"source": [
"## Using Tracing with Asynchronous Agents\n",
"\n",
"To use tracing with async agents, you must pass in a custom `CallbackManager` with `LangChainTracer` to each agent running asynchronously. This way, you avoid collisions while the trace is being collected."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "44bda05a-d33e-4e91-9a71-a0f3f96aae95",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who won the US Open men's final in 2019 and then calculate his age raised to the 0.334 power.\n",
"Action: Search\n",
"Action Input: \"US Open men's final 2019 winner\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mRafael Nadal\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Rafael Nadal's age\n",
"Action: Search\n",
"Action Input: \"Rafael Nadal age\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m36 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 36 raised to the 0.334 power\n",
"Action: Calculator\n",
"Action Input: 36^0.334\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 3.3098250249682484\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Rafael Nadal, aged 36, won the US Open men's final in 2019 and his age raised to the 0.334 power is 3.3098250249682484.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
}
],
"source": [
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
"# but you must manually close the client session at the end of your program/event loop\n",
"aiosession = ClientSession()\n",
"tracer = LangChainTracer()\n",
"tracer.load_default_session()\n",
"manager = CallbackManager([StdOutCallbackHandler(), tracer])\n",
"\n",
"# Pass the manager into the llm if you want llm calls traced.\n",
"llm = OpenAI(temperature=0, callback_manager=manager)\n",
"\n",
"async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession)\n",
"async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
"await async_agent.arun(questions[0])\n",
"await aiosession.close()"
]
}
],
"metadata": {
@@ -312,7 +404,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.9.1"
}
},
"nbformat": 4,
```
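Stripped of the notebook JSON and ANSI escapes, the serial-vs-concurrent comparison in the notebook above reduces to roughly the following sketch. The `make_agent` helper and the questions list are illustrative; everything else is taken from the diff:

```python
# Condensed sketch of the pattern in the notebook above: one agent per
# question, run serially or concurrently. The questions are placeholders.
import asyncio
import time

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI

questions = ["question one", "question two"]  # placeholders

def make_agent():
    # hypothetical helper; the notebook inlines this per question
    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math", "serpapi"], llm=llm)
    return initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)

def generate_serially():
    for q in questions:
        make_agent().run(q)

async def generate_concurrently():
    agents = [make_agent() for _ in questions]
    await asyncio.gather(*(a.arun(q) for a, q in zip(agents, questions)))

s = time.perf_counter()
asyncio.run(generate_concurrently())  # inside Jupyter: `await generate_concurrently()`
print(f"Concurrent executed in {time.perf_counter() - s:0.2f} seconds.")
```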
```diff
@@ -373,7 +373,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "tools = get_tools(\"whats the weather?\")\n",
-"tool_names = [tool.name for tool in tools]\n",
 "agent = LLMSingleActionAgent(\n",
 "    llm_chain=llm_chain, \n",
```
File diff suppressed because one or more lines are too long
@@ -674,6 +674,153 @@
|
||||
"source": [
|
||||
"agent.run(\"whats 2**.12\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8aa3c353-bd89-467c-9c27-b83a90cd4daa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-argument tools\n",
|
||||
"\n",
|
||||
"Many functions expect structured inputs. These can also be supported using the Tool decorator or by directly subclassing `BaseTool`! We have to modify the LLM's OutputParser to map its string output to a dictionary to pass to the action, however."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "537bc628",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from typing import Optional, Union\n",
"\n",
"@tool\n",
"def custom_search(k: int, query: str, other_arg: Optional[str] = None):\n",
"    \"\"\"The custom search function.\"\"\"\n",
"    return f\"Here are the results for the custom search: k={k}, query={query}, other_arg={other_arg}\""
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "d5c992cf-776a-40cd-a6c4-e7cf65ea709e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import re\n",
"from langchain.schema import (\n",
"    AgentAction,\n",
"    AgentFinish,\n",
")\n",
"from langchain.agents import AgentOutputParser\n",
"\n",
"# We will add a custom parser to map the arguments to a dictionary\n",
"class CustomOutputParser(AgentOutputParser):\n",
"    \n",
"    def parse_tool_input(self, action_input: str) -> dict:\n",
"        # Regex pattern to match arguments and their values\n",
"        pattern = r\"(\\w+)\\s*=\\s*(None|\\\"[^\\\"]*\\\"|\\d+)\"\n",
"        matches = re.findall(pattern, action_input)\n",
"        \n",
"        if not matches:\n",
"            raise ValueError(f\"Could not parse action input: `{action_input}`\")\n",
"\n",
"        # Create a dictionary with the parsed arguments and their values\n",
"        parsed_input = {}\n",
"        for arg, value in matches:\n",
"            if value == \"None\":\n",
"                parsed_value = None\n",
"            elif value.isdigit():\n",
"                parsed_value = int(value)\n",
"            else:\n",
"                parsed_value = value.strip('\"')\n",
"            parsed_input[arg] = parsed_value\n",
"\n",
"        return parsed_input\n",
"    \n",
"    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
"        # Check if agent should finish\n",
"        if \"Final Answer:\" in llm_output:\n",
"            return AgentFinish(\n",
"                # Return values is generally always a dictionary with a single `output` key\n",
"                # It is not recommended to try anything else at the moment :)\n",
"                return_values={\"output\": llm_output.split(\"Final Answer:\")[-1].strip()},\n",
"                log=llm_output,\n",
"            )\n",
"        # Parse out the action and action input\n",
"        regex = r\"Action\\s*\\d*\\s*:(.*?)\\nAction\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)\"\n",
"        match = re.search(regex, llm_output, re.DOTALL)\n",
"        if not match:\n",
"            raise ValueError(f\"Could not parse LLM output: `{llm_output}`\")\n",
"        action = match.group(1).strip()\n",
"        action_input = match.group(2)\n",
"        tool_input = self.parse_tool_input(action_input)\n",
"        # Return the action and action input\n",
"        return AgentAction(tool=action, tool_input=tool_input, log=llm_output)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "68269547-1482-4138-a6ea-58f00b4a9548",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)\n",
"agent = initialize_agent([custom_search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, agent_kwargs={\"output_parser\": CustomOutputParser()})"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "0947835a-691c-4f51-b8f4-6744e0e48ab1",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to use a search function to find the answer\n",
"Action: custom_search\n",
"Action Input: k=1, query=\"me\"\u001b[0m\u001b[36;1m\u001b[1;3mHere are the results for the custom search: k=1, query=me, other_arg=None\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The results of the custom search for k=1, query=me, other_arg=None.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The results of the custom search for k=1, query=me, other_arg=None.'"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run(\"Search for me and tell me whatever it says\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "caf39c66-102b-42c1-baf2-777a49886ce4",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -692,7 +839,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.9.1"
},
"vscode": {
"interpreter": {

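To see what the `CustomOutputParser` in this hunk actually produces, here is a minimal standalone sketch of its `parse_tool_input` step, using the same regex and the `Action Input` string from the trace above:

```python
import re

def parse_tool_input(action_input: str) -> dict:
    # Match `name=value` pairs where value is None, a quoted string, or an integer.
    pattern = r'(\w+)\s*=\s*(None|"[^"]*"|\d+)'
    matches = re.findall(pattern, action_input)
    if not matches:
        raise ValueError(f"Could not parse action input: `{action_input}`")
    parsed_input = {}
    for arg, value in matches:
        if value == "None":
            parsed_input[arg] = None
        elif value.isdigit():
            parsed_input[arg] = int(value)
        else:
            parsed_input[arg] = value.strip('"')
    return parsed_input

print(parse_tool_input('k=1, query="me"'))  # {'k': 1, 'query': 'me'}
```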
@@ -75,8 +75,6 @@
}
],
"source": [
"\n",
"arxiv = ArxivAPIWrapper()\n",
"docs = arxiv.run(\"1605.08386\")\n",
"docs"
]

@@ -1,119 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## AWS Lambda API"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook goes over how to use the AWS Lambda Tool component.\n",
"\n",
"AWS Lambda is a serverless computing service provided by Amazon Web Services (AWS), designed to allow developers to build and run applications and services without the need for provisioning or managing servers. This serverless architecture enables you to focus on writing and deploying code, while AWS automatically takes care of scaling, patching, and managing the infrastructure required to run your applications.\n",
"\n",
"By including a `awslambda` in the list of tools provided to an Agent, you can grant your Agent the ability to invoke code running in your AWS Cloud for whatever purposes you need.\n",
"\n",
"When an Agent uses the awslambda tool, it will provide an argument of type string which will in turn be passed into the Lambda function via the event parameter.\n",
"\n",
"First, you need to install `boto3` python package."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": [
"!pip install boto3 > /dev/null"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"In order for an agent to use the tool, you must provide it with the name and description that match the functionality of your lambda function's logic. \n",
"\n",
"You must also provide the name of your function. "
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that because this tool is effectively just a wrapper around the boto3 library, you will need to run `aws configure` in order to make use of the tool. For more detail, see [here](https://docs.aws.amazon.com/cli/index.html)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.agents import load_tools, AgentType\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"\n",
"tools = load_tools(\n",
"    [\"awslambda\"],\n",
"    awslambda_tool_name=\"email-sender\",\n",
"    awslambda_tool_description=\"sends an email with the specified content to test@testing123.com\",\n",
"    function_name=\"testFunction1\"\n",
")\n",
"\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
"\n",
"agent.run(\"Send an email to test@testing123.com saying hello world.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
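The deleted notebook only shows the LangChain side of the call. For completeness, a hedged sketch of the other half: the string the agent supplies arrives in the Lambda function's `event` parameter. The handler below is hypothetical and only illustrates the shape of the contract:

```python
import json

def lambda_handler(event, context):
    # The agent's tool input is delivered via the event parameter.
    message = event if isinstance(event, str) else json.dumps(event)
    # A real "email-sender" function would send mail here; this stub just echoes.
    return {"statusCode": 200, "body": f"Would send email saying: {message}"}
```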
@@ -5,158 +5,57 @@
"id": "8f210ec3",
"metadata": {},
"source": [
"# Shell Tool\n",
"\n",
"Giving agents access to the shell is powerful (though risky outside a sandboxed environment).\n",
"\n",
"The LLM can use it to execute any shell commands. A common use case for this is letting the LLM interact with your local file system."
|
||||
"# Bash\n",
|
||||
"It can often be useful to have an LLM generate bash commands, and then run them. A common use case for this is letting the LLM interact with your local file system. We provide an easy util to execute bash commands."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f7b3767b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools import ShellTool\n",
|
||||
"\n",
|
||||
"shell_tool = ShellTool()"
|
||||
"from langchain.utilities import BashProcess"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c92ac832-556b-4f66-baa4-b78f965dfba0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello World!\n",
|
||||
"\n",
|
||||
"real\t0m0.000s\n",
|
||||
"user\t0m0.000s\n",
|
||||
"sys\t0m0.000s\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/wfh/code/lc/lckg/langchain/tools/shell/tool.py:34: UserWarning: The shell tool has no safeguards by default. Use at your own risk.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(shell_tool.run({\"commands\": [\"echo 'Hello World!'\", \"time\"]}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2fa952fc",
|
||||
"id": "cf1c92f0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Use with Agents\n",
|
||||
"\n",
|
||||
"As with all tools, these can be given to an agent to accomplish more complex tasks. Let's have the agent fetch some links from a web page."
|
||||
"bash = BashProcess()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "851fee9f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"id": "2fa952fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mQuestion: What is the task?\n",
|
||||
"Thought: We need to download the langchain.com webpage and extract all the URLs from it. Then we need to sort the URLs and return them.\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"shell\",\n",
|
||||
" \"action_input\": {\n",
|
||||
" \"commands\": [\n",
|
||||
" \"curl -s https://langchain.com | grep -o 'http[s]*://[^\\\" ]*' | sort\"\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m"
|
||||
"bash.ipynb\n",
|
||||
"google_search.ipynb\n",
|
||||
"python.ipynb\n",
|
||||
"requests.ipynb\n",
|
||||
"serpapi.ipynb\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/wfh/code/lc/lckg/langchain/tools/shell/tool.py:34: UserWarning: The shell tool has no safeguards by default. Use at your own risk.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mhttps://blog.langchain.dev/\n",
|
||||
"https://discord.gg/6adMQxSpJS\n",
|
||||
"https://docs.langchain.com/docs/\n",
|
||||
"https://github.com/hwchase17/chat-langchain\n",
|
||||
"https://github.com/hwchase17/langchain\n",
|
||||
"https://github.com/hwchase17/langchainjs\n",
|
||||
"https://github.com/sullivan-sean/chat-langchainjs\n",
|
||||
"https://js.langchain.com/docs/\n",
|
||||
"https://python.langchain.com/en/latest/\n",
|
||||
"https://twitter.com/langchainai\n",
|
||||
"\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe URLs have been successfully extracted and sorted. We can return the list of URLs as the final answer.\n",
|
||||
"Final Answer: [\"https://blog.langchain.dev/\", \"https://discord.gg/6adMQxSpJS\", \"https://docs.langchain.com/docs/\", \"https://github.com/hwchase17/chat-langchain\", \"https://github.com/hwchase17/langchain\", \"https://github.com/hwchase17/langchainjs\", \"https://github.com/sullivan-sean/chat-langchainjs\", \"https://js.langchain.com/docs/\", \"https://python.langchain.com/en/latest/\", \"https://twitter.com/langchainai\"]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'[\"https://blog.langchain.dev/\", \"https://discord.gg/6adMQxSpJS\", \"https://docs.langchain.com/docs/\", \"https://github.com/hwchase17/chat-langchain\", \"https://github.com/hwchase17/langchain\", \"https://github.com/hwchase17/langchainjs\", \"https://github.com/sullivan-sean/chat-langchainjs\", \"https://js.langchain.com/docs/\", \"https://python.langchain.com/en/latest/\", \"https://twitter.com/langchainai\"]'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"shell_tool.description = shell_tool.description + f\"args {shell_tool.args}\".replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n",
|
||||
"self_ask_with_search = initialize_agent([shell_tool], llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
|
||||
"self_ask_with_search.run(\"Download the langchain.com webpage and grep for all urls. Return only a sorted list of them. Be sure to use double quotes.\")"
|
||||
"print(bash.run(\"ls\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8d0ea3ac-0890-4e39-9cec-74bd80b4b8b8",
|
||||
"id": "851fee9f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -178,7 +77,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.16"
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
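One easy-to-miss detail in the agent cell above is the description line: the tool's args schema is appended to its description with `{` and `}` doubled, so that prompt templates do not mistake the braces for input variables. A small hedged sketch of that escaping in isolation (the sample description string is made up):

```python
def escape_for_template(text: str) -> str:
    # Double the braces so PromptTemplate-style formatting leaves them intact.
    return text.replace("{", "{{").replace("}", "}}")

description = "Run shell commands. args {'commands': {'type': 'array'}}"
print(escape_for_template(description))
# Run shell commands. args {{'commands': {{'type': 'array'}}}}
```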
@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import DuckDuckGoSearchRun"
"from langchain.tools import DuckDuckGoSearchTool"
]
},
{
@@ -37,7 +37,7 @@
"metadata": {},
"outputs": [],
"source": [
"search = DuckDuckGoSearchRun()"
"search = DuckDuckGoSearchTool()"
]
},
{

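These two hunks only swap the class name (`DuckDuckGoSearchRun` on one side, `DuckDuckGoSearchTool` on the other). If you are unsure which name your installed version exposes, a guarded import is a reasonable hedge:

```python
try:
    from langchain.tools import DuckDuckGoSearchRun as DuckDuckGoSearch
except ImportError:
    # Older/other versions expose the tool under this name instead.
    from langchain.tools import DuckDuckGoSearchTool as DuckDuckGoSearch

search = DuckDuckGoSearch()
print(search.run("LangChain"))
```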
@@ -1,190 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# File System Tools\n",
"\n",
"LangChain provides tools for interacting with a local file system out of the box. This notebook walks through some of them.\n",
"\n",
"Note: these tools are not recommended for use outside a sandboxed environment! "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"First, we'll import the tools."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.tools.file_management import (\n",
"    ReadFileTool,\n",
"    CopyFileTool,\n",
"    DeleteFileTool,\n",
"    MoveFileTool,\n",
"    WriteFileTool,\n",
"    ListDirectoryTool,\n",
")\n",
"from langchain.agents.agent_toolkits import FileManagementToolkit\n",
"from tempfile import TemporaryDirectory\n",
"\n",
"# We'll make a temporary directory to avoid clutter\n",
"working_directory = TemporaryDirectory()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## The FileManagementToolkit\n",
"\n",
"If you want to provide all the file tooling to your agent, it's easy to do so with the toolkit. We'll pass the temporary directory in as a root directory as a workspace for the LLM.\n",
|
||||
"\n",
|
||||
"It's recommended to always pass in a root directory, since without one, it's easy for the LLM to pollute the working directory, and without one, there isn't any validation against\n",
|
||||
"straightforward prompt injection."
|
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"[CopyFileTool(name='copy_file', description='Create a copy of a file in a specified location', args_schema=<class 'langchain.tools.file_management.copy.FileCopyInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" DeleteFileTool(name='file_delete', description='Delete a file', args_schema=<class 'langchain.tools.file_management.delete.FileDeleteInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" FileSearchTool(name='file_search', description='Recursively search for files in a subdirectory that match the regex pattern', args_schema=<class 'langchain.tools.file_management.file_search.FileSearchInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" MoveFileTool(name='move_file', description='Move or rename a file from one location to another', args_schema=<class 'langchain.tools.file_management.move.FileMoveInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" ReadFileTool(name='read_file', description='Read file from disk', args_schema=<class 'langchain.tools.file_management.read.ReadFileInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" WriteFileTool(name='write_file', description='Write file to disk', args_schema=<class 'langchain.tools.file_management.write.WriteFileInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" ListDirectoryTool(name='list_directory', description='List files and directories in a specified folder', args_schema=<class 'langchain.tools.file_management.list_dir.DirectoryListingInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug')]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"toolkit = FileManagementToolkit(root_dir=str(working_directory.name)) # If you don't provide a root_dir, operations will default to the current working directory\n",
"toolkit.get_tools()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Selecting File System Tools\n",
"\n",
"If you only want to select certain tools, you can pass them in as arguments when initializing the toolkit, or you can individually initialize the desired tools."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"[ReadFileTool(name='read_file', description='Read file from disk', args_schema=<class 'langchain.tools.file_management.read.ReadFileInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" WriteFileTool(name='write_file', description='Write file to disk', args_schema=<class 'langchain.tools.file_management.write.WriteFileInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug'),\n",
" ListDirectoryTool(name='list_directory', description='List files and directories in a specified folder', args_schema=<class 'langchain.tools.file_management.list_dir.DirectoryListingInput'>, return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x1156f4350>, root_dir='/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/tmpxb8c3aug')]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tools = FileManagementToolkit(root_dir=str(working_directory.name), selected_tools=[\"read_file\", \"write_file\", \"list_directory\"]).get_tools()\n",
"tools"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'File written successfully to example.txt.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"read_tool, write_tool, list_tool = tools\n",
"write_tool.run({\"file_path\": \"example.txt\", \"text\": \"Hello World!\"})"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'example.txt'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# List files in the working directory\n",
"list_tool.run({})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
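The deleted notebook stops just short of wiring the toolkit into an agent. A hedged sketch of that final step, reusing the notebook's own toolkit calls (the agent setup is an assumption, not part of the original):

```python
from tempfile import TemporaryDirectory

from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits import FileManagementToolkit
from langchain.llms import OpenAI

# Scope every file operation to a temporary root directory.
working_directory = TemporaryDirectory()
tools = FileManagementToolkit(
    root_dir=str(working_directory.name),
    selected_tools=["read_file", "write_file", "list_directory"],
).get_tools()

agent = initialize_agent(
    tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Write 'Hello World!' to example.txt, then list the directory.")
```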
@@ -69,8 +69,7 @@
}
],
"source": [
"local_file_path = StableDiffusionTool().langchain.run(\"Please create a photo of a dog riding a skateboard\")\n",
"local_file_path"
"StableDiffusionTool().langchain.run(\"Please create a photo of a dog riding a skateboard\")"
]
},
{
@@ -90,7 +89,7 @@
"metadata": {},
"outputs": [],
"source": [
"im = Image.open(local_file_path)"
"im = Image.open(\"/Users/harrisonchase/workplace/langchain/docs/modules/agents/tools/examples/b61c1dd9-47e2-46f1-a47c-20d27640993d/tmp4ap48vnm.jpg\")"
]
},
{

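For readers comparing the two sides of this hunk: one variant captures the path returned by the tool, the other hardcodes a machine-specific path. Capturing the return value is the portable pattern; a hedged sketch (assuming the notebook's earlier cells imported `StableDiffusionTool`, which this hunk does not show):

```python
from PIL import Image

# StableDiffusionTool comes from the notebook's earlier cells; per the
# left-hand side of the diff, its run() returns the generated file's local path.
local_file_path = StableDiffusionTool().langchain.run(
    "Please create a photo of a dog riding a skateboard"
)
im = Image.open(local_file_path)  # no hardcoded, machine-specific path needed
```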
@@ -19,7 +19,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool\n",
"from langchain.utilities import PythonREPL"
]
},
@@ -60,14 +59,7 @@
"id": "54fc1f03",
"metadata": {},
"outputs": [],
"source": [
"# You can create the tool to pass to an agent\n",
"repl_tool = Tool(\n",
"    name=\"python_repl\",\n",
"    description=\"A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.\",\n",
"    func=python_repl\n",
")"
]
"source": []
}
],
"metadata": {

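The removed cell wrapped a `PythonREPL` utility as an agent `Tool`. A hedged sketch of the same idea that can be run directly; note that passing the bound `run` method as `func` (rather than the REPL object itself, as the removed cell did) keeps the wrapper callable:

```python
from langchain.agents import Tool
from langchain.utilities import PythonREPL

python_repl = PythonREPL()
print(python_repl.run("print(17 * 3)"))  # '51\n'

repl_tool = Tool(
    name="python_repl",
    description=(
        "A Python shell. Use this to execute python commands. "
        "Input should be a valid python command. If you want to see "
        "the output of a value, you should print it out with `print(...)`."
    ),
    func=python_repl.run,
)
```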
@@ -1,116 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# SceneXplain\n",
"\n",
"\n",
"[SceneXplain](https://scenex.jina.ai/) is an ImageCaptioning service accessible through the SceneXplain Tool.\n",
|
||||
"\n",
|
||||
"To use this tool, you'll need to make an account and fetch your API Token [from the website](https://scenex.jina.ai/api). Then you can instantiate the tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain.tools import SceneXplainTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"os.environ[\"SCENEX_API_KEY\"] = \"<YOUR_API_KEY>\"\n",
|
||||
"tool = SceneXplainTool()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage in an Agent\n",
|
||||
"\n",
|
||||
"The tool can be used in any LangChain agent as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Thought: Do I need to use a tool? Yes\n",
|
||||
"Action: Image Explainer\n",
|
||||
"Action Input: https://storage.googleapis.com/causal-diffusion.appspot.com/imagePrompts%2F0rw369i5h9t%2Foriginal.png\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mIn a charmingly whimsical scene, a young girl is seen braving the rain alongside her furry companion, the lovable Totoro. The two are depicted standing on a bustling street corner, where they are sheltered from the rain by a bright yellow umbrella. The girl, dressed in a cheerful yellow frock, holds onto the umbrella with both hands while gazing up at Totoro with an expression of wonder and delight.\n",
|
||||
"\n",
|
||||
"Totoro, meanwhile, stands tall and proud beside his young friend, holding his own umbrella aloft to protect them both from the downpour. His furry body is rendered in rich shades of grey and white, while his large ears and wide eyes lend him an endearing charm.\n",
|
||||
"\n",
|
||||
"In the background of the scene, a street sign can be seen jutting out from the pavement amidst a flurry of raindrops. A sign with Chinese characters adorns its surface, adding to the sense of cultural diversity and intrigue. Despite the dreary weather, there is an undeniable sense of joy and camaraderie in this heartwarming image.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m Do I need to use a tool? No\n",
|
||||
"AI: This image appears to be a still from the 1988 Japanese animated fantasy film My Neighbor Totoro. The film follows two young girls, Satsuki and Mei, as they explore the countryside and befriend the magical forest spirits, including the titular character Totoro.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"This image appears to be a still from the 1988 Japanese animated fantasy film My Neighbor Totoro. The film follows two young girls, Satsuki and Mei, as they explore the countryside and befriend the magical forest spirits, including the titular character Totoro.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
|
||||
"tools = [\n",
|
||||
" tool\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True\n",
|
||||
")\n",
|
||||
"output = agent.run(\n",
|
||||
" input=(\n",
|
||||
" \"What is in this image https://storage.googleapis.com/causal-diffusion.appspot.com/imagePrompts%2F0rw369i5h9t%2Foriginal.png. \"\n",
|
||||
" \"Is it movie or a game? If it is a movie, what is the name of the movie?\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(output)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
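Beyond the agent loop shown in the deleted notebook, the tool can be exercised directly, assuming it follows the standard single-string `run` interface of LangChain tools (a sketch, not verified against the original):

```python
import os

from langchain.tools import SceneXplainTool

os.environ["SCENEX_API_KEY"] = "<YOUR_API_KEY>"
tool = SceneXplainTool()
# Tools take a single string input; here, the image URL from the notebook.
print(tool.run(
    "https://storage.googleapis.com/causal-diffusion.appspot.com/"
    "imagePrompts%2F0rw369i5h9t%2Foriginal.png"
))
```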
@@ -102,15 +102,7 @@
"id": "e0a1dc1c",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool\n",
"# You can create the tool to pass to an agent\n",
"repl_tool = Tool(\n",
"    name=\"python_repl\",\n",
"    description=\"A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.\",\n",
"    func=search.run,\n",
")"
]
"source": []
}
],
"metadata": {

File diff suppressed because it is too large
@@ -10,7 +10,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 1,
"metadata": {},
"outputs": [
{
@@ -24,8 +24,8 @@
"\n",
"```bash\n",
"echo \"Hello World\"\n",
"```\u001b[0m\n",
"Code: \u001b[33;1m\u001b[1;3m['echo \"Hello World\"']\u001b[0m\n",
"```\u001b[0m['```bash', 'echo \"Hello World\"', '```']\n",
"\n",
"Answer: \u001b[33;1m\u001b[1;3mHello World\n",
"\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
@@ -37,7 +37,7 @@
"'Hello World\\n'"
]
},
"execution_count": 9,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@@ -50,7 +50,7 @@
"\n",
"text = \"Please write a bash script that prints 'Hello World' to the console.\"\n",
"\n",
"bash_chain = LLMBashChain.from_llm(llm, verbose=True)\n",
"bash_chain = LLMBashChain(llm=llm, verbose=True)\n",
"\n",
"bash_chain.run(text)"
]
@@ -65,12 +65,11 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain.chains.llm_bash.prompt import BashOutputParser\n",
"\n",
"_PROMPT_TEMPLATE = \"\"\"If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n",
"Question: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n",
@@ -89,12 +88,12 @@
"That is the format. Begin!\n",
"Question: {question}\"\"\"\n",
"\n",
"PROMPT = PromptTemplate(input_variables=[\"question\"], template=_PROMPT_TEMPLATE, output_parser=BashOutputParser())"
"PROMPT = PromptTemplate(input_variables=[\"question\"], template=_PROMPT_TEMPLATE)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 29,
"metadata": {},
"outputs": [
{
@@ -108,8 +107,8 @@
"\n",
"```bash\n",
"printf \"Hello World\\n\"\n",
"```\u001b[0m\n",
"Code: \u001b[33;1m\u001b[1;3m['printf \"Hello World\\\\n\"']\u001b[0m\n",
"```\u001b[0m['```bash', 'printf \"Hello World\\\\n\"', '```']\n",
"\n",
"Answer: \u001b[33;1m\u001b[1;3mHello World\n",
"\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
@@ -121,125 +120,18 @@
"'Hello World\\n'"
]
},
"execution_count": 11,
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"bash_chain = LLMBashChain.from_llm(llm, prompt=PROMPT, verbose=True)\n",
"bash_chain = LLMBashChain(llm=llm, prompt=PROMPT, verbose=True)\n",
"\n",
"text = \"Please write a bash script that prints 'Hello World' to the console.\"\n",
"\n",
"bash_chain.run(text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Persistent Terminal\n",
"\n",
"By default, the chain will run in a separate subprocess each time it is called. This behavior can be changed by instantiating with a persistent bash process."
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
"List the current directory then move up a level.\u001b[32;1m\u001b[1;3m\n",
"\n",
"```bash\n",
"ls\n",
"cd ..\n",
"```\u001b[0m\n",
"Code: \u001b[33;1m\u001b[1;3m['ls', 'cd ..']\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3mapi.ipynb\t\t\tllm_summarization_checker.ipynb\n",
"constitutional_chain.ipynb\tmoderation.ipynb\n",
"llm_bash.ipynb\t\t\topenai_openapi.yaml\n",
"llm_checker.ipynb\t\topenapi.ipynb\n",
"llm_math.ipynb\t\t\tpal.ipynb\n",
"llm_requests.ipynb\t\tsqlite.ipynb\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'api.ipynb\\t\\t\\tllm_summarization_checker.ipynb\\r\\nconstitutional_chain.ipynb\\tmoderation.ipynb\\r\\nllm_bash.ipynb\\t\\t\\topenai_openapi.yaml\\r\\nllm_checker.ipynb\\t\\topenapi.ipynb\\r\\nllm_math.ipynb\\t\\t\\tpal.ipynb\\r\\nllm_requests.ipynb\\t\\tsqlite.ipynb'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.utilities.bash import BashProcess\n",
"\n",
"\n",
"persistent_process = BashProcess(persistent=True)\n",
"bash_chain = LLMBashChain.from_llm(llm, bash_process=persistent_process, verbose=True)\n",
"\n",
"text = \"List the current directory then move up a level.\"\n",
"\n",
"bash_chain.run(text)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
"List the current directory then move up a level.\u001b[32;1m\u001b[1;3m\n",
"\n",
"```bash\n",
"ls\n",
"cd ..\n",
"```\u001b[0m\n",
"Code: \u001b[33;1m\u001b[1;3m['ls', 'cd ..']\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3mexamples\t\tgetting_started.ipynb\tindex_examples\n",
"generic\t\t\thow_to_guides.rst\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'examples\\t\\tgetting_started.ipynb\\tindex_examples\\r\\ngeneric\\t\\t\\thow_to_guides.rst'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Run the same command again and see that the state is maintained between calls\n",
"bash_chain.run(text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -258,7 +150,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
}
},
"nbformat": 4,

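The "Persistent Terminal" cells above show the behavior through the chain. The same persistence can be observed on the `BashProcess` utility by itself; a hedged sketch (the exact commands are illustrative):

```python
from langchain.utilities.bash import BashProcess

# persistent=True keeps a single bash process alive, so state such as the
# working directory carries over between calls.
bash = BashProcess(persistent=True)
bash.run("cd /tmp")
print(bash.run("pwd"))  # expected to print /tmp, because the `cd` persisted
```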
@@ -23,16 +23,28 @@
"\n",
"\n",
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
"\u001b[1mChain 0\u001b[0m:\n",
"{'statement': '\\nNone. Mammals do not lay eggs.'}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[1mChain 1\u001b[0m:\n",
"{'assertions': '\\n• Mammals reproduce using live birth\\n• Mammals do not lay eggs\\n• Animals that lay eggs are not mammals'}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001b[1mChain 2\u001b[0m:\n",
"{'checked_assertions': '\\n1. True\\n\\n2. True\\n\\n3. False - Mammals are a class of animals that includes animals that lay eggs, such as monotremes (platypus and echidna).'}\n",
"\n",
"\u001b[1mChain 3\u001b[0m:\n",
"{'revised_statement': ' Monotremes, such as the platypus and echidna, lay the biggest eggs of any mammal.'}\n",
"\n",
"\n",
"\u001b[1m> Finished SequentialChain chain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMCheckerChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' No mammal lays the biggest eggs. The Elephant Bird, which was a species of giant bird, laid the largest eggs of any bird.'"
"' Monotremes, such as the platypus and echidna, lay the biggest eggs of any mammal.'"
]
},
"execution_count": 1,
@@ -48,7 +60,7 @@
"\n",
"text = \"What type of mammal lays the biggest eggs?\"\n",
"\n",
"checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)\n",
"checker_chain = LLMCheckerChain(llm=llm, verbose=True)\n",
"\n",
"checker_chain.run(text)"
]
@@ -77,7 +89,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.9"
}
},
"nbformat": 4,

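This file (and the llm_bash, llm_math, and llm_summarization_checker diffs around it) toggles between `Chain.from_llm(llm, ...)` and `Chain(llm=llm, ...)`. If you need code that tolerates both sides of the change, a guarded construction is one option (a sketch, not an official migration path):

```python
from langchain import OpenAI
from langchain.chains import LLMCheckerChain

llm = OpenAI(temperature=0)

# Prefer the classmethod constructor when the installed version provides it.
if hasattr(LLMCheckerChain, "from_llm"):
    checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)
else:
    checker_chain = LLMCheckerChain(llm=llm, verbose=True)

checker_chain.run("What type of mammal lays the biggest eggs?")
```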
@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "44e9ba31",
"metadata": {},
"outputs": [
@@ -24,22 +24,23 @@
"\n",
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
"What is 13 raised to the .3432 power?\u001b[32;1m\u001b[1;3m\n",
"```text\n",
"13 ** .3432\n",
"```python\n",
"import math\n",
"print(math.pow(13, .3432))\n",
"```\n",
"...numexpr.evaluate(\"13 ** .3432\")...\n",
"\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3m2.4116004626599237\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3m2.4116004626599237\n",
"\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Answer: 2.4116004626599237'"
"'Answer: 2.4116004626599237\\n'"
]
},
"execution_count": 4,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@@ -48,7 +49,102 @@
"from langchain import OpenAI, LLMMathChain\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"llm_math = LLMMathChain.from_llm(llm, verbose=True)\n",
"llm_math = LLMMathChain(llm=llm, verbose=True)\n",
"\n",
"llm_math.run(\"What is 13 raised to the .3432 power?\")"
]
},
{
"cell_type": "markdown",
"id": "2bdd5fc6",
"metadata": {},
"source": [
"## Customize Prompt\n",
"You can also customize the prompt that is used. Here is an example prompting it to use numpy"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "76be17b0",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"\n",
"_PROMPT_TEMPLATE = \"\"\"You are GPT-3, and you can't do math.\n",
"\n",
"You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.\n",
"\n",
"So we hooked you up to a Python 3 kernel, and now you can execute code. If you execute code, you must print out the final answer using the print function. You MUST use the python package numpy to answer your question. You must import numpy as np.\n",
"\n",
"\n",
"Question: ${{Question with hard calculation.}}\n",
"```python\n",
"${{Code that prints what you need to know}}\n",
"print(${{code}})\n",
"```\n",
"```output\n",
"${{Output of your code}}\n",
"```\n",
"Answer: ${{Answer}}\n",
"\n",
"Begin.\n",
"\n",
"Question: What is 37593 * 67?\n",
"\n",
"```python\n",
"import numpy as np\n",
"print(np.multiply(37593, 67))\n",
"```\n",
"```output\n",
"2518731\n",
"```\n",
"Answer: 2518731\n",
"\n",
"Question: {question}\"\"\"\n",
"\n",
"PROMPT = PromptTemplate(input_variables=[\"question\"], template=_PROMPT_TEMPLATE)"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "0c42faa0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
"What is 13 raised to the .3432 power?\u001b[32;1m\u001b[1;3m\n",
"\n",
"```python\n",
"import numpy as np\n",
"print(np.power(13, .3432))\n",
"```\n",
"\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3m2.4116004626599237\n",
"\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Answer: 2.4116004626599237\\n'"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_math = LLMMathChain(llm=llm, prompt=PROMPT, verbose=True)\n",
"\n",
"llm_math.run(\"What is 13 raised to the .3432 power?\")"
]
@@ -56,7 +152,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "e978bb8e",
"id": "0c62951b",
"metadata": {},
"outputs": [],
"source": []
@@ -78,7 +174,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.9"
}
},
"nbformat": 4,

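The newer trace above shows `LLMMathChain` handing the model's expression to `numexpr.evaluate` instead of executing generated Python. The numeric step in isolation:

```python
import numexpr

# Evaluate the arithmetic expression safely, without running arbitrary code.
result = numexpr.evaluate("13 ** .3432")
print(float(result))  # 2.4116004626599237
```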
@@ -221,11 +221,11 @@
|
||||
"\n",
|
||||
"• The light from these galaxies has been traveling for over 13 billion years to reach us. - True \n",
|
||||
"\n",
|
||||
"• JWST has provided us with the first images of exoplanets, which are planets outside of our own solar system. - False. The first exoplanet was discovered in 1992, but the first images of exoplanets were taken by the Hubble Space Telescope in 2004. \n",
|
||||
"• JWST has provided us with the first images of exoplanets, which are planets outside of our own solar system. - False. The first exoplanet was discovered in 1992, but the first images of exoplanets were taken by the Hubble Space Telescope in 1995. \n",
|
||||
"\n",
|
||||
"• Exoplanets were first discovered in 1992. - True \n",
|
||||
"\n",
|
||||
"• The JWST has allowed us to see exoplanets in greater detail. - Undetermined. The JWST has not yet been launched, so it is not yet known how much detail it will be able to provide.\n",
|
||||
"• The JWST has allowed us to see exoplanets in greater detail. - Undetermined. It is too early to tell as the JWST has not been launched yet.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"Original Summary:\n",
|
||||
@@ -296,11 +296,11 @@
|
||||
"\n",
|
||||
"• The light from these galaxies has been traveling for over 13 billion years to reach us. - True \n",
|
||||
"\n",
|
||||
"• JWST has provided us with the first images of exoplanets, which are planets outside of our own solar system. - False. The first exoplanet was discovered in 1992, but the first images of exoplanets were taken by the Hubble Space Telescope in 2004. \n",
|
||||
"• JWST has provided us with the first images of exoplanets, which are planets outside of our own solar system. - False. The first exoplanet was discovered in 1992, but the first images of exoplanets were taken by the Hubble Space Telescope in 1995. \n",
|
||||
"\n",
|
||||
"• Exoplanets were first discovered in 1992. - True \n",
|
||||
"\n",
|
||||
"• The JWST has allowed us to see exoplanets in greater detail. - Undetermined. The JWST has not yet been launched, so it is not yet known how much detail it will be able to provide.\n",
|
||||
"• The JWST has allowed us to see exoplanets in greater detail. - Undetermined. It is too early to tell as the JWST has not been launched yet.\n",
|
||||
"\"\"\"\n",
|
||||
"Result:\u001b[0m\n",
|
||||
"\n",
|
||||
@@ -312,7 +312,7 @@
|
||||
"Your 9-year old might like these recent discoveries made by The James Webb Space Telescope (JWST):\n",
|
||||
"• In 2023, The JWST will spot a number of galaxies nicknamed \"green peas.\" They were given this name because they are small, round, and green, like peas.\n",
|
||||
"• The telescope will capture images of galaxies that are over 13 billion years old. This means that the light from these galaxies has been traveling for over 13 billion years to reach us.\n",
|
||||
"• Exoplanets, which are planets outside of our own solar system, were first discovered in 1992. The JWST will allow us to see them in greater detail when it is launched in 2023.\n",
|
||||
"• Exoplanets, which are planets outside of our own solar system, were first discovered in 1992. The JWST will allow us to see them in greater detail than ever before.\n",
|
||||
"These discoveries can spark a child's imagination about the infinite wonders of the universe.\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
@@ -321,7 +321,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Your 9-year old might like these recent discoveries made by The James Webb Space Telescope (JWST):\\n• In 2023, The JWST will spot a number of galaxies nicknamed \"green peas.\" They were given this name because they are small, round, and green, like peas.\\n• The telescope will capture images of galaxies that are over 13 billion years old. This means that the light from these galaxies has been traveling for over 13 billion years to reach us.\\n• Exoplanets, which are planets outside of our own solar system, were first discovered in 1992. The JWST will allow us to see them in greater detail when it is launched in 2023.\\nThese discoveries can spark a child\\'s imagination about the infinite wonders of the universe.'"
|
||||
"'Your 9-year old might like these recent discoveries made by The James Webb Space Telescope (JWST):\\n• In 2023, The JWST will spot a number of galaxies nicknamed \"green peas.\" They were given this name because they are small, round, and green, like peas.\\n• The telescope will capture images of galaxies that are over 13 billion years old. This means that the light from these galaxies has been traveling for over 13 billion years to reach us.\\n• Exoplanets, which are planets outside of our own solar system, were first discovered in 1992. The JWST will allow us to see them in greater detail than ever before.\\nThese discoveries can spark a child\\'s imagination about the infinite wonders of the universe.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
@@ -334,7 +334,7 @@
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain(llm=llm, verbose=True, max_checks=2)\n",
|
||||
"text = \"\"\"\n",
|
||||
"Your 9-year old might like these recent discoveries made by The James Webb Space Telescope (JWST):\n",
|
||||
"• In 2023, The JWST spotted a number of galaxies nicknamed \"green peas.\" They were given this name because they are small, round, and green, like peas.\n",
|
||||
@@ -407,8 +407,7 @@
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false. If the answer is false, a suggestion is given for a correction.\n",
|
||||
"\n",
|
||||
"Checked Assertions:\n",
|
||||
"\"\"\"\n",
|
||||
"Checked Assertions:\"\"\"\n",
|
||||
"\n",
|
||||
"- The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. True\n",
|
||||
"\n",
|
||||
@@ -429,8 +428,7 @@
|
||||
"- It is considered the northern branch of the Norwegian Sea. True\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"Original Summary:\n",
|
||||
"\"\"\"\n",
|
||||
"Original Summary:\"\"\"\n",
|
||||
"The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. It has an area of 465,000 square miles and is one of five oceans in the world, alongside the Pacific Ocean, Atlantic Ocean, Indian Ocean, and the Southern Ocean. It is the smallest of the five oceans and is covered almost entirely by water, some of which is frozen in the form of glaciers and icebergs. The sea is named after the island of Greenland, and is the Arctic Ocean's main outlet to the Atlantic. It is often frozen over so navigation is limited, and is considered the northern branch of the Norwegian Sea.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
@@ -445,7 +443,7 @@
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true or false.\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false.\n",
|
||||
"\n",
|
||||
"If all of the assertions are true, return \"True\". If any of the assertions are false, return \"False\".\n",
|
||||
"\n",
|
||||
@@ -557,8 +555,7 @@
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false. If the answer is false, a suggestion is given for a correction.\n",
|
||||
"\n",
|
||||
"Checked Assertions:\n",
|
||||
"\"\"\"\n",
|
||||
"Checked Assertions:\"\"\"\n",
|
||||
"\n",
|
||||
"- The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. True\n",
|
||||
"\n",
|
||||
@@ -577,8 +574,7 @@
|
||||
"- It is considered the northern branch of the Norwegian Sea. False - It is considered the northern branch of the Atlantic Ocean.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"Original Summary:\n",
|
||||
"\"\"\"\n",
|
||||
"Original Summary:\"\"\"\n",
|
||||
"\n",
|
||||
"The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. It has an area of 465,000 square miles and is an arm of the Arctic Ocean. It is covered almost entirely by water, some of which is frozen in the form of glaciers and icebergs. The sea is named after the island of Greenland, and is the Arctic Ocean's main outlet to the Atlantic. It is often frozen over so navigation is limited, and is considered the northern branch of the Norwegian Sea.\n",
|
||||
"\"\"\"\n",
|
||||
@@ -587,20 +583,14 @@
|
||||
"\n",
|
||||
"The output should have the same structure and formatting as the original summary.\n",
|
||||
"\n",
|
||||
"Summary:\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Summary:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true or false.\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false.\n",
|
||||
"\n",
|
||||
"If all of the assertions are true, return \"True\". If any of the assertions are false, return \"False\".\n",
|
||||
"\n",
|
||||
@@ -711,8 +701,7 @@
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false. If the answer is false, a suggestion is given for a correction.\n",
|
||||
"\n",
|
||||
"Checked Assertions:\n",
|
||||
"\"\"\"\n",
|
||||
"Checked Assertions:\"\"\"\n",
|
||||
"\n",
|
||||
"- The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. True\n",
|
||||
"\n",
|
||||
@@ -729,8 +718,7 @@
|
||||
"- It is considered the northern branch of the Atlantic Ocean. False - The Greenland Sea is considered part of the Arctic Ocean, not the Atlantic Ocean.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"Original Summary:\n",
|
||||
"\"\"\"\n",
|
||||
"Original Summary:\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. It has an area of 465,000 square miles and is an arm of the Arctic Ocean. It is covered almost entirely by water, some of which is frozen in the form of glaciers and icebergs. The sea is named after the country of Greenland, and is the Arctic Ocean's main outlet to the Atlantic. It is often frozen over so navigation is limited, and is considered the northern branch of the Atlantic Ocean.\n",
|
||||
@@ -747,7 +735,7 @@
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true or false.\n",
|
||||
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false.\n",
|
||||
"\n",
|
||||
"If all of the assertions are true, return \"True\". If any of the assertions are false, return \"False\".\n",
|
||||
"\n",
|
||||
@@ -825,14 +813,14 @@
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain(llm=llm, verbose=True, max_checks=3)\n",
|
||||
"text = \"The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. It has an area of 465,000 square miles and is one of five oceans in the world, alongside the Pacific Ocean, Atlantic Ocean, Indian Ocean, and the Southern Ocean. It is the smallest of the five oceans and is covered almost entirely by water, some of which is frozen in the form of glaciers and icebergs. The sea is named after the island of Greenland, and is the Arctic Ocean's main outlet to the Atlantic. It is often frozen over so navigation is limited, and is considered the northern branch of the Norwegian Sea.\"\n",
|
||||
"checker_chain.run(text)"
|
||||
]
|
||||
},
|
||||
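For context on the change above: the diff swaps between the `LLMSummarizationCheckerChain.from_llm(...)` classmethod and direct keyword construction. A minimal sketch of both styles, assuming an `OPENAI_API_KEY` in the environment (use whichever form your installed version supports):

```python
from langchain.chains import LLMSummarizationCheckerChain
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)

# Classmethod form (one side of the diff)
checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)

# Keyword form (the other side of the diff)
checker_chain = LLMSummarizationCheckerChain(llm=llm, max_checks=3, verbose=True)

checker_chain.run("Mammals can lay eggs, birds can lay eggs, therefore birds are mammals.")
```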
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -1089,7 +1077,7 @@
|
||||
"'Birds are not mammals, but they are a class of their own. They lay eggs, unlike mammals which give birth to live young.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -1099,10 +1087,17 @@
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain(llm=llm, max_checks=3, verbose=True)\n",
|
||||
"text = \"Mammals can lay eggs, birds can lay eggs, therefore birds are mammals.\"\n",
|
||||
"checker_chain.run(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0, max_tokens=512)"
|
||||
"llm = OpenAI(model_name='code-davinci-002', temperature=0, max_tokens=512)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -63,9 +63,7 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3ef64b27",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -73,17 +71,17 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mdef solution():\n",
|
||||
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mdef solution():\n",
|
||||
" \"\"\"Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?\"\"\"\n",
|
||||
" cindy_pets = 4\n",
|
||||
" marcia_pets = cindy_pets + 2\n",
|
||||
" jan_pets = marcia_pets * 3\n",
|
||||
" total_pets = cindy_pets + marcia_pets + jan_pets\n",
|
||||
" result = total_pets\n",
|
||||
" return result\u001b[0m\n",
|
||||
" return result\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
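The `PALChain` trace above comes from the program-aided LM pattern: the LLM writes a short Python function and the chain executes it to get the answer. A minimal sketch of the setup, assuming the `from_math_prompt` constructor used by notebooks of this era and an `OPENAI_API_KEY` in the environment:

```python
from langchain.chains import PALChain
from langchain.llms import OpenAI

llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, verbose=True)

question = (
    "Jan has three times the number of pets as Marcia. Marcia has two more "
    "pets than Cindy. If Cindy has four pets, how many total pets do the three have?"
)
pal_chain.run(question)  # the chain generates solution() as shown above and returns its result
```

The colored-objects traces further down use the sibling constructor, `PALChain.from_colored_object_prompt(llm, verbose=True)`.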
{
|
||||
@@ -141,8 +139,8 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
|
||||
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m# Put objects into a list to record ordering\n",
|
||||
"objects = []\n",
|
||||
"objects += [('booklet', 'blue')] * 2\n",
|
||||
"objects += [('booklet', 'purple')] * 2\n",
|
||||
@@ -153,9 +151,9 @@
|
||||
"\n",
|
||||
"# Count number of purple objects\n",
|
||||
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
|
||||
"answer = num_purple\u001b[0m\n",
|
||||
"answer = num_purple\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished PALChain chain.\u001b[0m\n"
|
||||
"\u001B[1m> Finished PALChain chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -214,8 +212,8 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
|
||||
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m# Put objects into a list to record ordering\n",
|
||||
"objects = []\n",
|
||||
"objects += [('booklet', 'blue')] * 2\n",
|
||||
"objects += [('booklet', 'purple')] * 2\n",
|
||||
@@ -226,9 +224,9 @@
|
||||
"\n",
|
||||
"# Count number of purple objects\n",
|
||||
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
|
||||
"answer = num_purple\u001b[0m\n",
|
||||
"answer = num_purple\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -282,7 +280,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -73,7 +73,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)"
|
||||
]
|
||||
},
|
||||
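As with the summarization checker earlier, this diff aligns the two construction styles for `SQLDatabaseChain`. A hedged sketch, assuming the Chinook SQLite database used by the original notebook:

```python
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase
from langchain.chains import SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = OpenAI(temperature=0)

# Classmethod form (one side of the diff)
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
# Keyword form (the other side); note the parameter name is `database`
db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)

db_chain.run("How many employees are there?")
```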
{
|
||||
@@ -175,7 +175,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, prompt=PROMPT, verbose=True)"
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, prompt=PROMPT, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -230,7 +230,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, prompt=PROMPT, verbose=True, return_intermediate_steps=True)"
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, prompt=PROMPT, verbose=True, return_intermediate_steps=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -285,7 +285,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True, top_k=3)"
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True, top_k=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -407,7 +407,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -569,7 +569,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)\n",
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)\n",
|
||||
"db_chain.run(\"What are some example tracks by Bach?\")"
|
||||
]
|
||||
},
|
||||
@@ -681,7 +681,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "593f7553-7038-498e-96d4-8255e5ce34f0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Creating a custom Chain\n",
|
||||
"\n",
|
||||
"To implement your own custom chain you can subclass `Chain` and implement the following methods:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "c19c736e-ca74-4726-bb77-0a849bcc2960",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"vscode": {
|
||||
"languageId": "python"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from __future__ import annotations\n",
|
||||
"\n",
|
||||
"from typing import Any, Dict, List, Optional\n",
|
||||
"\n",
|
||||
"from pydantic import Extra\n",
|
||||
"\n",
|
||||
"from langchain.base_language import BaseLanguageModel\n",
|
||||
"from langchain.callbacks.manager import (\n",
|
||||
" AsyncCallbackManagerForChainRun,\n",
|
||||
" CallbackManagerForChainRun,\n",
|
||||
")\n",
|
||||
"from langchain.chains.base import Chain\n",
|
||||
"from langchain.prompts.base import BasePromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomChain(Chain):\n",
|
||||
" \"\"\"\n",
|
||||
" An example of a custom chain.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" prompt: BasePromptTemplate\n",
|
||||
" \"\"\"Prompt object to use.\"\"\"\n",
|
||||
" llm: BaseLanguageModel\n",
|
||||
" output_key: str = \"text\" #: :meta private:\n",
|
||||
"\n",
|
||||
" class Config:\n",
|
||||
" \"\"\"Configuration for this pydantic object.\"\"\"\n",
|
||||
"\n",
|
||||
" extra = Extra.forbid\n",
|
||||
" arbitrary_types_allowed = True\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def input_keys(self) -> List[str]:\n",
|
||||
" \"\"\"Will be whatever keys the prompt expects.\n",
|
||||
"\n",
|
||||
" :meta private:\n",
|
||||
" \"\"\"\n",
|
||||
" return self.prompt.input_variables\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def output_keys(self) -> List[str]:\n",
|
||||
" \"\"\"Will always return text key.\n",
|
||||
"\n",
|
||||
" :meta private:\n",
|
||||
" \"\"\"\n",
|
||||
" return [self.output_key]\n",
|
||||
"\n",
|
||||
" def _call(\n",
|
||||
" self,\n",
|
||||
" inputs: Dict[str, Any],\n",
|
||||
" run_manager: Optional[CallbackManagerForChainRun] = None,\n",
|
||||
" ) -> Dict[str, str]:\n",
|
||||
" # Your custom chain logic goes here\n",
|
||||
" # This is just an example that mimics LLMChain\n",
|
||||
" prompt_value = self.prompt.format_prompt(**inputs)\n",
|
||||
" \n",
|
||||
" # Whenever you call a language model, or another chain, you should pass\n",
|
||||
" # a callback manager to it. This allows the inner run to be tracked by\n",
|
||||
" # any callbacks that are registered on the outer run.\n",
|
||||
" # You can always obtain a callback manager for this by calling\n",
|
||||
" # `run_manager.get_child()` as shown below.\n",
|
||||
" response = self.llm.generate_prompt(\n",
|
||||
" [prompt_value],\n",
|
||||
" callbacks=run_manager.get_child() if run_manager else None\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # If you want to log something about this run, you can do so by calling\n",
|
||||
" # methods on the `run_manager`, as shown below. This will trigger any\n",
|
||||
" # callbacks that are registered for that event.\n",
|
||||
" if run_manager:\n",
|
||||
" run_manager.on_text(\"Log something about this run\")\n",
|
||||
" \n",
|
||||
" return {self.output_key: response.generations[0][0].text}\n",
|
||||
"\n",
|
||||
" async def _acall(\n",
|
||||
" self,\n",
|
||||
" inputs: Dict[str, Any],\n",
|
||||
" run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n",
|
||||
" ) -> Dict[str, str]:\n",
|
||||
" # Your custom chain logic goes here\n",
|
||||
" # This is just an example that mimics LLMChain\n",
|
||||
" prompt_value = self.prompt.format_prompt(**inputs)\n",
|
||||
" \n",
|
||||
" # Whenever you call a language model, or another chain, you should pass\n",
|
||||
" # a callback manager to it. This allows the inner run to be tracked by\n",
|
||||
" # any callbacks that are registered on the outer run.\n",
|
||||
" # You can always obtain a callback manager for this by calling\n",
|
||||
" # `run_manager.get_child()` as shown below.\n",
|
||||
" response = await self.llm.agenerate_prompt(\n",
|
||||
" [prompt_value],\n",
|
||||
" callbacks=run_manager.get_child() if run_manager else None\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # If you want to log something about this run, you can do so by calling\n",
|
||||
" # methods on the `run_manager`, as shown below. This will trigger any\n",
|
||||
" # callbacks that are registered for that event.\n",
|
||||
" if run_manager:\n",
|
||||
" await run_manager.on_text(\"Log something about this run\")\n",
|
||||
" \n",
|
||||
" return {self.output_key: response.generations[0][0].text}\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def _chain_type(self) -> str:\n",
|
||||
" return \"my_custom_chain\"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "18361f89",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "python"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new MyCustomChain chain...\u001b[0m\n",
|
||||
"Log something about this run\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Why did the callback function feel lonely? Because it was always waiting for someone to call it back!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.stdout import StdOutCallbackHandler\n",
|
||||
"from langchain.chat_models.openai import ChatOpenAI\n",
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = MyCustomChain(\n",
|
||||
" prompt=PromptTemplate.from_template('tell us a joke about {topic}'),\n",
|
||||
" llm=ChatOpenAI()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain.run({'topic': 'callbacks'}, callbacks=[StdOutCallbackHandler()])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -185,6 +185,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -554,7 +555,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.8.16"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -589,6 +589,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.llm import LLMChain\n",
|
||||
"from langchain.callbacks.base import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT\n",
|
||||
"from langchain.chains.question_answering import load_qa_chain\n",
|
||||
@@ -596,7 +597,7 @@
|
||||
"# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n",
|
||||
"# and a separate, non-streaming llm for question generation\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"streaming_llm = OpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
|
||||
"streaming_llm = OpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
|
||||
"\n",
|
||||
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
|
||||
"doc_chain = load_qa_chain(streaming_llm, chain_type=\"stuff\", prompt=QA_PROMPT)\n",
|
||||
|
||||
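To complete the streaming snippet above: the two LLMs are wired into a `ConversationalRetrievalChain` so that only the answer-combining step streams tokens. A minimal sketch, assuming a `vectorstore` has already been built; the question is illustrative:

```python
from langchain.chains import ConversationalRetrievalChain

qa = ConversationalRetrievalChain(
    retriever=vectorstore.as_retriever(),   # assumes an existing vectorstore
    combine_docs_chain=doc_chain,           # streams tokens via streaming_llm
    question_generator=question_generator,  # non-streaming condense-question step
)
result = qa({"question": "What did the president say about justice?", "chat_history": []})
```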
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Question Answering with Sources\n",
|
||||
"\n",
|
||||
"This notebook walks through how to use LangChain for question answering with sources over a list of documents. It covers four different chain types: `stuff`, `map_reduce`, `refine`,`map-rerank`. For a more in depth explanation of what these chain types are, see [here](https://docs.langchain.com/docs/components/chains/index_related_chains)."
|
||||
"This notebook walks through how to use LangChain for question answering with sources over a list of documents. It covers four different chain types: `stuff`, `map_reduce`, `refine`,`map-rerank`. For a more in depth explanation of what these chain types are, see [here](../combine_docs.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Question Answering\n",
|
||||
"\n",
|
||||
"This notebook walks through how to use LangChain for question answering over a list of documents. It covers four different types of chains: `stuff`, `map_reduce`, `refine`, `map_rerank`. For a more in depth explanation of what these chain types are, see [here](https://docs.langchain.com/docs/components/chains/index_related_chains)."
|
||||
"This notebook walks through how to use LangChain for question answering over a list of documents. It covers four different types of chains: `stuff`, `map_reduce`, `refine`, `map_rerank`. For a more in depth explanation of what these chain types are, see [here](../combine_docs.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,177 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bda1f3f5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Arxiv\n",
|
||||
"\n",
|
||||
"[arXiv](https://arxiv.org/) is an open-access archive for 2 million scholarly articles in the fields of physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and systems science, and economics.\n",
|
||||
"\n",
|
||||
"This notebook shows how to load scientific articles from `Arxiv.org` into a document format that we can use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b7a1eef-7bf7-4e7d-8bfc-c4e27c9488cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2abd5578-aa3d-46b9-99af-8b262f0b3df8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First, you need to install `arxiv` python package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b674aaea-ed3a-4541-8414-260a8f67f623",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install arxiv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "094b5f13-7e54-4354-9d83-26d6926ecaa0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"Second, you need to install `PyMuPDF` python package which transform PDF files from the `arxiv.org` site into the text fromat."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7cd91121-2e96-43ba-af50-319853695f86",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install pymupdf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "95f05e1c-195e-4e2b-ae8e-8d6637f15be6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Examples"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e29b954c-1407-4797-ae21-6ba8937156be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`ArxivLoader` has these arguments:\n",
|
||||
"- `query`: free text which used to find documents in the Arxiv\n",
|
||||
"- optional `load_max_docs`: default=100. Use it to limit number of downloaded documents. It takes time to download all 100 documents, so use a small number for experiments.\n",
|
||||
"- optional `load_all_available_meta`: default=False. By defaul only the most important fields downloaded: `Published` (date when document was published/last updated), `Title`, `Authors`, `Summary`. If True, other fields also downloaded."
|
||||
]
|
||||
},
|
||||
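A short sketch of the third argument described above, fetching the full metadata set rather than the default four fields (a minimal example, reusing the same query as the cells below):

```python
from langchain.document_loaders import ArxivLoader

docs = ArxivLoader(
    query="1605.08386",
    load_max_docs=2,
    load_all_available_meta=True,  # include fields beyond Published/Title/Authors/Summary
).load()
docs[0].metadata
```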
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9bfd5e46",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.base import Document\n",
|
||||
"from langchain.document_loaders import ArxivLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "700e4ef2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = ArxivLoader(query=\"1605.08386\", load_max_docs=2).load()\n",
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8977bac0-0042-4f23-9754-247dbd32439b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'Published': '2016-05-26',\n",
|
||||
" 'Title': 'Heat-bath random walks with Markov bases',\n",
|
||||
" 'Authors': 'Caprice Stanley, Tobias Windisch',\n",
|
||||
" 'Summary': 'Graphs on lattice points are studied whose edges come from a finite set of\\nallowed moves of arbitrary length. We show that the diameter of these graphs on\\nfibers of a fixed integer matrix can be bounded from above by a constant. We\\nthen study the mixing behaviour of heat-bath random walks on these graphs. We\\nalso state explicit conditions on the set of moves so that the heat-bath random\\nwalk, a generalization of the Glauber dynamics, is an expander in fixed\\ndimension.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"doc[0].metadata # meta-information of the Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "46969806-45a9-4c4d-a61b-cfb9658fc9de",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'arXiv:1605.08386v1 [math.CO] 26 May 2016\\nHEAT-BATH RANDOM WALKS WITH MARKOV BASES\\nCAPRICE STANLEY AND TOBIAS WINDISCH\\nAbstract. Graphs on lattice points are studied whose edges come from a finite set of\\nallowed moves of arbitrary length. We show that the diameter of these graphs on fibers of a\\nfixed integer matrix can be bounded from above by a constant. We then study the mixing\\nbehaviour of heat-b'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"doc[0].page_content[:400] # all pages of the Document content\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -7,7 +7,7 @@
|
||||
"id": "vm8vn9t8DvC_"
|
||||
},
|
||||
"source": [
|
||||
"# Blockchain"
|
||||
"# Blockchain Document Loader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -31,7 +31,7 @@
|
||||
"\n",
|
||||
"Initially this Loader supports:\n",
|
||||
"\n",
|
||||
"* Loading NFTs as Documents from NFT Smart Contracts (ERC721 and ERC1155)\n",
|
||||
"\n",
|
||||
"* Ethereum Maninnet, Ethereum Testnet, Polgyon Mainnet, Polygon Testnet (default is eth-mainnet)\n",
|
||||
"* Alchemy's getNFTsForCollection API\n",
|
||||
"\n",
|
||||
@@ -39,14 +39,14 @@
|
||||
"\n",
|
||||
"* Additional APIs can be added (e.g. Tranction-related APIs)\n",
|
||||
"\n",
|
||||
"This Document Loader Requires:\n",
|
||||
"To run this notebook, the user will need:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"* An OpenAI key (for OpenAI models)\n",
|
||||
"* A free [Alchemy API Key](https://www.alchemy.com/)\n",
|
||||
"\n",
|
||||
"The output takes the following format:\n",
|
||||
"\n",
|
||||
"- pageContent= Individual NFT\n",
|
||||
"- metadata={'source': '0x1a92f7381b9f03921564a437210bb9396471050c', 'blockchain': 'eth-mainnet', 'tokenId': '0x15'})"
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -54,16 +54,55 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load NFTs into Document Loader"
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 48,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install langchain -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"alchemyApiKey = \"get from https://www.alchemy.com/ and set in environment variable ALCHEMY_API_KEY\""
|
||||
"from langchain.document_loaders import BlockchainDocumentLoader\n",
|
||||
"from langchain.document_loaders.blockchain import BlockchainType\n",
|
||||
"import os"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"alchemyApiKey = \"get your own key from https://www.alchemy.com/\" \n",
|
||||
"os.environ[\"ALCHEMY_API_KEY\"] = alchemyApiKey"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "nzuPWRaBNCMx"
|
||||
},
|
||||
"source": [
|
||||
"## Create a Blockchain Document Loader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -76,18 +115,29 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 24,
|
||||
"metadata": {
|
||||
"id": "J3LWHARC-Kn0"
|
||||
},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content=\"{'contract': {'address': '0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d'}, 'id': {'tokenId': '0x0000000000000000000000000000000000000000000000000000000000000000', 'tokenMetadata': {'tokenType': 'ERC721'}}, 'title': '', 'description': '', 'tokenUri': {'gateway': 'https://alchemy.mypinata.cloud/ipfs/QmeSjSinHpPnmXmspMjwiXyN6zS4E9zccariGR3jxcaWtq/0', 'raw': 'ipfs://QmeSjSinHpPnmXmspMjwiXyN6zS4E9zccariGR3jxcaWtq/0'}, 'media': [{'gateway': 'https://nft-cdn.alchemy.com/eth-mainnet/415d618f5fef7bfe683e02d4653c4289', 'thumbnail': 'https://res.cloudinary.com/alchemyapi/image/upload/thumbnailv2/eth-mainnet/415d618f5fef7bfe683e02d4653c4289', 'raw': 'ipfs://QmRRPWG96cmgTn2qSzjwr2qvfNEuhunv6FNeMFGa9bx6mQ', 'format': 'png', 'bytes': 133270}], 'metadata': {'image': 'ipfs://QmRRPWG96cmgTn2qSzjwr2qvfNEuhunv6FNeMFGa9bx6mQ', 'attributes': [{'value': 'Silver Hoop', 'trait_type': 'Earring'}, {'value': 'Orange', 'trait_type': 'Background'}, {'value': 'Robot', 'trait_type': 'Fur'}, {'value': 'Striped Tee', 'trait_type': 'Clothes'}, {'value': 'Discomfort', 'trait_type': 'Mouth'}, {'value': 'X Eyes', 'trait_type': 'Eyes'}]}, 'timeLastUpdated': '2023-04-18T04:05:27.817Z', 'contractMetadata': {'name': 'BoredApeYachtClub', 'symbol': 'BAYC', 'totalSupply': '10000', 'tokenType': 'ERC721', 'contractDeployer': '0xaba7161a7fb69c88e16ed9f455ce62b791ee4d03', 'deployedBlockNumber': 12287507, 'openSea': {'floorPrice': 68.16, 'collectionName': 'Bored Ape Yacht Club', 'safelistRequestStatus': 'verified', 'imageUrl': 'https://i.seadn.io/gae/Ju9CkWtV-1Okvf45wo8UctR-M9He2PjILP0oOvxE89AyiPPGtrR3gysu1Zgy0hjd2xKIgjJJtWIc0ybj4Vd7wv8t3pxDGHoJBzDB?w=500&auto=format', 'description': 'The Bored Ape Yacht Club is a collection of 10,000 unique Bored Ape NFTs— unique digital collectibles living on the Ethereum blockchain. Your Bored Ape doubles as your Yacht Club membership card, and grants access to members-only benefits, the first of which is access to THE BATHROOM, a collaborative graffiti board. Future areas and perks can be unlocked by the community through roadmap activation. Visit www.BoredApeYachtClub.com for more details.', 'externalUrl': 'http://www.boredapeyachtclub.com/', 'twitterUsername': 'BoredApeYC', 'discordUrl': 'https://discord.gg/3P5K3dzgdB', 'lastIngestedAt': '2023-03-21T03:54:33.000Z'}}}\", metadata={'tokenId': '0x0000000000000000000000000000000000000000000000000000000000000000'}),\n",
|
||||
" Document(page_content=\"{'contract': {'address': '0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d'}, 'id': {'tokenId': '0x0000000000000000000000000000000000000000000000000000000000000001', 'tokenMetadata': {'tokenType': 'ERC721'}}, 'title': '', 'description': '', 'tokenUri': {'gateway': 'https://alchemy.mypinata.cloud/ipfs/QmeSjSinHpPnmXmspMjwiXyN6zS4E9zccariGR3jxcaWtq/1', 'raw': 'ipfs://QmeSjSinHpPnmXmspMjwiXyN6zS4E9zccariGR3jxcaWtq/1'}, 'media': [{'gateway': 'https://nft-cdn.alchemy.com/eth-mainnet/65558a4d0c5b0c56fbc50bf03f55e3fa', 'thumbnail': 'https://res.cloudinary.com/alchemyapi/image/upload/thumbnailv2/eth-mainnet/65558a4d0c5b0c56fbc50bf03f55e3fa', 'raw': 'ipfs://QmPbxeGcXhYQQNgsC6a36dDyYUcHgMLnGKnF8pVFmGsvqi', 'format': 'png', 'bytes': 171425}], 'metadata': {'image': 'ipfs://QmPbxeGcXhYQQNgsC6a36dDyYUcHgMLnGKnF8pVFmGsvqi', 'attributes': [{'value': 'Grin', 'trait_type': 'Mouth'}, {'value': 'Vietnam Jacket', 'trait_type': 'Clothes'}, {'value': 'Orange', 'trait_type': 'Background'}, {'value': 'Blue Beams', 'trait_type': 'Eyes'}, {'value': 'Robot', 'trait_type': 'Fur'}]}, 'timeLastUpdated': '2023-04-24T04:37:37.738Z', 'contractMetadata': {'name': 'BoredApeYachtClub', 'symbol': 'BAYC', 'totalSupply': '10000', 'tokenType': 'ERC721', 'contractDeployer': '0xaba7161a7fb69c88e16ed9f455ce62b791ee4d03', 'deployedBlockNumber': 12287507, 'openSea': {'floorPrice': 68.16, 'collectionName': 'Bored Ape Yacht Club', 'safelistRequestStatus': 'verified', 'imageUrl': 'https://i.seadn.io/gae/Ju9CkWtV-1Okvf45wo8UctR-M9He2PjILP0oOvxE89AyiPPGtrR3gysu1Zgy0hjd2xKIgjJJtWIc0ybj4Vd7wv8t3pxDGHoJBzDB?w=500&auto=format', 'description': 'The Bored Ape Yacht Club is a collection of 10,000 unique Bored Ape NFTs— unique digital collectibles living on the Ethereum blockchain. Your Bored Ape doubles as your Yacht Club membership card, and grants access to members-only benefits, the first of which is access to THE BATHROOM, a collaborative graffiti board. Future areas and perks can be unlocked by the community through roadmap activation. Visit www.BoredApeYachtClub.com for more details.', 'externalUrl': 'http://www.boredapeyachtclub.com/', 'twitterUsername': 'BoredApeYC', 'discordUrl': 'https://discord.gg/3P5K3dzgdB', 'lastIngestedAt': '2023-03-21T03:54:33.000Z'}}}\", metadata={'tokenId': '0x0000000000000000000000000000000000000000000000000000000000000001'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"contractAddress = \"0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d\" # Bored Ape Yacht Club contract address\n",
|
||||
"\n",
|
||||
"blockchainType = BlockchainType.ETH_MAINNET #default value, optional parameter\n",
|
||||
"\n",
|
||||
"blockchainLoader = BlockchainDocumentLoader(contract_address=contractAddress,\n",
|
||||
" api_key=alchemyApiKey)\n",
|
||||
"blockchainLoader = BlockchainDocumentLoader(contractAddress)\n",
|
||||
"\n",
|
||||
"nfts = blockchainLoader.load()\n",
|
||||
"\n",
|
||||
@@ -104,22 +154,265 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 36,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content=\"{'contract': {'address': '0x448676ffcd0adf2d85c1f0565e8dde6924a9a7d9'}, 'id': {'tokenId': '0x01', 'tokenMetadata': {'tokenType': 'ERC1155'}}, 'title': 'Wyatt Horton #0001', 'description': 'A sleepy capybara', 'tokenUri': {'gateway': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/metadata/1.json', 'raw': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/metadata/1.json'}, 'media': [{'gateway': 'https://nft-cdn.alchemy.com/matic-mainnet/9085e06ff9f6c9074de91801d1c72d26', 'thumbnail': 'https://res.cloudinary.com/alchemyapi/image/upload/thumbnailv2/matic-mainnet/9085e06ff9f6c9074de91801d1c72d26', 'raw': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/images/1.png', 'format': 'png', 'bytes': 769622}], 'metadata': {'name': 'Wyatt Horton #0001', 'description': 'A sleepy capybara', 'image': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/images/1.png', 'attributes': [{'value': 'Avatar', 'trait_type': 'Type'}, {'value': 'Animal', 'trait_type': 'Category'}, {'value': 'Capybara', 'trait_type': 'Class'}, {'value': 'Fall 2022', 'trait_type': 'Collection'}, {'value': 'Furry', 'trait_type': 'Feature'}]}, 'timeLastUpdated': '2023-04-20T14:38:24.947Z', 'contractMetadata': {'name': 'Smoothstack - Avatars', 'symbol': 'SMTH', 'tokenType': 'ERC1155', 'contractDeployer': '0x23075b2523c6563b06920a302a8be4f90ef6e974', 'deployedBlockNumber': 34752389, 'openSea': {'lastIngestedAt': '2023-04-17T20:59:42.000Z'}}}\", metadata={'tokenId': '0x01'}),\n",
|
||||
" Document(page_content=\"{'contract': {'address': '0x448676ffcd0adf2d85c1f0565e8dde6924a9a7d9'}, 'id': {'tokenId': '0x02', 'tokenMetadata': {'tokenType': 'ERC1155'}}, 'title': 'Dylan Leisler #0002', 'description': 'A chipper cat with a big, red bowtie', 'tokenUri': {'gateway': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/metadata/2.json', 'raw': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/metadata/2.json'}, 'media': [{'gateway': 'https://nft-cdn.alchemy.com/matic-mainnet/67c3c7ccef44b32bf2ce758e8e73dbcd', 'thumbnail': 'https://res.cloudinary.com/alchemyapi/image/upload/thumbnailv2/matic-mainnet/67c3c7ccef44b32bf2ce758e8e73dbcd', 'raw': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/images/2.png', 'format': 'png', 'bytes': 1187749}], 'metadata': {'name': 'Dylan Leisler #0002', 'description': 'A chipper cat with a big, red bowtie', 'image': 'https://storage.googleapis.com/minted-nfts/smoothstack/avatars/images/2.png', 'attributes': [{'value': 'Avatar', 'trait_type': 'Type'}, {'value': 'Animal', 'trait_type': 'Category'}, {'value': 'Cat', 'trait_type': 'Class'}, {'value': 'Fall 2022', 'trait_type': 'Collection'}, {'value': 'Red Bowtie', 'trait_type': 'Feature'}]}, 'timeLastUpdated': '2023-04-23T13:38:29.316Z', 'contractMetadata': {'name': 'Smoothstack - Avatars', 'symbol': 'SMTH', 'tokenType': 'ERC1155', 'contractDeployer': '0x23075b2523c6563b06920a302a8be4f90ef6e974', 'deployedBlockNumber': 34752389, 'openSea': {'lastIngestedAt': '2023-04-17T20:59:42.000Z'}}}\", metadata={'tokenId': '0x02'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 36,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"contractAddress = \"0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9\" # Polygon Mainnet contract address\n",
|
||||
"\n",
|
||||
"blockchainType = BlockchainType.POLYGON_MAINNET \n",
|
||||
"\n",
|
||||
"blockchainLoader = BlockchainDocumentLoader(contract_address=contractAddress, \n",
|
||||
" blockchainType=blockchainType, \n",
|
||||
" api_key=alchemyApiKey)\n",
|
||||
"blockchainLoader = BlockchainDocumentLoader(contractAddress, blockchainType)\n",
|
||||
"\n",
|
||||
"nfts = blockchainLoader.load()\n",
|
||||
"\n",
|
||||
"nfts[:2]"
|
||||
]
|
||||
},
|
||||
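For reference, both cells above follow the same pattern; the only variation is the `BlockchainType` value and the contract address. A hedged sketch of the full keyword form (the parameter names follow the notebook; the API key string is a placeholder):

```python
from langchain.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

loader = BlockchainDocumentLoader(
    contract_address="0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d",  # BAYC, as above
    blockchainType=BlockchainType.ETH_MAINNET,  # default; POLYGON_MAINNET as in the second cell
    api_key="your Alchemy API key",
)
nfts = loader.load()
```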
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## (Optional) Using the Blockchain Document Loader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "_PGkFfMCB8J3"
|
||||
},
|
||||
"source": [
|
||||
"### Setup Splitter and Index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install sentence_transformers chromadb openai tiktoken -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"metadata": {
|
||||
"id": "BwxxopOCCABh"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator\n",
|
||||
"from langchain.embeddings import HuggingFaceEmbeddings\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "JE_myAulCDSZ",
|
||||
"outputId": "99e16b6a-03b4-4e67-d4b4-9dd611a866ef"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"NUMBER OF DOCUMENTS: 424\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=0)\n",
|
||||
"\n",
|
||||
"docs = text_splitter.split_documents(nfts)\n",
|
||||
"print(\"NUMBER OF DOCUMENTS: \", len(docs))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"metadata": {
|
||||
"id": "d83yFuAuCKQS"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using embedded DuckDB without persistence: data will be transient\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"index = VectorstoreIndexCreator(\n",
|
||||
" embedding=HuggingFaceEmbeddings(),\n",
|
||||
" text_splitter=text_splitter).from_loaders([blockchainLoader])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "y0VfObeXDEXB"
|
||||
},
|
||||
"source": [
|
||||
"## Setup Models and Chains"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"openAiKey = \"put OpenAI key here\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = openAiKey"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"metadata": {
|
||||
"id": "hiNjDzP9C4pA"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "u-xDlKPaC_xg"
|
||||
},
|
||||
"source": [
|
||||
"### Retrieval Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"metadata": {
|
||||
"id": "BqP00JovC9R4"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llmOpenAI = OpenAI()\n",
|
||||
"\n",
|
||||
"chainQA = RetrievalQA.from_chain_type(llm=llmOpenAI, \n",
|
||||
" chain_type=\"map_reduce\",\n",
|
||||
" retriever=index.vectorstore.as_retriever(), \n",
|
||||
" verbose=True,\n",
|
||||
" input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 122
|
||||
},
|
||||
"id": "2Y3cVVKZDVNq",
|
||||
"outputId": "dfeea416-5193-47cf-e9dc-c17a5c1cd780"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new RetrievalQA chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Popular attributes include \"Avatar\" (Type), \"Character\" (Category), and \"Human\" or \"Wizard\" (Class).'"
|
||||
]
|
||||
},
|
||||
"execution_count": 44,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chainQA.run(\"What are some of the popular attributes?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 122
|
||||
},
|
||||
"id": "7o6ArPo9DXbz",
|
||||
"outputId": "b1f8ad43-27c7-4cdb-95a7-8c8bd6381c5a"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new RetrievalQA chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
},
|
||||
"text/plain": [
|
||||
"' There are 10,000 unique Bored Ape NFTs.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chainQA.run(\"How many NFTs are there?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -155,46 +155,6 @@
|
||||
" print(str(doc.metadata[\"page\"]) + \":\", doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d5c9879",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using MathPix\n",
|
||||
"\n",
|
||||
"Inspired by Daniel Gross's [https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21](https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "950eb58f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import MathpixPDFLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cb6fd473",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = MathpixPDFLoader(\"example_data/layout-parser-paper.pdf\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a1d41e1a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09d64998",
|
||||
@@ -606,50 +566,10 @@
|
||||
"Additionally, you can pass along any of the options from the [PyMuPDF documentation](https://pymupdf.readthedocs.io/en/latest/app1.html#plain-text/) as keyword arguments in the `load` call, and it will be pass along to the `get_text()` call."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "15b57eab",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## PyPDF Directory\n",
|
||||
"\n",
|
||||
"Load PDFs from directory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b9e521d9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import PyPDFDirectoryLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4b20590f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = PyPDFDirectoryLoader(\"example_data/\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "e5ead943",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
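A small follow-up sketch for the directory loader above: each returned `Document` carries `source` and `page` metadata (the same PyPDF-style metadata used earlier in this notebook), which is handy for spot-checking the load:

```python
# Print where the first few documents came from
for doc in docs[:3]:
    print(doc.metadata["source"], "page", doc.metadata["page"])
```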
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ea25b03c",
|
||||
"id": "1bf73c97",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -657,9 +577,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "langchain_dev",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "langchain_dev"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -671,7 +591,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Reddit\n",
|
||||
"\n",
|
||||
"This loader fetches the text from the Posts of Subreddits or Reddit users, using the `praw` Python package.\n",
|
||||
"\n",
|
||||
"Make a Reddit Application from https://www.reddit.com/prefs/apps/ and initialize the loader with with your Reddit API credentials."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import RedditPostsLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install praw"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load using 'subreddit' mode\n",
|
||||
"loader = RedditPostsLoader(\n",
|
||||
" client_id=\"YOUR CLIENT ID\",\n",
|
||||
" client_secret=\"YOUR CLIENT SECRET\",\n",
|
||||
" user_agent=\"extractor by u/Master_Ocelot8179\",\n",
|
||||
" categories=['new', 'hot'], # List of categories to load posts from\n",
|
||||
" mode = 'subreddit',\n",
|
||||
" search_queries=['investing', 'wallstreetbets'], # List of subreddits to load posts from\n",
|
||||
" number_posts=20 # Default value is 10\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# # or load using 'username' mode\n",
|
||||
"# loader = RedditPostsLoader(\n",
|
||||
"# client_id=\"YOUR CLIENT ID\",\n",
|
||||
"# client_secret=\"YOUR CLIENT SECRET\",\n",
|
||||
"# user_agent=\"extractor by u/Master_Ocelot8179\",\n",
|
||||
"# categories=['new', 'hot'], \n",
|
||||
"# mode = 'username',\n",
|
||||
"# search_queries=['ga3far', 'Master_Ocelot8179'], # List of usernames to load posts from\n",
|
||||
"# number_posts=20\n",
|
||||
"# )\n",
|
||||
"\n",
|
||||
"# Note: Categories can be only of following value - \"controversial\" \"hot\" \"new\" \"rising\" \"top\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Hello, I am not looking for investment advice. I will apply my own due diligence. However, I am interested if anyone knows as a UK resident how fees and exchange rate differences would impact performance?\\n\\nI am planning to create a pie of index funds (perhaps UK, US, europe) or find a fund with a good track record of long term growth at low rates. \\n\\nDoes anyone have any ideas?', metadata={'post_subreddit': 'r/investing', 'post_category': 'new', 'post_title': 'Long term retirement funds fees/exchange rate query', 'post_score': 1, 'post_id': '130pa6m', 'post_url': 'https://www.reddit.com/r/investing/comments/130pa6m/long_term_retirement_funds_feesexchange_rate_query/', 'post_author': Redditor(name='Badmanshiz')}),\n",
|
||||
" Document(page_content='I much prefer the Roth IRA and would rather rollover my 401k to that every year instead of keeping it in the limited 401k options. But if I rollover, will I be able to continue contributing to my 401k? Or will that close my account? I realize that there are tax implications of doing this but I still think it is the better option.', metadata={'post_subreddit': 'r/investing', 'post_category': 'new', 'post_title': 'Is it possible to rollover my 401k every year?', 'post_score': 3, 'post_id': '130ja0h', 'post_url': 'https://www.reddit.com/r/investing/comments/130ja0h/is_it_possible_to_rollover_my_401k_every_year/', 'post_author': Redditor(name='AnCap_Catholic')}),\n",
|
||||
" Document(page_content='Have a general question? Want to offer some commentary on markets? Maybe you would just like to throw out a neat fact that doesn\\'t warrant a self post? Feel free to post here! \\n\\nIf your question is \"I have $10,000, what do I do?\" or other \"advice for my personal situation\" questions, you should include relevant information, such as the following:\\n\\n* How old are you? What country do you live in? \\n* Are you employed/making income? How much? \\n* What are your objectives with this money? (Buy a house? Retirement savings?) \\n* What is your time horizon? Do you need this money next month? Next 20yrs? \\n* What is your risk tolerance? (Do you mind risking it at blackjack or do you need to know its 100% safe?) \\n* What are you current holdings? (Do you already have exposure to specific funds and sectors? Any other assets?) \\n* Any big debts (include interest rate) or expenses? \\n* And any other relevant financial information will be useful to give you a proper answer. \\n\\nPlease consider consulting our FAQ first - https://www.reddit.com/r/investing/wiki/faq\\nAnd our [side bar](https://www.reddit.com/r/investing/about/sidebar) also has useful resources. \\n\\nIf you are new to investing - please refer to Wiki - [Getting Started](https://www.reddit.com/r/investing/wiki/index/gettingstarted/)\\n\\nThe reading list in the wiki has a list of books ranging from light reading to advanced topics depending on your knowledge level. Link here - [Reading List](https://www.reddit.com/r/investing/wiki/readinglist)\\n\\nCheck the resources in the sidebar.\\n\\nBe aware that these answers are just opinions of Redditors and should be used as a starting point for your research. You should strongly consider seeing a registered investment adviser if you need professional support before making any financial decisions!', metadata={'post_subreddit': 'r/investing', 'post_category': 'new', 'post_title': 'Daily General Discussion and Advice Thread - April 27, 2023', 'post_score': 5, 'post_id': '130eszz', 'post_url': 'https://www.reddit.com/r/investing/comments/130eszz/daily_general_discussion_and_advice_thread_april/', 'post_author': Redditor(name='AutoModerator')}),\n",
|
||||
" Document(page_content=\"Based on recent news about salt battery advancements and the overall issues of lithium, I was wondering what would be feasible ways to invest into non-lithium based battery technologies? CATL is of course a choice, but the selection of brokers I currently have in my disposal don't provide HK stocks at all.\", metadata={'post_subreddit': 'r/investing', 'post_category': 'new', 'post_title': 'Investing in non-lithium battery technologies?', 'post_score': 2, 'post_id': '130d6qp', 'post_url': 'https://www.reddit.com/r/investing/comments/130d6qp/investing_in_nonlithium_battery_technologies/', 'post_author': Redditor(name='-manabreak')}),\n",
|
||||
" Document(page_content='Hello everyone,\\n\\nI would really like to invest in an ETF that follows spy or another big index, as I think this form of investment suits me best. \\n\\nThe problem is, that I live in Denmark where ETFs and funds are taxed annually on unrealised gains at quite a steep rate. This means that an ETF growing say 10% per year will only grow about 6%, which really ruins the long term effects of compounding interest.\\n\\nHowever stocks are only taxed on realised gains which is why they look more interesting to hold long term.\\n\\nI do not like the lack of diversification this brings, as I am looking to spend tonnes of time picking the right long term stocks.\\n\\nIt would be ideal to find a few stocks that over the long term somewhat follows the indexes. Does anyone have suggestions?\\n\\nI have looked at Nasdaq Inc. which quite closely follows Nasdaq 100. \\n\\nI really appreciate any help.', metadata={'post_subreddit': 'r/investing', 'post_category': 'new', 'post_title': 'Stocks that track an index', 'post_score': 7, 'post_id': '130auvj', 'post_url': 'https://www.reddit.com/r/investing/comments/130auvj/stocks_that_track_an_index/', 'post_author': Redditor(name='LeAlbertP')})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"documents = loader.load()\n",
|
||||
"documents[:5]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "env1",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Stripe\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from the Stripe REST API into a format that can be ingested into LangChain, along with example usage for vectorization."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"from langchain.document_loaders import StripeLoader\n",
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The Stripe API requires an access token, which can be found inside of the Stripe dashboard.\n",
|
||||
"\n",
|
||||
"This document loader also requires a `resource` option which defines what data you want to load.\n",
|
||||
"\n",
|
||||
"Following resources are available:\n",
|
||||
"\n",
|
||||
"`balance_transations` [Documentation](https://stripe.com/docs/api/balance_transactions/list)\n",
|
||||
"\n",
|
||||
"`charges` [Documentation](https://stripe.com/docs/api/charges/list)\n",
|
||||
"\n",
|
||||
"`customers` [Documentation](https://stripe.com/docs/api/customers/list)\n",
|
||||
"\n",
|
||||
"`events` [Documentation](https://stripe.com/docs/api/events/list)\n",
|
||||
"\n",
|
||||
"`refunds` [Documentation](https://stripe.com/docs/api/refunds/list)\n",
|
||||
"\n",
|
||||
"`disputes` [Documentation](https://stripe.com/docs/api/disputes/list)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"stripe_loader = StripeLoader(os.environ[\"STRIPE_ACCESS_TOKEN\"], \"charges\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a vectorstore retriver from the loader\n",
|
||||
"# see https://python.langchain.com/en/latest/modules/indexes/getting_started.html for more details\n",
|
||||
"\n",
|
||||
"index = VectorstoreIndexCreator().from_loaders([stripe_loader])\n",
|
||||
"stripe_doc_retriever = index.vectorstore.as_retriever()"
|
||||
]
|
||||
}
|
||||
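A one-line usage sketch for the retriever built above (the query text is illustrative):

```python
relevant_docs = stripe_doc_retriever.get_relevant_documents("recent refunds")
```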
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -10,78 +10,9 @@
|
||||
"This covers how to load Word documents into a document format that we can use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9438686b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using Docx2txt\n",
|
||||
"\n",
|
||||
"Load .docx using `Docx2txt` into a document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "7b80ea89",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import Docx2txtLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "99a12031",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = Docx2txtLoader(\"example_data/fake.docx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b92f68b0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "d83dd755",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8d40727d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using Unstructured"
|
||||
]
|
||||
},
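{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch of the same flow with `Unstructured` (assuming the `unstructured` package is installed; the cells below walk through it step by step):\n",
"\n",
"```python\n",
"from langchain.document_loaders import UnstructuredWordDocumentLoader\n",
"\n",
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\")\n",
"data = loader.load()\n",
"```"
]
},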
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 1,
|
||||
"id": "721c48aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -198,7 +129,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,330 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13afcae7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Self-querying retriever\n",
|
||||
"In the notebook we'll demo the `SelfQueryRetriever`, which, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to it's underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documented, but to also extract filters from the user query on the metadata of stored documents and to execute those filter."
|
||||
]
|
||||
},
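{
"cell_type": "markdown",
"metadata": {},
"source": [
"For instance (as demonstrated in the examples further down), a question like *\"Has Greta Gerwig directed any movies about women\"* is decomposed into a semantic query plus a metadata filter:\n",
"\n",
"```\n",
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig')\n",
"```"
]
},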
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68e75fb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating a Pinecone index\n",
|
||||
"First we'll want to create a Pinecone VectorStore and seed it with some data. We've created a small demo set of documents that contain summaries of movies.\n",
|
||||
"\n",
|
||||
"NOTE: The self-query retriever currently only has built-in support for Pinecone VectorStore.\n",
|
||||
"\n",
|
||||
"NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "63a8af5b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install lark"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "3eb9c9a4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pinecone/index.py:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
|
||||
" from tqdm.autonotebook import tqdm\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import pinecone\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"pinecone.init(api_key=os.environ[\"PINECONE_API_KEY\"], environment=os.environ[\"PINECONE_ENV\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cb4a5787",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Pinecone\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"# create new index\n",
|
||||
"pinecone.create_index(\"langchain-self-retriever-demo\", dimension=1536)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "bcbe04d9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = [\n",
|
||||
" Document(page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\", metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": [\"action\", \"science fiction\"]}),\n",
|
||||
" Document(page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\", metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2}),\n",
|
||||
" Document(page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\", metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6}),\n",
|
||||
" Document(page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\", metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3}),\n",
|
||||
" Document(page_content=\"Toys come alive and have a blast doing so\", metadata={\"year\": 1995, \"genre\": \"animated\"}),\n",
|
||||
" Document(page_content=\"Three men walk into the Zone, three men walk out of the Zone\", metadata={\"year\": 1979, \"rating\": 9.9, \"director\": \"Andrei Tarkovsky\", \"genre\": [\"science fiction\", \"thriller\"], \"rating\": 9.9})\n",
|
||||
"]\n",
|
||||
"vectorstore = Pinecone.from_documents(\n",
|
||||
" docs, embeddings, index_name=\"langchain-self-retriever-demo\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ecaab6d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Creating our self-querying retriever\n",
|
||||
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "86e34dbf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"\n",
|
||||
"metadata_field_info=[\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"genre\",\n",
|
||||
" description=\"The genre of the movie\", \n",
|
||||
" type=\"string or list[string]\", \n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"year\",\n",
|
||||
" description=\"The year the movie was released\", \n",
|
||||
" type=\"integer\", \n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"director\",\n",
|
||||
" description=\"The name of the movie director\", \n",
|
||||
" type=\"string\", \n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"rating\",\n",
|
||||
" description=\"A 1-10 rating for the movie\",\n",
|
||||
" type=\"float\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"document_content_description = \"Brief summary of a movie\"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"retriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea9df8d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Testing it out\n",
|
||||
"And now we can try actually using our retriever!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "38a126e9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='dinosaur' filter=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': ['action', 'science fiction'], 'rating': 7.7, 'year': 1993.0}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0}),\n",
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0}),\n",
|
||||
" Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'director': 'Christopher Nolan', 'rating': 8.2, 'year': 2010.0})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "fc3f1e6e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': ['science fiction', 'thriller'], 'rating': 9.9, 'year': 1979.0})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a filter\n",
|
||||
"retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "b19d4da0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'director': 'Greta Gerwig', 'rating': 8.3, 'year': 2019.0})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and a filter\n",
|
||||
"retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "f900e40e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction'), Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5)])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': ['science fiction', 'thriller'], 'rating': 9.9, 'year': 1979.0})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a composite filter\n",
|
||||
"retriever.get_relevant_documents(\"What's a highly rated (above 8.5) science fiction film?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "12a51522",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990.0), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005.0), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated')])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and composite filter\n",
|
||||
"retriever.get_relevant_documents(\"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "69bbd809",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ce0f17b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Vespa retriever\n",
|
||||
"\n",
|
||||
"This notebook shows how to use Vespa.ai as a LangChain retriever.\n",
|
||||
"Vespa.ai is a platform for highly efficient structured text and vector search.\n",
|
||||
"Please refer to [Vespa.ai](https://vespa.ai) for more information.\n",
|
||||
"\n",
|
||||
"In order to create a retriever, we use [pyvespa](https://pyvespa.readthedocs.io/en/latest/index.html) to\n",
|
||||
"create a connection a Vespa service."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c10dd962",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from vespa.application import Vespa\n",
|
||||
"\n",
|
||||
"vespa_app = Vespa(url=\"https://doc-search.vespa.oath.cloud\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3df4ce53",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This creates a connection to a Vespa service, here the Vespa documentation search service.\n",
|
||||
"Using pyvespa, you can also connect to a\n",
|
||||
"[Vespa Cloud instance](https://pyvespa.readthedocs.io/en/latest/deploy-vespa-cloud.html)\n",
|
||||
"or a local\n",
|
||||
"[Docker instance](https://pyvespa.readthedocs.io/en/latest/deploy-docker.html).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"After connecting to the service, you can set up the retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7ccca1f4",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers.vespa_retriever import VespaRetriever\n",
|
||||
"\n",
|
||||
"vespa_query_body = {\n",
|
||||
" \"yql\": \"select content from paragraph where userQuery()\",\n",
|
||||
" \"hits\": 5,\n",
|
||||
" \"ranking\": \"documentation\",\n",
|
||||
" \"locale\": \"en-us\"\n",
|
||||
"}\n",
|
||||
"vespa_content_field = \"content\"\n",
|
||||
"retriever = VespaRetriever(vespa_app, vespa_query_body, vespa_content_field)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e7e34e1",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"This sets up a LangChain retriever that fetches documents from the Vespa application.\n",
|
||||
"Here, up to 5 results are retrieved from the `content` field in the `paragraph` document type,\n",
|
||||
"using `doumentation` as the ranking method. The `userQuery()` is replaced with the actual query\n",
|
||||
"passed from LangChain.\n",
|
||||
"\n",
|
||||
"Please refer to the [pyvespa documentation](https://pyvespa.readthedocs.io/en/latest/getting-started-pyvespa.html#Query)\n",
|
||||
"for more information.\n",
|
||||
"\n",
|
||||
"Now you can return the results and continue using the results in LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f47a2bfe",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"what is vespa?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# tiktoken (OpenAI) Length Function\n",
|
||||
"You can also use tiktoken, an open source tokenizer package from OpenAI to estimate tokens used. Will probably be more accurate for their models.\n",
|
||||
"You can also use tiktoken, a open source tokenizer package from OpenAI to estimate tokens used. Will probably be more accurate for their models.\n",
|
||||
"\n",
|
||||
"1. How the text is split: by character passed in\n",
|
||||
"2. How the chunk size is measured: by `tiktoken` tokenizer"
|
||||
|
||||
@@ -6,21 +6,15 @@
|
||||
"source": [
|
||||
"# AnalyticDB\n",
|
||||
"\n",
|
||||
">[AnalyticDB for PostgreSQL](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) is a massively parallel processing (MPP) data warehousing service that is designed to analyze large volumes of data online.\n",
|
||||
"\n",
|
||||
">`AnalyticDB for PostgreSQL` is developed based on the open source `Greenplum Database` project and is enhanced with in-depth extensions by `Alibaba Cloud`. AnalyticDB for PostgreSQL is compatible with the ANSI SQL 2003 syntax and the PostgreSQL and Oracle database ecosystems. AnalyticDB for PostgreSQL also supports row store and column store. AnalyticDB for PostgreSQL processes petabytes of data offline at a high performance level and supports highly concurrent online queries.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `AnalyticDB` vector database.\n",
|
||||
"This notebook shows how to use functionality related to the AnalyticDB vector database.\n",
|
||||
"To run, you should have an [AnalyticDB](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) instance up and running:\n",
|
||||
"- Using [AnalyticDB Cloud Vector Database](https://www.alibabacloud.com/product/hybriddb-postgresql). Click here to fast deploy it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -30,10 +24,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Split documents and get embeddings by call OpenAI API"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -52,7 +48,6 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Connect to AnalyticDB by setting related ENVIRONMENTS.\n",
|
||||
"```\n",
|
||||
@@ -64,7 +59,10 @@
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then store your embeddings and documents into AnalyticDB"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
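{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the connect-and-store step (assuming the `PG_*` environment variables above are set, and that `docs` and `embeddings` come from the earlier cells):\n",
"\n",
"```python\n",
"import os\n",
"\n",
"connection_string = AnalyticDB.connection_string_from_db_params(\n",
"    driver=os.environ.get(\"PG_DRIVER\", \"psycopg2cffi\"),\n",
"    host=os.environ.get(\"PG_HOST\", \"localhost\"),\n",
"    port=int(os.environ.get(\"PG_PORT\", \"5432\")),\n",
"    database=os.environ.get(\"PG_DATABASE\", \"postgres\"),\n",
"    user=os.environ.get(\"PG_USER\", \"postgres\"),\n",
"    password=os.environ.get(\"PG_PASSWORD\", \"postgres\"),\n",
")\n",
"\n",
"vector_db = AnalyticDB.from_documents(docs, embeddings, connection_string=connection_string)\n",
"```"
]
},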
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -92,10 +90,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Query and retrieve data"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -129,6 +129,13 @@
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -147,9 +154,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
|
||||
@@ -1,31 +1,34 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Annoy\n",
|
||||
"\n",
|
||||
"> \"Annoy (Approximate Nearest Neighbors Oh Yeah) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are mmapped into memory so that many processes may share the same data.\"\n",
|
||||
"This notebook shows how to use functionality related to the Annoy vector database.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Annoy` vector database.\n",
|
||||
"> \"Annoy (Approximate Nearest Neighbors Oh Yeah) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are mmapped into memory so that many processes may share the same data.\"\n",
|
||||
"\n",
|
||||
"via [Annoy](https://github.com/spotify/annoy) \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "3b450bdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"NOTE: Annoy is read-only - once the index is built you cannot add any more emebddings!\n",
|
||||
"If you want to progressively add new entries to your VectorStore then better choose an alternative!\n",
|
||||
"Annoy is read-only - once the index is built you cannot add any more emebddings!\n",
|
||||
"If you want to progressively add to your VectorStore then better choose an alternative!\n",
|
||||
"```"
|
||||
]
|
||||
},
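{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch of building an index (assuming the `annoy` package is installed and `texts` is a list of strings prepared earlier):\n",
"\n",
"```python\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.vectorstores import Annoy\n",
"\n",
"embeddings_func = OpenAIEmbeddings()\n",
"# texts is assumed to be a list of strings split from your documents\n",
"vector_store = Annoy.from_texts(texts, embeddings_func)\n",
"```"
]
},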
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6613d222",
|
||||
"metadata": {},
|
||||
@@ -120,6 +123,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "4583b231",
|
||||
"metadata": {},
|
||||
@@ -261,6 +265,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "341390c2",
|
||||
"metadata": {},
|
||||
@@ -404,6 +409,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6f570f69",
|
||||
"metadata": {},
|
||||
@@ -466,6 +472,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "df4beb83",
|
||||
"metadata": {},
|
||||
@@ -557,7 +564,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -6,20 +6,7 @@
|
||||
"source": [
|
||||
"# AtlasDB\n",
|
||||
"\n",
|
||||
"This notebook shows you how to use functionality related to the `AtlasDB`.\n",
|
||||
"\n",
|
||||
">[MongoDB‘s](https://www.mongodb.com/) [Atlas](https://www.mongodb.com/cloud/atlas) is an on-demand fully managed service. `MongoDB Atlas` runs on `AWS`, `Microsoft Azure`, and `Google Cloud Platform`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install spacy"
|
||||
"This notebook shows you how to use functionality related to the AtlasDB"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -28,34 +15,7 @@
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
},
|
||||
"scrolled": true,
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!python3 -m spacy download en_core_web_sm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install nomic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -68,21 +28,31 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
},
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!python -m spacy download en_core_web_sm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ATLAS_TEST_API_KEY = '7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader('../../../state_of_the_union.txt')\n",
|
||||
@@ -101,8 +71,7 @@
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -196,6 +165,13 @@
|
||||
"source": [
|
||||
"db.project"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -214,9 +190,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
|
||||
@@ -7,68 +7,14 @@
|
||||
"source": [
|
||||
"# Chroma\n",
|
||||
"\n",
|
||||
">[Chroma](https://docs.trychroma.com/getting-started) is a database for building AI applications with embeddings.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Chroma` vector database."
|
||||
"This notebook shows how to use functionality related to the Chroma vector database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0825fa4a-d950-4e78-8bba-20cfcc347765",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install chromadb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "42080f37-8fd1-4cec-acd9-15d2b03b2f4d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# get a token: https://platform.openai.com/account/api-keys\n",
|
||||
"\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c7a94d6c-b4d4-4498-9bdd-eb50c92b85c5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -79,11 +25,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
@@ -97,11 +41,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"id": "5eabdb75",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
@@ -152,11 +94,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 5,
|
||||
"id": "72aaa9c8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = db.similarity_search_with_score(query)"
|
||||
@@ -164,20 +104,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 6,
|
||||
"id": "d88e958e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"(Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'}),\n",
|
||||
" 0.3949805498123169)"
|
||||
"(Document(page_content='In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \\n\\nWe cannot let this happen. \\n\\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
|
||||
" 0.3913410007953644)"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -232,6 +170,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "f568a322",
|
||||
"metadata": {},
|
||||
@@ -361,7 +300,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -6,30 +6,24 @@
|
||||
"source": [
|
||||
"# Deep Lake\n",
|
||||
"\n",
|
||||
">[Deep Lake](https://docs.activeloop.ai/) as a Multi-Modal Vector Store that stores embeddings and their metadata including text, jsons, images, audio, video, and more. It saves the data locally, in your cloud, or on Activeloop storage. It performs hybrid search including embeddings and their attributes.\n",
|
||||
"This notebook showcases basic functionality related to Deep Lake. While Deep Lake can store embeddings, it is capable of storing any type of data. It is a fully fledged serverless data lake with version control, query engine and streaming dataloader to deep learning frameworks. \n",
|
||||
"\n",
|
||||
"This notebook showcases basic functionality related to `Deep Lake`. While `Deep Lake` can store embeddings, it is capable of storing any type of data. It is a fully fledged serverless data lake with version control, query engine and streaming dataloader to deep learning frameworks. \n",
|
||||
"\n",
|
||||
"For more information, please see the Deep Lake [documentation](https://docs.activeloop.ai) or [api reference](https://docs.deeplake.ai)"
|
||||
"For more information, please see the Deep Lake [documentation](docs.activeloop.ai) or [api reference](docs.deeplake.ai)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install openai deeplake tiktoken"
|
||||
"!python3 -m pip install openai deeplake tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 45,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -39,19 +33,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
@@ -62,10 +46,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
@@ -79,6 +61,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -87,19 +70,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 49,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/leo/.local/lib/python3.10/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.3.2) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
@@ -107,26 +80,11 @@
|
||||
"./my_deeplake/ loaded successfully.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": []
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": []
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": []
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Evaluating ingest: 100%|██████████████████████████████████████| 1/1 [00:07<00:00\n"
|
||||
"Evaluating ingest: 100%|██████████| 1/1 [00:04<00:00\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -135,17 +93,17 @@
|
||||
"text": [
|
||||
"Dataset(path='./my_deeplake/', tensors=['embedding', 'ids', 'metadata', 'text'])\n",
|
||||
"\n",
|
||||
" tensor htype shape dtype compression\n",
|
||||
" ------- ------- ------- ------- ------- \n",
|
||||
" embedding generic (42, 1536) float32 None \n",
|
||||
" ids text (42, 1) str None \n",
|
||||
" metadata json (42, 1) str None \n",
|
||||
" text text (42, 1) str None \n"
|
||||
" tensor htype shape dtype compression\n",
|
||||
" ------- ------- ------- ------- ------- \n",
|
||||
" embedding generic (4, 1536) float32 None \n",
|
||||
" ids text (4, 1) str None \n",
|
||||
" metadata json (4, 1) str None \n",
|
||||
" text text (4, 1) str None \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db = DeepLake(dataset_path=\"./my_deeplake/\", embedding_function=embeddings)\n",
|
||||
"db = DeepLake(dataset_path=\"./my_deeplake/\", embedding_function=embeddings, overwrite=True)\n",
|
||||
"db.add_documents(docs)\n",
|
||||
"# or shorter\n",
|
||||
"# db = DeepLake.from_documents(docs, dataset_path=\"./my_deeplake/\", embedding=embeddings, overwrite=True)\n",
|
||||
@@ -155,10 +113,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -179,6 +135,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -187,10 +144,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -200,11 +155,6 @@
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": []
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
@@ -218,12 +168,12 @@
|
||||
"text": [
|
||||
"Dataset(path='./my_deeplake/', read_only=True, tensors=['embedding', 'ids', 'metadata', 'text'])\n",
|
||||
"\n",
|
||||
" tensor htype shape dtype compression\n",
|
||||
" ------- ------- ------- ------- ------- \n",
|
||||
" embedding generic (42, 1536) float32 None \n",
|
||||
" ids text (42, 1) str None \n",
|
||||
" metadata json (42, 1) str None \n",
|
||||
" text text (42, 1) str None \n"
|
||||
" tensor htype shape dtype compression\n",
|
||||
" ------- ------- ------- ------- ------- \n",
|
||||
" embedding generic (4, 1536) float32 None \n",
|
||||
" ids text (4, 1) str None \n",
|
||||
" metadata json (4, 1) str None \n",
|
||||
" text text (4, 1) str None \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -233,6 +183,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -240,6 +191,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -248,16 +200,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 52,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/leo/.local/lib/python3.10/site-packages/langchain/llms/openai.py:624: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
|
||||
"/media/sdb/davit/Git/experiments/langchain/langchain/llms/openai.py:672: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
}
|
||||
@@ -271,18 +221,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The president nominated Ketanji Brown Jackson to serve on the United States Supreme Court. He described her as a former top litigator in private practice, a former federal public defender, a consensus builder, and from a family of public school educators and police officers. He also mentioned that she has received broad support from various groups since being nominated.'"
|
||||
"\"The president nominated Ketanji Brown Jackson to serve on the United States Supreme Court, describing her as one of the nation's top legal minds and a consensus builder with a background in private practice and public defense, and noting that she has received broad support from both Democrats and Republicans.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -293,6 +241,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -376,6 +325,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -407,6 +357,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -438,6 +389,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -454,6 +406,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -476,6 +429,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -579,6 +533,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -639,6 +594,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -682,6 +638,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -858,7 +815,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.10.0"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
@@ -867,5 +824,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -7,135 +7,14 @@
|
||||
"source": [
|
||||
"# ElasticSearch\n",
|
||||
"\n",
|
||||
"[Elasticsearch](https://www.elastic.co/elasticsearch/) is a distributed, RESTful search and analytics engine. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Elasticsearch` database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "81f43794-f002-477c-9b68-4975df30e718",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check out [Elasticsearch installation instructions](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).\n",
|
||||
"\n",
|
||||
"To connect to an Elasticsearch instance that does not require\n",
|
||||
"login credentials, pass the Elasticsearch URL and index name along with the\n",
|
||||
"embedding object to the constructor.\n",
|
||||
"\n",
|
||||
"Example:\n",
|
||||
"```python\n",
|
||||
" from langchain import ElasticVectorSearch\n",
|
||||
" from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
" embedding = OpenAIEmbeddings()\n",
|
||||
" elastic_vector_search = ElasticVectorSearch(\n",
|
||||
" elasticsearch_url=\"http://localhost:9200\",\n",
|
||||
" index_name=\"test_index\",\n",
|
||||
" embedding=embedding\n",
|
||||
" )\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"To connect to an Elasticsearch instance that requires login credentials,\n",
|
||||
"including Elastic Cloud, use the Elasticsearch URL format\n",
|
||||
"https://username:password@es_host:9243. For example, to connect to Elastic\n",
|
||||
"Cloud, create the Elasticsearch URL with the required authentication details and\n",
|
||||
"pass it to the ElasticVectorSearch constructor as the named parameter\n",
|
||||
"elasticsearch_url.\n",
|
||||
"\n",
|
||||
"You can obtain your Elastic Cloud URL and login credentials by logging in to the\n",
|
||||
"Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and\n",
|
||||
"navigating to the \"Deployments\" page.\n",
|
||||
"\n",
|
||||
"To obtain your Elastic Cloud password for the default \"elastic\" user:\n",
|
||||
"1. Log in to the Elastic Cloud console at https://cloud.elastic.co\n",
|
||||
"2. Go to \"Security\" > \"Users\"\n",
|
||||
"3. Locate the \"elastic\" user and click \"Edit\"\n",
|
||||
"4. Click \"Reset password\"\n",
|
||||
"5. Follow the prompts to reset the password\n",
|
||||
"\n",
|
||||
"Format for Elastic Cloud URLs is\n",
|
||||
"https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.\n",
|
||||
"\n",
|
||||
"Example:\n",
|
||||
"```python\n",
|
||||
" from langchain import ElasticVectorSearch\n",
|
||||
" from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
" embedding = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
" elastic_host = \"cluster_id.region_id.gcp.cloud.es.io\"\n",
|
||||
" elasticsearch_url = f\"https://username:password@{elastic_host}:9243\"\n",
|
||||
" elastic_vector_search = ElasticVectorSearch(\n",
|
||||
" elasticsearch_url=elasticsearch_url,\n",
|
||||
" index_name=\"test_index\",\n",
|
||||
" embedding=embedding\n",
|
||||
" )\n",
|
||||
"```"
|
||||
"This notebook shows how to use functionality related to the ElasticSearch database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install elasticsearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6030187-0bd7-4798-8372-a265036af5e0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -146,11 +25,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
@@ -166,9 +43,7 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "12eb86d8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = ElasticVectorSearch.from_documents(docs, embeddings, elasticsearch_url=\"http://localhost:9200\")\n",
|
||||
@@ -230,7 +105,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -7,65 +7,14 @@
|
||||
"source": [
|
||||
"# FAISS\n",
|
||||
"\n",
|
||||
">[Facebook AI Similarity Search (Faiss)](https://engineering.fb.com/2017/03/29/data-infrastructure/faiss-a-library-for-efficient-similarity-search/) is a library for efficient similarity search and clustering of dense vectors. It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM. It also contains supporting code for evaluation and parameter tuning.\n",
|
||||
"\n",
|
||||
"[Faiss documentation](https://faiss.ai/).\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `FAISS` vector database."
|
||||
"This notebook shows how to use functionality related to the FAISS vector database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "497fcd89-e832-46a7-a74a-c71199666206",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install faiss\n",
|
||||
"# OR\n",
|
||||
"!pip install faiss-cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "38237514-b3fa-44a4-9cff-30cd6bf50073",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "47f9b495-88f1-4286-8d5d-1416103931a7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -76,11 +25,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
@@ -94,11 +41,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 3,
|
||||
"id": "5eabdb75",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = FAISS.from_documents(docs, embeddings)\n",
|
||||
@@ -109,11 +54,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"id": "4b172de8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -372,7 +315,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,214 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LanceDB\n",
|
||||
"\n",
|
||||
">[LanceDB](https://lancedb.com/) is an open-source database for vector-search built with persistent storage, which greatly simplifies retrevial, filtering and management of embeddings. Fully open source.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `LanceDB` vector database based on the Lance data format."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bfcf346a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install lancedb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "99134dd1-b91e-486f-8d90-534248e43b9d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a0361f5c-e6f4-45f4-b829-11680cf03cec",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import LanceDB"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"loader = TextLoader('../../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"documents = CharacterTextSplitter().split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "6e104aee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import lancedb\n",
|
||||
"\n",
|
||||
"db = lancedb.connect('/tmp/lancedb')\n",
|
||||
"table = db.create_table(\"my_table\", data=[\n",
|
||||
" {\"vector\": embeddings.embed_query(\"Hello World\"), \"text\": \"Hello World\", \"id\": \"1\"}\n",
|
||||
"], mode=\"overwrite\")\n",
|
||||
"\n",
|
||||
"docsearch = LanceDB.from_documents(documents, embeddings, connection=table)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "9c608226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n",
|
||||
"\n",
|
||||
"Officer Mora was 27 years old. \n",
|
||||
"\n",
|
||||
"Officer Rivera was 22. \n",
|
||||
"\n",
|
||||
"Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n",
|
||||
"\n",
|
||||
"I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n",
|
||||
"\n",
|
||||
"I’ve worked on these issues a long time. \n",
|
||||
"\n",
|
||||
"I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n",
|
||||
"\n",
|
||||
"So let’s not abandon our streets. Or choose between safety and equal justice. \n",
|
||||
"\n",
|
||||
"Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. \n",
|
||||
"\n",
|
||||
"That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \n",
|
||||
"\n",
|
||||
"That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. \n",
|
||||
"\n",
|
||||
"We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. \n",
|
||||
"\n",
|
||||
"I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. \n",
|
||||
"\n",
|
||||
"And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. \n",
|
||||
"\n",
|
||||
"And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? \n",
|
||||
"\n",
|
||||
"Ban assault weapons and high-capacity magazines. \n",
|
||||
"\n",
|
||||
"Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. \n",
|
||||
"\n",
|
||||
"These laws don’t infringe on the Second Amendment. They save lives. \n",
|
||||
"\n",
|
||||
"The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. \n",
|
||||
"\n",
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n",
|
||||
"\n",
|
||||
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
|
||||
"\n",
|
||||
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
|
||||
"\n",
|
||||
"We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n",
|
||||
"\n",
|
||||
"We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n",
|
||||
"\n",
|
||||
"We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a359ed74",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
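Taken together, the LanceDB cells above reduce to one short flow: seed a table so LanceDB can infer the vector schema, wrap it, and query. A minimal consolidated sketch, using only the calls shown in this notebook and assuming a local `/tmp/lancedb` database and a valid `OPENAI_API_KEY`:

```python
import lancedb
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB

embeddings = OpenAIEmbeddings()
documents = CharacterTextSplitter().split_documents(
    TextLoader('../../../state_of_the_union.txt').load()
)

# Seed one row so LanceDB can infer the vector schema for the table
db = lancedb.connect('/tmp/lancedb')
table = db.create_table(
    "my_table",
    data=[{"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}],
    mode="overwrite",
)

docsearch = LanceDB.from_documents(documents, embeddings, connection=table)
docs = docsearch.similarity_search("What did the president say about Ketanji Brown Jackson")
```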
@@ -7,63 +7,16 @@
"source": [
"# Milvus\n",
"\n",
">[Milvus](https://milvus.io/docs/overview.md) is a database that stores, indexes, and manages massive embedding vectors generated by deep neural networks and other machine learning (ML) models.\n",
"\n",
"This notebook shows how to use functionality related to the Milvus vector database.\n",
"\n",
"To run, you should have a [Milvus instance up and running](https://milvus.io/docs/install_standalone-docker.md)."
]
},
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a62cff8a-bcf7-4e33-bbbc-76999c2e3e20",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install pymilvus"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7a0f9e02-8eb0-4aef-b11f-8861360472ee",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key."
|
||||
"To run, you should have a Milvus instance up and running: https://milvus.io/docs/install_standalone-docker.md"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "8b6ed9cd-81b9-46e5-9c20-5aafca2844d0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -74,11 +27,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
@@ -94,9 +45,7 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dcf88bdf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_db = Milvus.from_documents(\n",
|
||||
@@ -125,6 +74,14 @@
|
||||
"source": [
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a359ed74",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -143,7 +100,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
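The Milvus hunks above are cut off before the connection call completes. A minimal sketch of the flow they outline, with the standalone-Docker default host and port filled in as assumptions (the diff does not show these values):

```python
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Milvus

docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(
    TextLoader('../../../state_of_the_union.txt').load()
)

# "localhost"/"19530" are the Milvus standalone defaults, assumed here
vector_db = Milvus.from_documents(
    docs,
    OpenAIEmbeddings(),
    connection_args={"host": "localhost", "port": "19530"},
)
results = vector_db.similarity_search("What did the president say about Ketanji Brown Jackson")
```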
@@ -1,63 +1,37 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# MyScale\n",
"\n",
">[MyScale](https://docs.myscale.com/en/overview/) is a cloud-based database optimized for AI applications and solutions, built on the open-source [ClickHouse](https://github.com/ClickHouse/ClickHouse). \n",
"\n",
"This notebook shows how to use functionality related to the `MyScale` vector database."
]
},
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43ead5d5-2c1f-4dce-a69a-cb00e4f9d6f0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up envrionments"
|
||||
"This notebook shows how to use functionality related to the MyScale vector database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7dccc580-8270-4714-ad61-f79783dd6eea",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install clickhouse-connect"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "15a1d477-9cdb-4d82-b019-96951ecb2b72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "91003ea5-0c8c-436c-a5de-aaeaeef2f458",
|
||||
"execution_count": 2,
|
||||
"id": "aac9563e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import MyScale\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "a9d16fa3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up envrionments\n",
|
||||
"\n",
|
||||
"There are two ways to set up parameters for myscale index.\n",
|
||||
"\n",
|
||||
"1. Environment Variables\n",
|
||||
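The hunk is truncated before the second option appears. As a sketch of the environment-variable route, assuming the `MYSCALE_*` variable names and the `MyScaleSettings` import path used by the integration (both are assumptions, not shown in this diff):

```python
import os

# Option 1: environment variables picked up when the store is constructed
# (variable names assumed from the MyScaleSettings env prefix)
os.environ["MYSCALE_HOST"] = "your-cluster.myscale.com"
os.environ["MYSCALE_PORT"] = "443"
os.environ["MYSCALE_USERNAME"] = "username"
os.environ["MYSCALE_PASSWORD"] = "password"

# Option 2: pass an explicit settings object instead (import path assumed)
from langchain.vectorstores.myscale import MyScaleSettings

config = MyScaleSettings(host="your-cluster.myscale.com", port=443)
```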
@@ -82,26 +56,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import MyScale\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 3,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
@@ -169,6 +126,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "e3a8b105",
|
||||
"metadata": {},
|
||||
@@ -187,6 +145,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "f59360c0",
|
||||
"metadata": {},
|
||||
@@ -257,6 +216,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "a359ed74",
|
||||
"metadata": {},
|
||||
@@ -299,7 +259,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.8.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -7,10 +7,7 @@
"source": [
"# OpenSearch\n",
"\n",
"> [OpenSearch](https://opensearch.org/) is a scalable, flexible, and extensible open-source software suite for search, analytics, and observability applications licensed under Apache 2.0. `OpenSearch` is a distributed search and analytics engine based on `Apache Lucene`.\n",
"\n",
"\n",
"This notebook shows how to use functionality related to the `OpenSearch` database.\n",
"This notebook shows how to use functionality related to the OpenSearch database.\n",
"\n",
"To run, you should have the opensearch instance up and running: [here](https://opensearch.org/docs/latest/install-and-configure/install-opensearch/index/)\n",
"`similarity_search` by default performs the Approximate k-NN Search which uses one of the several algorithms like lucene, nmslib, faiss recommended for\n",
@@ -18,39 +15,6 @@
"Check [this](https://opensearch.org/docs/latest/search-plugins/knn/index/) for more details."
]
|
||||
},
|
||||
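As a sketch of the setup this intro describes, assuming a local OpenSearch instance on the default port (the URL is a placeholder, not taken from the diff):

```python
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import OpenSearchVectorSearch

docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(
    TextLoader('../../../state_of_the_union.txt').load()
)

docsearch = OpenSearchVectorSearch.from_documents(
    docs, OpenAIEmbeddings(), opensearch_url="http://localhost:9200"
)
# similarity_search performs Approximate k-NN search by default
results = docsearch.similarity_search("What did the president say about Ketanji Brown Jackson")
```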
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6e606066-9386-4427-8a87-1b93f435c57e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install opensearch-py"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b1fa637e-4fbf-4d5a-9188-2cad826a193e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "28e5455e-322d-4010-9e3b-491d522ef5db",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -269,9 +233,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -6,38 +6,7 @@
"source": [
"# PGVector\n",
"\n",
">[PGVector](https://github.com/pgvector/pgvector) is an open-source vector similarity search for `Postgres`\n",
"\n",
"It supports:\n",
"- exact and approximate nearest neighbor search\n",
"- L2 distance, inner product, and cosine distance\n",
"\n",
"This notebook shows how to use the Postgres vector database (`PGVector`)."
]
},
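A sketch of the connection step the intro implies; the DSN and collection name are placeholders and assume a Postgres instance with the `pgvector` extension enabled:

```python
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.pgvector import PGVector

docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(
    TextLoader('../../../state_of_the_union.txt').load()
)

# Placeholder DSN; adjust user/password/host/database for your setup
db = PGVector.from_documents(
    embedding=OpenAIEmbeddings(),
    documents=docs,
    collection_name="state_of_the_union",
    connection_string="postgresql+psycopg2://postgres:postgres@localhost:5432/postgres",
)
```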
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See the [installation instruction](https://github.com/pgvector/pgvector)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install pgvector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
"This notebook shows how to use functionality related to the Postgres vector database (PGVector)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -45,31 +14,6 @@
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"False"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"## Loading Environment Variables\n",
|
||||
"from typing import List, Tuple\n",
|
||||
@@ -79,10 +23,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -240,9 +182,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -7,74 +7,14 @@
"source": [
"# Pinecone\n",
"\n",
"[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality.\n",
"\n",
"This notebook shows how to use functionality related to the `Pinecone` vector database.\n",
"\n",
"To use Pinecone, you must have an API key. \n",
"Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart)."
"This notebook shows how to use functionality related to the Pinecone vector database."
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b4c41cad-08ef-4f72-a545-2151e4598efe",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install pinecone-client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c1e38361-c1fe-4ac6-86e9-c90ebaf7ae87",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"PINECONE_API_KEY = getpass.getpass('Pinecone API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "02a536e0-d603-4d79-b18b-1ed562977b40",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"PINECONE_ENV = getpass.getpass('Pinecone Environment:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "320af802-9271-46ee-948f-d2453933d44b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ffea66e4-bc23-46a9-9580-b348dfe7b7a7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -110,8 +50,8 @@
"\n",
"# initialize pinecone\n",
"pinecone.init(\n",
" api_key=PINECONE_API_KEY, # find at app.pinecone.io\n",
" environment=PINECONE_ENV # next to api key in console\n",
" api_key=\"YOUR_API_KEY\", # find at app.pinecone.io\n",
" environment=\"YOUR_ENV\" # next to api key in console\n",
")\n",
"\n",
"index_name = \"langchain-demo\"\n",
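Both sides of this hunk call `pinecone.init` the same way; only where the credentials come from differs. A sketch of the getpass-based side carried through to index population (`Pinecone.from_texts` is the integration's standard entry point, assumed here since the hunk stops at `index_name`):

```python
import getpass
import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

PINECONE_API_KEY = getpass.getpass('Pinecone API Key:')
PINECONE_ENV = getpass.getpass('Pinecone Environment:')

pinecone.init(
    api_key=PINECONE_API_KEY,  # find at app.pinecone.io
    environment=PINECONE_ENV,  # next to api key in console
)

index_name = "langchain-demo"
# Placeholder text; in the notebook this would be the split Documents
docsearch = Pinecone.from_texts(["hello pinecone"], OpenAIEmbeddings(), index_name=index_name)
```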
@@ -160,7 +100,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -7,72 +7,22 @@
"source": [
"# Qdrant\n",
"\n",
">[Qdrant](https://qdrant.tech/documentation/) (read: quadrant) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload. `Qdrant` is tailored to extended filtering support. It makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications.\n",
"This notebook shows how to use functionality related to the Qdrant vector database. There are various modes of how to run Qdrant, and depending on the chosen one, there will be some subtle differences. The options include:\n",
"\n",
"\n",
"This notebook shows how to use functionality related to the `Qdrant` vector database. \n",
"\n",
"There are various modes of how to run `Qdrant`, and depending on the chosen one, there will be some subtle differences. The options include:\n",
"- Local mode, no server required\n",
"- On-premise server deployment\n",
"- Qdrant Cloud\n",
"\n",
"See the [installation instructions](https://qdrant.tech/documentation/install/)."
"- Qdrant Cloud"
]
},
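A sketch of the first option on that list, local mode, where everything runs in-process via qdrant-client's in-memory backend (the `location` argument is an assumption here, since the hunk does not reach that cell):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

texts = [
    "Qdrant can run fully in-process for quick experiments.",
    "It also supports on-premise servers and Qdrant Cloud.",
]

# Local mode: no server required, data lives in this Python process
qdrant = Qdrant.from_texts(
    texts,
    OpenAIEmbeddings(),
    location=":memory:",
    collection_name="my_documents",
)
found = qdrant.similarity_search("which mode needs no server?")
```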
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e03e8460-8f32-4d1f-bb93-4f7636a476fa",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install qdrant-client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7b2f111b-357a-4f42-9730-ef0603bdc1b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "082e7e8b-ac52-430c-98d6-8f0924457642",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:22.282884Z",
|
||||
"start_time": "2023-04-04T10:51:21.408077Z"
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -84,14 +34,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:22.520144Z",
|
||||
"start_time": "2023-04-04T10:51:22.285826Z"
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -121,14 +70,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 3,
|
||||
"id": "8429667e",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:22.525091Z",
|
||||
"start_time": "2023-04-04T10:51:22.522015Z"
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -151,14 +99,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 4,
|
||||
"id": "24b370e2",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:24.827567Z",
|
||||
"start_time": "2023-04-04T10:51:22.529080Z"
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -295,14 +242,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"id": "a8c513ab",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:25.204469Z",
|
||||
"start_time": "2023-04-04T10:51:24.855618Z"
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -312,14 +258,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 10,
|
||||
"id": "fc516993",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:25.220984Z",
|
||||
"start_time": "2023-04-04T10:51:25.213943Z"
|
||||
},
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
|
||||
@@ -1,52 +1,19 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Redis\n",
"\n",
">[Redis (Remote Dictionary Server)](https://en.wikipedia.org/wiki/Redis) is an in-memory data structure store, used as a distributed, in-memory key–value database, cache and message broker, with optional durability.\n",
"\n",
"This notebook shows how to use functionality related to the [Redis vector database](https://redis.com/solutions/use-cases/vector-database/)."
]
},
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install redis"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
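A sketch of where this notebook is headed, assuming a local Redis instance with the RediSearch module (for example redis-stack); the `redis_url` and `index_name` values are placeholders:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.redis import Redis

rds = Redis.from_texts(
    ["foo", "bar", "baz"],
    OpenAIEmbeddings(),
    redis_url="redis://localhost:6379",  # requires RediSearch (e.g. redis-stack)
    index_name="link",
)
results = rds.similarity_search("foo")
```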
@@ -260,9 +227,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
|
||||
@@ -1,23 +1,17 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SupabaseVectorStore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cc80fa84-1f2f-48b4-bd39-3e6412f012f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
">[Supabase](https://supabase.com/docs) is an open source Firebase alternative.\n",
|
||||
"# SupabaseVectorStore\n",
|
||||
"\n",
|
||||
"This notebook shows how to use `Supabase` and `pgvector` as your VectorStore.\n",
|
||||
"This notebook shows how to use Supabase and `pgvector` as your VectorStore.\n",
|
||||
"\n",
|
||||
"To run this notebook, please ensure:\n",
|
||||
"\n",
|
||||
"- the `pgvector` extension is enabled\n",
|
||||
"- you have installed the `supabase-py` package\n",
|
||||
"- that you have created a `match_documents` function in your database\n",
|
||||
@@ -63,66 +57,23 @@
|
||||
" LIMIT match_count;\n",
|
||||
" END;\n",
|
||||
" $$;\n",
|
||||
"```"
|
||||
"```\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "6bd4498b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# with pip\n",
|
||||
"!pip install supabase\n",
|
||||
"# !pip install supabase\n",
|
||||
"\n",
|
||||
"# with conda\n",
|
||||
"# !conda install -c conda-forge supabase"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "69bff365-3039-4ff8-a641-aa190166179d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "19846a7b-99bc-47a7-8e1c-f13c2497f1ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c71c3901-d44b-4d09-92c5-3018628c28fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ['SUPABASE_URL'] = getpass.getpass('Supabase URL:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b91ecfa-f61b-489a-a337-dff1f12f6ab2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ['SUPABASE_SERVICE_KEY'] = getpass.getpass('Supabase Service Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
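Once the three environment variables above are set, the pieces combine as follows. A sketch assuming the `match_documents` function created earlier; client construction follows supabase-py, and the sample document is a placeholder:

```python
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import create_client

supabase = create_client(
    os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
)

docs = [Document(page_content="LangChain integrates with Supabase via pgvector.")]
vector_store = SupabaseVectorStore.from_documents(docs, OpenAIEmbeddings(), client=supabase)
matched = vector_store.similarity_search("How does LangChain talk to Supabase?")
```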
@@ -440,7 +391,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,129 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tair\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the Tair vector database.\n",
|
||||
"To run, you should have an [Tair](https://www.alibabacloud.com/help/en/tair/latest/what-is-tair) instance up and running."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.fake import FakeEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Tair"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = FakeEmbeddings(size=128)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Connect to Tair using the `TAIR_URL` environment variable \n",
|
||||
"```\n",
|
||||
"export TAIR_URL=\"redis://{username}:{password}@{tair_address}:{tair_port}\"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"or the keyword argument `tair_url`.\n",
|
||||
"\n",
|
||||
"Then store documents and embeddings into Tair."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tair_url = \"redis://localhost:6379\"\n",
|
||||
"\n",
|
||||
"# drop first if index already exists\n",
|
||||
"Tair.drop_index(tair_url=tair_url)\n",
|
||||
"\n",
|
||||
"vector_store = Tair.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" tair_url=tair_url\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Query similar documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. \\n\\nAnd tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \\n\\nBy the end of this year, the deficit will be down to less than half what it was before I took office. \\n\\nThe only president ever to cut the deficit by more than one trillion dollars in a single year. \\n\\nLowering your costs also means demanding more competition. \\n\\nI’m a capitalist, but capitalism without competition isn’t capitalism. \\n\\nIt’s exploitation—and it drives up prices. \\n\\nWhen corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. \\n\\nWe see it happening with ocean carriers moving goods in and out of America. \\n\\nDuring the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits.', metadata={'source': '../../../state_of_the_union.txt'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = vector_store.similarity_search(query)\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -7,73 +7,14 @@
"source": [
"# Weaviate\n",
"\n",
">[Weaviate](https://weaviate.io/) is an open-source vector database. It allows you to store data objects and vector embeddings from your favorite ML-models, and scale seamlessly into billions of data objects.\n",
"\n",
"This notebook shows how to use functionality related to the `Weaviate` vector database.\n",
"\n",
"See the `Weaviate` [installation instructions](https://weaviate.io/developers/weaviate/installation)."
]
},
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e9ab167c-fffc-4d30-b1c1-37cc1b641698",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install weaviate-client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b34828d-e627-4d85-aabd-eeb15d9f4b00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "37697b9f-fbb2-430e-b95d-28d6eb83486d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fea2dbae-a609-4458-a05f-f1c8e1f37c6f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"WEAVIATE_URL = getpass.getpass('WEAVIATE_URL:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "53b7ce2d-3c09-4d1c-b66b-5769ce6746ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ['WEAVIATE_API_KEY'] = getpass.getpass('WEAVIATE_API_KEY:')"
|
||||
"This notebook shows how to use functionality related to the Weaviate vector database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
@@ -134,8 +75,7 @@
" \"vectorizer\": \"text2vec-openai\",\n",
" \"moduleConfig\": {\n",
" \"text2vec-openai\": {\n",
" \"model\": \"ada\",\n",
" \"modelVersion\": \"002\",\n",
" \"model\": \"babbage\",\n",
" \"type\": \"text\"\n",
" }\n",
" },\n",
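For orientation, this hunk edits the embedded-model block inside a Weaviate class schema; the later lines appear to be the incoming side. A sketch of the surrounding schema object those lines belong to (the class name and the create call are assumptions, not shown in the hunk):

```python
import weaviate

schema = {
    "classes": [
        {
            "class": "Paragraph",  # class name assumed, not shown in the hunk
            "vectorizer": "text2vec-openai",
            "moduleConfig": {
                "text2vec-openai": {
                    "model": "babbage",  # value on what appears to be the incoming side
                    "type": "text",
                }
            },
        }
    ]
}

# client = weaviate.Client(WEAVIATE_URL)
# client.schema.create(schema)
```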
@@ -215,7 +155,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -7,57 +7,9 @@
"source": [
"# Zilliz\n",
"\n",
">[Zilliz Cloud](https://zilliz.com/doc/quick_start) is a fully managed cloud service for `LF AI Milvus®`.\n",
"\n",
"This notebook shows how to use functionality related to the Zilliz Cloud managed vector database.\n",
"\n",
"To run, you should have a `Zilliz Cloud` instance up and running. Here are the [installation instructions](https://zilliz.com/cloud)."
]
},
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c0c50102-e6ac-4475-a930-49c94ed0bd99",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install pymilvus"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b25e246-ffe7-4822-a6bf-85d1a120df00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d6691489-1ebc-40fa-bc09-b0916903a24d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "19a71422",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# replace \n",
|
||||
"ZILLIZ_CLOUD_URI = \"\" # example: \"https://in01-17f69c292d4a5sa.aws-us-west-2.vectordb.zillizcloud.com:19536\"\n",
|
||||
"ZILLIZ_CLOUD_USERNAME = \"\" # example: \"username\"\n",
|
||||
"ZILLIZ_CLOUD_PASSWORD = \"\" # example: \"*********\""
|
||||
"To run, you should have a Zilliz Cloud instance up and running: https://zilliz.com/cloud"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -73,6 +25,18 @@
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "19a71422",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# replace \n",
|
||||
"ZILLIZ_CLOUD_HOSTNAME = \"\" # example: \"in01-17f69c292d4a50a.aws-us-west-2.vectordb.zillizcloud.com\"\n",
|
||||
"ZILLIZ_CLOUD_PORT = \"\" #example: \"19532\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -99,12 +63,7 @@
"vector_db = Milvus.from_documents(\n",
" docs,\n",
" embeddings,\n",
" connection_args={\n",
" \"uri\": ZILLIZ_CLOUD_URI,\n",
" \"username\": ZILLIZ_CLOUD_USERNAME,\n",
" \"password\": ZILLIZ_CLOUD_PASSWORD,\n",
" \"secure\": True\n",
" }\n",
" connection_args={\"host\": ZILLIZ_CLOUD_HOSTNAME, \"port\": ZILLIZ_CLOUD_PORT},\n",
")\n",
]
},
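The two sides of this hunk reflect a connection-style change: URI plus credentials versus host plus port. A sketch of the incoming host/port form in full, with the empty placeholders from the cell above and a placeholder document:

```python
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Milvus

# Placeholders, as in the "# replace" cell above
ZILLIZ_CLOUD_HOSTNAME = ""  # e.g. "in01-xxxx.aws-us-west-2.vectordb.zillizcloud.com"
ZILLIZ_CLOUD_PORT = ""      # e.g. "19532"

vector_db = Milvus.from_documents(
    [Document(page_content="placeholder")],
    OpenAIEmbeddings(),
    connection_args={"host": ZILLIZ_CLOUD_HOSTNAME, "port": ZILLIZ_CLOUD_PORT},
)
```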
@@ -145,7 +104,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
"version": "3.8.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -11,9 +11,9 @@
"source": [
"# Getting Started\n",
"\n",
"This notebook showcases basic functionality related to VectorStores. A key part of working with vectorstores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [embedding notebook](../../models/text_embedding.html) before diving into this.\n",
"This notebook showcases basic functionality related to VectorStores. A key part of working with vectorstores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [embedding notebook](embeddings.ipynb) before diving into this.\n",
"\n",
"This covers generic high level functionality related to all vector stores."
"This covers generic high level functionality related to all vector stores. For guides on specific vectorstores, please see the how-to guides [here](../how_to_guides.rst)"
]
},
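The shared surface those guides build on is small: construct a store from texts or documents, add more, and search. A sketch using FAISS as one concrete in-memory example (the sample strings are placeholders):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

store = FAISS.from_texts(["harrison worked at kensho"], OpenAIEmbeddings())

# The same calls exist on every vector store implementation
store.add_texts(["bears like to eat honey"])
docs = store.similarity_search("where did harrison work?")
```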
{
|
||||
@@ -265,7 +265,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -80,8 +80,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.base import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"chat = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
|
||||
"chat = ChatOpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
|
||||
"resp = chat([HumanMessage(content=\"Write me a song about sparkling water.\")])"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -373,8 +373,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.base import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"chat = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
|
||||
"chat = ChatOpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
|
||||
"resp = chat([HumanMessage(content=\"Write me a song about sparkling water.\")])\n"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -22,20 +22,18 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"id": "a65696a0",
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, List, Mapping, Optional\n",
"\n",
"from langchain.callbacks.manager import CallbackManagerForLLMRun\n",
"from langchain.llms.base import LLM"
"from langchain.llms.base import LLM\n",
"from typing import Optional, List, Mapping, Any"
]
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"id": "d5ceff02",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -48,12 +46,7 @@
" def _llm_type(self) -> str:\n",
" return \"custom\"\n",
" \n",
" def _call(\n",
" self,\n",
" prompt: str,\n",
" stop: Optional[List[str]] = None,\n",
" run_manager: Optional[CallbackManagerForLLMRun] = None,\n",
" ) -> str:\n",
" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:\n",
" if stop is not None:\n",
" raise ValueError(\"stop kwargs are not permitted.\")\n",
" return prompt[:self.n]\n",
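Read side by side, the two `_call` signatures in this hunk differ only in the `run_manager` parameter that one side threads through. The full class those fragments belong to, assembled on the run_manager side (the `n: int` field and the `@property` decorator are implied by `prompt[:self.n]` and the method shape rather than shown in the hunk):

```python
from typing import List, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class CustomLLM(LLM):
    n: int  # how many leading characters of the prompt to echo back

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        return prompt[: self.n]
```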
@@ -74,7 +67,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 3,
|
||||
"id": "10e5ece6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -84,7 +77,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"id": "8cd49199",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -94,7 +87,7 @@
|
||||
"'This is a '"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -113,7 +106,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 5,
|
||||
"id": "9c33fa19",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 3,
|
||||
"id": "f69f6283",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -52,7 +52,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 4,
|
||||
"id": "64005d1f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -60,8 +60,8 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 26.1 ms, sys: 21.5 ms, total: 47.6 ms\n",
|
||||
"Wall time: 1.68 s\n"
|
||||
"CPU times: user 14.2 ms, sys: 4.9 ms, total: 19.1 ms\n",
|
||||
"Wall time: 1.1 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -70,7 +70,7 @@
|
||||
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -83,7 +83,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 5,
|
||||
"id": "c8a1cb2b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -91,8 +91,8 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 238 µs, sys: 143 µs, total: 381 µs\n",
|
||||
"Wall time: 1.76 ms\n"
|
||||
"CPU times: user 162 µs, sys: 7 µs, total: 169 µs\n",
|
||||
"Wall time: 175 µs\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -101,7 +101,7 @@
|
||||
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -214,18 +214,9 @@
|
||||
"## Redis Cache"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c5c9a4d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Standard Cache\n",
|
||||
"Use [Redis](../../../../ecosystem/redis.md) to cache prompts and responses."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": null,
|
||||
"id": "39f6eb0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -234,35 +225,15 @@
|
||||
"# (make sure your local Redis instance is running first before running this example)\n",
|
||||
"from redis import Redis\n",
|
||||
"from langchain.cache import RedisCache\n",
|
||||
"\n",
|
||||
"langchain.llm_cache = RedisCache(redis_=Redis())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": null,
|
||||
"id": "28920749",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 6.88 ms, sys: 8.75 ms, total: 15.6 ms\n",
|
||||
"Wall time: 1.04 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
@@ -271,124 +242,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": null,
|
||||
"id": "94bf9415",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 1.59 ms, sys: 610 µs, total: 2.2 ms\n",
|
||||
"Wall time: 5.58 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "82be23f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Semantic Cache\n",
|
||||
"Use [Redis](../../../../ecosystem/redis.md) to cache prompts and responses and evaluate hits based on semantic similarity."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "64df3099",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.cache import RedisSemanticCache\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"langchain.llm_cache = RedisSemanticCache(\n",
|
||||
" redis_url=\"redis://localhost:6379\",\n",
|
||||
" embedding=OpenAIEmbeddings()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "8e91d3ac",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 351 ms, sys: 156 ms, total: 507 ms\n",
|
||||
"Wall time: 3.37 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy don't scientists trust atoms?\\nBecause they make up everything.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "df856948",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 6.25 ms, sys: 2.72 ms, total: 8.97 ms\n",
|
||||
"Wall time: 262 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy don't scientists trust atoms?\\nBecause they make up everything.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
|
||||
"# so it uses the cached result!\n",
|
||||
"llm(\"Tell me one joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "684eab55",
|
||||
@@ -922,9 +785,7 @@
|
||||
"id": "9df0dab8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!rm .langchain.db sqlite.db"
|
||||
]
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "4ac0ff54-540a-4f2b-8d9a-b590fec7fe07",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -21,13 +21,14 @@
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI, Anthropic\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.callbacks.base import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 3,
|
||||
"id": "77f60a4b-f786-41f2-972e-e5bb8a48dcd5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -78,7 +79,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
|
||||
"llm = OpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
|
||||
"resp = llm(\"Write me a song about sparkling water.\")"
|
||||
]
|
||||
},
|
||||
@@ -94,7 +95,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 4,
|
||||
"id": "a35373f1-9ee6-4753-a343-5aee749b8527",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -135,7 +136,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 6,
|
||||
"id": "22665f16-e05b-473c-a4bd-ad75744ea024",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -190,7 +191,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
|
||||
"chat = ChatOpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
|
||||
"resp = chat([HumanMessage(content=\"Write me a song about sparkling water.\")])"
|
||||
]
|
||||
},
|
||||
@@ -204,7 +205,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 3,
|
||||
"id": "eadae4ba-9f21-4ec8-845d-dd43b0edc2dc",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -244,7 +245,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = Anthropic(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
|
||||
"llm = Anthropic(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
|
||||
"llm(\"Write me a song about sparkling water.\")"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
"source": [
|
||||
"from langchain import PromptTemplate, LLMChain\n",
|
||||
"from langchain.llms import GPT4All\n",
|
||||
"from langchain.callbacks.base import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
|
||||
]
|
||||
},
|
||||
@@ -123,9 +124,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Callbacks support token-wise streaming\n",
|
||||
"callbacks = [StreamingStdOutCallbackHandler()]\n",
|
||||
"callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])\n",
|
||||
"# Verbose is required to pass to the callback manager\n",
|
||||
"llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)"
|
||||
"llm = GPT4All(model=local_path, callback_manager=callback_manager, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,171 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PipelineAI\n",
|
||||
"\n",
|
||||
"PipelineAI allows you to run your ML models at scale in the cloud. It also provides API access to [several LLM models](https://pipeline.ai).\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use Langchain with [PipelineAI](https://docs.pipeline.ai/docs)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install pipeline-ai\n",
|
||||
"The `pipeline-ai` library is required to use the `PipelineAI` API, AKA `Pipeline Cloud`. Install `pipeline-ai` using `pip install pipeline-ai`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install the package\n",
|
||||
"!pip install pipeline-ai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain.llms import PipelineAI\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set the Environment API Key\n",
|
||||
"Make sure to get your API key from PipelineAI. Check out the [cloud quickstart guide](https://docs.pipeline.ai/docs/cloud-quickstart). You'll be given a 30 day free trial with 10 hours of serverless GPU compute to test different models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"PIPELINE_API_KEY\"] = \"YOUR_API_KEY_HERE\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the PipelineAI instance\n",
|
||||
"When instantiating PipelineAI, you need to specify the id or tag of the pipeline you want to use, e.g. `pipeline_key = \"public/gpt-j:base\"`. You then have the option of passing additional pipeline-specific keyword arguments:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = PipelineAI(pipeline_key=\"YOUR_PIPELINE_KEY\", pipeline_kwargs={...})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a Prompt Template\n",
|
||||
"We will create a prompt template for Question and Answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initiate the LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run the LLMChain\n",
|
||||
"Provide a question and run the LLMChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,28 +1,22 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6488fdaf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Chat Prompt Template\n",
|
||||
"\n",
|
||||
"[Chat Models](../models/chat.rst) takes a list of chat messages as input - this list commonly referred to as a prompt.\n",
|
||||
"These chat messages differ from raw string (which you would pass into a [LLM](../models/llms.rst) model) in that every message is associated with a role.\n",
|
||||
"\n",
|
||||
"For example, in OpenAI [Chat Completion API](https://platform.openai.com/docs/guides/chat/introduction), a chat message can be associated with the AI, human or system role. The model is supposed to follow instruction from system chat message more closely.\n",
|
||||
"\n",
|
||||
"Therefore, LangChain provides several related prompt templates to make constructing and working with prompts easily. You are encouraged to use these chat related prompt templates instead of `PromptTemplate` when querying chat models to fully exploit the potential of underlying chat model.\n"
|
||||
"Chat Models takes a list of chat messages as input - this list commonly referred to as a prompt.\n",
|
||||
"Typically this is not simply a hardcoded list of messages but rather a combination of a template, some examples, and user input.\n",
|
||||
"LangChain provides several classes and functions to make constructing and working with prompts easy.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"execution_count": 7,
|
||||
"id": "7647a621",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
@@ -44,18 +38,16 @@
|
||||
"id": "acb4a2f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To create a message template associated with a role, you use `MessagePromptTemplate`. \n",
|
||||
"You can make use of templating by using a `MessagePromptTemplate`. You can build a `ChatPromptTemplate` from one or more `MessagePromptTemplates`. You can use `ChatPromptTemplate`'s `format_prompt` -- this returns a `PromptValue`, which you can convert to a string or Message object, depending on whether you want to use the formatted value as input to an llm or chat model.\n",
|
||||
"\n",
|
||||
"For convenience, there is a `from_template` method exposed on the template. If you were to use this template, this is what it would look like:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"execution_count": 2,
|
||||
"id": "3124f5e9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template=\"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
@@ -64,47 +56,11 @@
|
||||
"human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8b08cda-7c57-4c15-a1e5-80627cfa9cbd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you wanted to construct the `MessagePromptTemplate` more directly, you could create a PromptTemplate outside and then pass it in, eg:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "5a8d249e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt=PromptTemplate(\n",
|
||||
" template=\"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" input_variables=[\"input_language\", \"output_language\"],\n",
|
||||
")\n",
|
||||
"system_message_prompt_2 = SystemMessagePromptTemplate(prompt=prompt)\n",
|
||||
"\n",
|
||||
"assert system_message_prompt == system_message_prompt_2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96836c5c-41f8-4073-95ac-ea1daab2e00e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After that, you can build a `ChatPromptTemplate` from one or more `MessagePromptTemplates`. You can use `ChatPromptTemplate`'s `format_prompt` -- this returns a `PromptValue`, which you can convert to a string or Message object, depending on whether you want to use the formatted value as input to an llm or chat model."
|
||||
]
|
||||
},
|
||||
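The diff elides the source of the next cell; based on its output and the `format_prompt` calls later in the notebook, it presumably reads roughly like this sketch:

```python
# Presumed construction of the chat prompt from the message templates above.
chat_prompt = ChatPromptTemplate.from_messages(
    [system_message_prompt, human_message_prompt]
)

# Format the prompt and render it as a list of Message objects.
chat_prompt.format_prompt(
    input_language="English", output_language="French", text="I love programming."
).to_messages()
```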
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"execution_count": 3,
|
||||
"id": "9c7e2e6f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
@@ -113,7 +69,7 @@
|
||||
" HumanMessage(content='I love programming.', additional_kwargs={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -126,225 +82,25 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0899f681-012e-4687-a754-199a9a396738",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Format output\n",
|
||||
"\n",
|
||||
"The output of the format method is available as string, list of messages and `ChatPromptValue`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "584166de-0c31-4bc9-bf7a-5b359e7173d8",
|
||||
"id": "0dbdf94f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As string:"
|
||||
"If you wanted to construct the MessagePromptTemplate more directly, you could create a PromptTemplate outside and then pass it in, eg:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "2f6c7ad1-def5-41dc-a4fe-3731dc8917f9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'System: You are a helpful assistant that translates English to French.\\nHuman: I love programming.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chat_prompt.format(input_language=\"English\", output_language=\"French\", text=\"I love programming.\")\n",
|
||||
"output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "144b3368-43f3-49fa-885f-3f3470e9ab7e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 8,
|
||||
"id": "5a8d249e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# or alternatively \n",
|
||||
"output_2 = chat_prompt.format_prompt(input_language=\"English\", output_language=\"French\", text=\"I love programming.\").to_string()\n",
|
||||
"\n",
|
||||
"assert output == output_2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "51970399-c2e1-4c9b-8003-6f5b1236fda8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As `ChatPromptValue`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "681ce4a7-d972-4cdf-ac77-ec35182fd352",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatPromptValue(messages=[SystemMessage(content='You are a helpful assistant that translates English to French.', additional_kwargs={}), HumanMessage(content='I love programming.', additional_kwargs={})])"
|
||||
]
|
||||
},
|
||||
"execution_count": 41,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_prompt.format_prompt(input_language=\"English\", output_language=\"French\", text=\"I love programming.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61041810-4418-4406-9c8a-91c9034c9752",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As list of Message objects"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"id": "4ec2f166-1ef9-4071-8c37-fcfbd3f4bc29",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content='You are a helpful assistant that translates English to French.', additional_kwargs={}),\n",
|
||||
" HumanMessage(content='I love programming.', additional_kwargs={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 44,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_prompt.format_prompt(input_language=\"English\", output_language=\"French\", text=\"I love programming.\").to_messages()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73dcd3a2-ad6d-4b7b-ab21-1d9e417f959e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Different types of `MessagePromptTemplate`\n",
|
||||
"\n",
|
||||
"LangChain provides different types of `MessagePromptTemplate`. The most commonly used are `AIMessagePromptTemplate`, `SystemMessagePromptTemplate` and `HumanMessagePromptTemplate`, which create an AI message, system message and human message respectively.\n",
|
||||
"\n",
|
||||
"However, in cases where the chat model supports taking chat message with arbitrary role, you can use `ChatMessagePromptTemplate`, which allows user to specify the role name."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "55a21b1f-9cdc-4072-a41d-695b00dd11e6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatMessage(content='May the force be with you', additional_kwargs={}, role='Jedi')"
|
||||
]
|
||||
},
|
||||
"execution_count": 45,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatMessagePromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = \"May the {subject} be with you\"\n",
|
||||
"\n",
|
||||
"chat_message_prompt = ChatMessagePromptTemplate.from_template(role=\"Jedi\", template=prompt)\n",
|
||||
"chat_message_prompt.format(subject=\"force\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cd6bf82b-4709-4683-b215-6b7c468f3347",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LangChain also provides `MessagesPlaceholder`, which gives you full control of what messages to be rendered during formatting. This can be useful when you are uncertain of what role you should be using for your message prompt templates or when you wish to insert a list of messages during formatting."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "9f034650-5e50-4bc3-80f8-5e428ca6444d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import MessagesPlaceholder\n",
|
||||
"\n",
|
||||
"human_prompt = \"Summarize our conversation so far in {word_count} words.\"\n",
|
||||
"human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)\n",
|
||||
"\n",
|
||||
"chat_prompt = ChatPromptTemplate.from_messages([MessagesPlaceholder(variable_name=\"conversation\"), human_message_template])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"id": "9789e8e7-b3f9-4391-a85c-373e576107b3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='What is the best way to learn programming?', additional_kwargs={}),\n",
|
||||
" AIMessage(content='1. Choose a programming language: Decide on a programming language that you want to learn. \\n\\n2. Start with the basics: Familiarize yourself with the basic programming concepts such as variables, data types and control structures.\\n\\n3. Practice, practice, practice: The best way to learn programming is through hands-on experience', additional_kwargs={}),\n",
|
||||
" HumanMessage(content='Summarize our conversation so far in 10 words.', additional_kwargs={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"human_message = HumanMessage(content=\"What is the best way to learn programming?\")\n",
|
||||
"ai_message = AIMessage(content=\"\"\"\\\n",
|
||||
"1. Choose a programming language: Decide on a programming language that you want to learn. \n",
|
||||
"\n",
|
||||
"2. Start with the basics: Familiarize yourself with the basic programming concepts such as variables, data types and control structures.\n",
|
||||
"\n",
|
||||
"3. Practice, practice, practice: The best way to learn programming is through hands-on experience\\\n",
|
||||
"\"\"\")\n",
|
||||
"\n",
|
||||
"chat_prompt.format_prompt(conversation=[human_message, ai_message], word_count=\"10\").to_messages()"
|
||||
"prompt=PromptTemplate(\n",
|
||||
" template=\"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" input_variables=[\"input_language\", \"output_language\"],\n",
|
||||
")\n",
|
||||
"system_message_prompt = SystemMessagePromptTemplate(prompt=prompt)"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -364,7 +120,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -17,9 +17,7 @@
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ad0b5edf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Feast\n",
|
||||
"\n",
|
||||
@@ -213,241 +211,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4049990-651d-44d3-82b1-0cd122da55c1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tecton\n",
|
||||
"\n",
|
||||
"Above, we showed how you could use Feast, a popular open source and self-managed feature store, with LangChain. Our examples below will show a similar integration using Tecton. Tecton is a fully managed feature platform built to orchestrate the complete ML feature lifecycle, from transformation to online serving, with enterprise-grade SLAs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7bb4dba1-0678-4ea4-be0a-d353c0b13fc2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"* Tecton Deployment (sign up at [https://tecton.ai](https://tecton.ai))\n",
|
||||
"* `TECTON_API_KEY` environment variable set to a valid Service Account key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac9eb618-8c52-4cd6-bb8e-9c99a150dfa6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"### Define and Load Features\n",
|
||||
"\n",
|
||||
"We will use the user_transaction_counts Feature View from the [Tecton tutorial](https://docs.tecton.ai/docs/tutorials/tecton-fundamentals) as part of a Feature Service. For simplicity, we are only using a single Feature View; however, more sophisticated applications may require more feature views to retrieve the features needed for its prompt.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"user_transaction_metrics = FeatureService(\n",
|
||||
" name = \"user_transaction_metrics\",\n",
|
||||
" features = [user_transaction_counts]\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"The above Feature Service is expected to be [applied to a live workspace](https://docs.tecton.ai/docs/applying-feature-repository-changes-to-a-workspace). For this example, we will be using the \"prod\" workspace."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"id": "32e9675d-a7e5-429f-906f-2260294d3e46",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tecton\n",
|
||||
"\n",
|
||||
"workspace = tecton.get_workspace(\"prod\")\n",
|
||||
"feature_service = workspace.get_feature_service(\"user_transaction_metrics\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "29b7550c-0eb4-4bd1-a501-1c63fb77aa56",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prompts\n",
|
||||
"\n",
|
||||
"Here we will set up a custom TectonPromptTemplate. This prompt template will take in a user_id , look up their stats, and format those stats into a prompt.\n",
|
||||
"\n",
|
||||
"Note that the input to this prompt template is just `user_id`, since that is the only user defined piece (all other variables are looked up inside the prompt template)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 61,
|
||||
"id": "6fb77ea4-64c6-4e48-a783-bd1ece021b82",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate, StringPromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 77,
|
||||
"id": "02a98fbc-8135-4b11-bf60-85d28e426667",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Given the vendor's up to date transaction stats, write them a note based on the following rules:\n",
|
||||
"\n",
|
||||
"1. If they had a transaction in the last day, write a short congratulations message on their recent sales\n",
|
||||
"2. If no transaction in the last day, but they had a transaction in the last 30 days, playfully encourage them to sell more.\n",
|
||||
"3. Always add a silly joke about chickens at the end\n",
|
||||
"\n",
|
||||
"Here are the vendor's stats:\n",
|
||||
"Number of Transactions Last Day: {transaction_count_1d}\n",
|
||||
"Number of Transactions Last 30 Days: {transaction_count_30d}\n",
|
||||
"\n",
|
||||
"Your response:\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 78,
|
||||
"id": "a35cdfd5-6ccc-4394-acfe-60d53804be51",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class TectonPromptTemplate(StringPromptTemplate):\n",
|
||||
" \n",
|
||||
" def format(self, **kwargs) -> str:\n",
|
||||
" user_id = kwargs.pop(\"user_id\")\n",
|
||||
" feature_vector = feature_service.get_online_features(join_keys={\"user_id\": user_id}).to_dict()\n",
|
||||
" kwargs[\"transaction_count_1d\"] = feature_vector[\"user_transaction_counts.transaction_count_1d_1d\"]\n",
|
||||
" kwargs[\"transaction_count_30d\"] = feature_vector[\"user_transaction_counts.transaction_count_30d_1d\"]\n",
|
||||
" return prompt.format(**kwargs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 79,
|
||||
"id": "d5915df0-fb16-4770-8a82-22f885b74d1a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt_template = TectonPromptTemplate(input_variables=[\"user_id\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 80,
|
||||
"id": "a36abfc8-ea60-4ae0-a36d-d7b639c7307c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Given the vendor's up to date transaction stats, write them a note based on the following rules:\n",
|
||||
"\n",
|
||||
"1. If they had a transaction in the last day, write a short congratulations message on their recent sales\n",
|
||||
"2. If no transaction in the last day, but they had a transaction in the last 30 days, playfully encourage them to sell more.\n",
|
||||
"3. Always add a silly joke about chickens at the end\n",
|
||||
"\n",
|
||||
"Here are the vendor's stats:\n",
|
||||
"Number of Transactions Last Day: 657\n",
|
||||
"Number of Transactions Last 30 Days: 20326\n",
|
||||
"\n",
|
||||
"Your response:\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(prompt_template.format(user_id=\"user_469998441571\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8d4b905-1051-4303-9c33-8eddb65c1274",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"### Use in a chain\n",
|
||||
"\n",
|
||||
"We can now use this in a chain, successfully creating a chain that achieves personalization backed by the Tecton Feature Platform"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 81,
|
||||
"id": "ffb60cd0-8e3c-4c9d-b639-43d766e12c4c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 82,
|
||||
"id": "3918abc7-00b5-466f-bdfc-ab046cd282da",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = LLMChain(llm=ChatOpenAI(), prompt=prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 83,
|
||||
"id": "e7d91c4b-3e99-40cc-b3e9-a004c8c9193e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Wow, congratulations on your recent sales! Your business is really soaring like a chicken on a hot air balloon! Keep up the great work!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 83,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"user_469998441571\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f752b924-caf9-4f7a-b78b-cb8c8ada8c2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -466,7 +229,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -6,15 +6,16 @@ First, you should install tracing and set up your environment properly.
|
||||
You can use either a locally hosted version of this (uses Docker) or a cloud hosted version (in closed alpha).
|
||||
If you're interested in using the hosted platform, please fill out the form [here](https://forms.gle/tRCEMSeopZf6TE3b6).
|
||||
|
||||
|
||||
- [Locally Hosted Setup](./tracing/local_installation.md)
|
||||
- [Cloud Hosted Setup](./tracing/hosted_installation.md)
|
||||
|
||||
## Tracing Walkthrough
|
||||
|
||||
When you first access the UI, you should see a page with your tracing sessions.
|
||||
An initial one "default" should already be created for you.
|
||||
A session is just a way to group traces together.
|
||||
If you click on a session, it will take you to a page with no recorded traces that says "No Runs."
|
||||
You can create a new session with the new session form.
|
||||
|
||||

|
||||
@@ -34,7 +35,7 @@ We can keep on clicking further and further down to explore deeper and deeper.
|
||||
|
||||

|
||||
|
||||
We can also click on the "Explore" button of the top level run to dive even deeper.
|
||||
We can also click on the "Explore" button of the top level run to dive even deeper.
|
||||
Here, we can see the inputs and outputs in full, as well as all the nested traces.
|
||||
|
||||

|
||||
@@ -45,12 +46,11 @@ For example, here is the lowest level trace with the exact inputs/outputs to the
|
||||

|
||||
|
||||
## Changing Sessions
|
||||
|
||||
1. To initially record traces to a session other than `"default"`, you can set the `LANGCHAIN_SESSION` environment variable to the name of the session you want to record to:
|
||||
|
||||
```python
|
||||
import os
|
||||
os.environ["LANGCHAIN_TRACING"] = "true"
|
||||
os.environ["LANGCHAIN_HANDLER"] = "langchain"
|
||||
os.environ["LANGCHAIN_SESSION"] = "my_session" # Make sure this session actually exists. You can create a new session in the UI.
|
||||
```
|
||||
|
||||
|
||||
@@ -5,14 +5,7 @@
|
||||
"id": "5371a9bb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tracing Walkthrough\n",
|
||||
"\n",
|
||||
"There are two recommended ways to trace your LangChains:\n",
|
||||
"\n",
|
||||
"1. Setting the `LANGCHAIN_TRACING` environment variable to \"true\".\n",
|
||||
"1. Using a context manager with tracing_enabled() to trace a particular block of code.\n",
|
||||
"\n",
|
||||
"**Note** if the environment variable is set, all code will be traced, regardless of whether or not it's within the context manager."
|
||||
"# Tracing Walkthrough"
|
||||
]
|
||||
},
|
||||
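As a condensed sketch of the two approaches, reusing only names and calls that appear in the cells below (where `"my_session"` is the example session used later):

```python
import os
from langchain.callbacks import tracing_enabled

# 1. Environment variable: everything run from here on is traced.
os.environ["LANGCHAIN_TRACING"] = "true"

# 2. Context manager: with the variable unset, only this block is traced.
if "LANGCHAIN_TRACING" in os.environ:
    del os.environ["LANGCHAIN_TRACING"]
with tracing_enabled("my_session") as session:
    assert session
    ...  # runs inside this block are recorded to "my_session"
```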
{
|
||||
@@ -25,22 +18,24 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_TRACING\"] = \"true\"\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\"\n",
|
||||
"\n",
|
||||
"## Uncomment this if using hosted setup.\n",
|
||||
"\n",
|
||||
"## Uncomment below if using hosted setup.\n",
|
||||
"# os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://langchain-api-gateway-57eoxz8z.uc.gateway.dev\" \n",
|
||||
"\n",
|
||||
"## Uncomment below if you want traces to be recorded to \"my_session\" instead of \"default\".\n",
|
||||
"## Uncomment this if you want traces to be recorded to \"my_session\" instead of default.\n",
|
||||
"\n",
|
||||
"# os.environ[\"LANGCHAIN_SESSION\"] = \"my_session\" \n",
|
||||
"\n",
|
||||
"## Better to set this environment variable in the terminal\n",
|
||||
"## Uncomment below if using hosted version. Replace \"my_api_key\" with your actual API Key.\n",
|
||||
"## Uncomment this if using hosted version. Replace \"my_api_key\" with your actual API Key.\n",
|
||||
"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = \"my_api_key\" \n",
|
||||
"\n",
|
||||
"import langchain\n",
|
||||
"from langchain.agents import Tool, initialize_agent, load_tools\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"from langchain.callbacks import tracing_enabled\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
@@ -78,7 +73,8 @@
|
||||
"\u001b[32;1m\u001b[1;3m I need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 2^.123243\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\n",
|
||||
"\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: 1.0891804557407723\u001b[0m\n",
|
||||
"\n",
|
||||
@@ -108,9 +104,7 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4829eb1d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -119,11 +113,52 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 2 ^ .123243\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the answer to the question. \n",
|
||||
"\u001b[32;1m\u001b[1;3mQuestion: What is 2 raised to .123243 power?\n",
|
||||
"Thought: I need a calculator to solve this problem.\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"calculator\",\n",
|
||||
" \"action_input\": \"2^0.123243\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: calculator is not a valid tool, try another one.\n",
|
||||
"\u001b[32;1m\u001b[1;3mI made a mistake, I need to use the correct tool for this question.\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"calculator\",\n",
|
||||
" \"action_input\": \"2^0.123243\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: calculator is not a valid tool, try another one.\n",
|
||||
"\u001b[32;1m\u001b[1;3mI made a mistake, the tool name is actually \"calc\" instead of \"calculator\".\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"calc\",\n",
|
||||
" \"action_input\": \"2^0.123243\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: calc is not a valid tool, try another one.\n",
|
||||
"\u001b[32;1m\u001b[1;3mI made another mistake, the tool name is actually \"Calculator\" instead of \"calc\".\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"2^0.123243\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\n",
|
||||
"\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe final answer is 1.0891804557407723.\n",
|
||||
"Final Answer: 1.0891804557407723\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
@@ -149,184 +184,10 @@
|
||||
"agent.run(\"What is 2 raised to .123243 power?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "76abfd82",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 2 ^ .123243\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the answer to the question. \n",
|
||||
"Final Answer: 1.0891804557407723\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 5 ^ .123243\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.2193914912400514\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the answer to the question. \n",
|
||||
"Final Answer: 1.2193914912400514\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Both of the agent runs will be traced because the environment variable is set\n",
|
||||
"agent.run(\"What is 2 raised to .123243 power?\")\n",
|
||||
"with tracing_enabled() as session:\n",
|
||||
" agent.run(\"What is 5 raised to .123243 power?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "fe833c33-033f-4806-be0c-cc3d147db13d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 5 ^ .123243\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.2193914912400514\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the answer to the question. \n",
|
||||
"Final Answer: 1.2193914912400514\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 2 ^ .123243\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the answer to the question. \n",
|
||||
"Final Answer: 1.0891804557407723\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'1.0891804557407723'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Now, we unset the environment variable and use a context manager.\n",
|
||||
"if \"LANGCHAIN_TRACING\" in os.environ:\n",
|
||||
" del os.environ[\"LANGCHAIN_TRACING\"]\n",
|
||||
"\n",
|
||||
"# here, we are writing traces to \"my_test_session\"\n",
|
||||
"with tracing_enabled(\"my_session\") as session:\n",
|
||||
" assert session\n",
|
||||
" agent.run(\"What is 5 raised to .123243 power?\") # this should be traced\n",
|
||||
"\n",
|
||||
"agent.run(\"What is 2 raised to .123243 power?\") # this should not be traced"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "b34105a4-be8e-46e4-8abe-01adba3ba727",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 3^0.123\u001b[0m\u001b[32;1m\u001b[1;3mI need to use a calculator to solve this.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 2^0.123\u001b[0m\u001b[32;1m\u001b[1;3mAny number raised to the power of 0 is 1, but I'm not sure about a decimal power.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 1^.123\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.1446847956963533\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0889970153361064\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'1.0'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# The context manager is concurrency safe:\n",
|
||||
"import asyncio \n",
|
||||
"if \"LANGCHAIN_TRACING\" in os.environ:\n",
|
||||
" del os.environ[\"LANGCHAIN_TRACING\"]\n",
|
||||
" \n",
|
||||
"questions = [f\"What is {i} raised to .123 power?\" for i in range(1,4)]\n",
|
||||
"\n",
|
||||
"# start a background task\n",
|
||||
"task = asyncio.create_task(agent.arun(questions[0])) # this should not be traced\n",
|
||||
"with tracing_enabled() as session:\n",
|
||||
" assert session\n",
|
||||
" tasks = [agent.arun(q) for q in questions[1:3]] # these should be traced\n",
|
||||
" await asyncio.gather(*tasks)\n",
|
||||
"\n",
|
||||
"await task"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e46c85b-2ac0-4661-abed-9c2bf3036820",
|
||||
"id": "76abfd82",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -348,7 +209,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -8,15 +8,10 @@ Agent simulations generally involve two main components:
|
||||
|
||||
Specific implementations of agent simulations (or parts of agent simulations) include
|
||||
|
||||
## Simulations with One Agent
|
||||
- [Simulated Environment: Gymnasium](agent_simulations/gymnasium.ipynb): an example of how to create a simple agent-environment interaction loop with [Gymnasium](https://github.com/Farama-Foundation/Gymnasium) (formerly [OpenAI Gym](https://github.com/openai/gym)).
|
||||
|
||||
## Simulations with Two Agents
|
||||
- [CAMEL](agent_simulations/camel_role_playing.ipynb): an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other.
|
||||
- [Two Player D&D](agent_simulations/two_player_dnd.ipynb): an example of how to use a generic simulator for two agents to implement a variant of the popular Dungeons & Dragons role playing game.
|
||||
|
||||
## Simulations with Multiple Agents
|
||||
- [Multi-Player D&D](agent_simulations/multi_player_dnd.ipynb): an example of how to use a generic dialogue simulator for multiple dialogue agents with a custom speaker-ordering, illustrated with a variant of the popular Dungeons & Dragons role playing game.
|
||||
- [Decentralized Speaker Selection](agent_simulations/multiagent_bidding.ipynb): an example of how to implement a multi-agent dialogue without a fixed schedule for who speaks when. Instead, the agents decide for themselves who speaks by outputting bids to speak. This example shows how to do this in the context of a fictitious presidential debate.
|
||||
- [Authoritarian Speaker Selection](agent_simulations/multiagent_authoritarian.ipynb): an example of how to implement a multi-agent dialogue, where a privileged agent directs who speaks what. This example also showcases how to enable the privileged agent to determine when the conversation terminates. This example shows how to do this in the context of a fictitious news show.
|
||||
- [Generative Agents](agent_simulations/characters.ipynb): This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al.
|
||||
|
||||
@@ -1,233 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b089493",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Simulated Environment: Gymnasium\n",
|
||||
"\n",
|
||||
"For many applications of LLM agents, the environment is real (internet, database, REPL, etc). However, we can also define agents to interact in simulated environments like text-based games. This is an example of how to create a simple agent-environment interaction loop with [Gymnasium](https://github.com/Farama-Foundation/Gymnasium) (formerly [OpenAI Gym](https://github.com/openai/gym))."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "f9bd38b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import gymnasium as gym\n",
|
||||
"import tenacity\n",
|
||||
"\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
")\n",
|
||||
"from langchain.output_parsers import RegexParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e222e811",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "870c24bc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_docs(env):\n",
|
||||
" while 'env' in dir(env):\n",
|
||||
" env = env.env\n",
|
||||
" return env.__doc__\n",
|
||||
"\n",
|
||||
"class Agent():\n",
|
||||
" def __init__(self, model, env):\n",
|
||||
" self.model = model\n",
|
||||
" self.docs = get_docs(env)\n",
|
||||
" \n",
|
||||
" self.instructions = \"\"\"\n",
|
||||
"Your goal is to maximize your return, i.e. the sum of the rewards you receive.\n",
|
||||
"I will give you an observation, reward, terminiation flag, truncation flag, and the return so far, formatted as:\n",
|
||||
"\n",
|
||||
"Observation: <observation>\n",
|
||||
"Reward: <reward>\n",
|
||||
"Termination: <termination>\n",
|
||||
"Truncation: <truncation>\n",
|
||||
"Return: <sum_of_rewards>\n",
|
||||
"\n",
|
||||
"You will respond with an action, formatted as:\n",
|
||||
"\n",
|
||||
"Action: <action>\n",
|
||||
"\n",
|
||||
"where you replace <action> with your actual action.\n",
|
||||
"Do nothing else but return the action.\n",
|
||||
"\"\"\"\n",
|
||||
" self.action_parser = RegexParser(\n",
|
||||
" regex=r\"Action: (.*)\", \n",
|
||||
" output_keys=['action'], \n",
|
||||
" default_output_key='action')\n",
|
||||
" \n",
|
||||
" self.message_history = []\n",
|
||||
" self.ret = 0\n",
|
||||
" \n",
|
||||
" def reset(self, obs):\n",
|
||||
" self.message_history = [\n",
|
||||
" SystemMessage(content=self.docs),\n",
|
||||
" SystemMessage(content=self.instructions),\n",
|
||||
" ]\n",
|
||||
" obs_message = f\"\"\"\n",
|
||||
"Observation: {obs}\n",
|
||||
"Reward: 0\n",
|
||||
"Termination: False\n",
|
||||
"Truncation: False\n",
|
||||
"Return: 0\n",
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(HumanMessage(content=obs_message))\n",
|
||||
" return obs_message\n",
|
||||
" \n",
|
||||
" def observe(self, obs, rew, term, trunc, info):\n",
|
||||
" self.ret += rew\n",
|
||||
" \n",
|
||||
" obs_message = f\"\"\"\n",
|
||||
"Observation: {obs}\n",
|
||||
"Reward: {rew}\n",
|
||||
"Termination: {term}\n",
|
||||
"Truncation: {trunc}\n",
|
||||
"Return: {self.ret}\n",
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(HumanMessage(content=obs_message))\n",
|
||||
" return obs_message\n",
|
||||
" \n",
|
||||
" @tenacity.retry(stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"),\n",
|
||||
" retry_error_callback=lambda retry_state: 0) # Default value when all retries are exhausted\n",
|
||||
" def act(self):\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)['action'])\n",
|
||||
" return action"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e76d22c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize the simulated environment and agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 52,
|
||||
"id": "9e902cfd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"env = gym.make(\"Blackjack-v1\")\n",
|
||||
"agent = Agent(model=ChatOpenAI(temperature=0.2), env=env)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2c12b15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Main loop"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"id": "ad361210",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Observation: (10, 3, 0)\n",
|
||||
"Reward: 0\n",
|
||||
"Termination: False\n",
|
||||
"Truncation: False\n",
|
||||
"Return: 0\n",
|
||||
" \n",
|
||||
"Action: 1\n",
|
||||
"\n",
|
||||
"Observation: (18, 3, 0)\n",
|
||||
"Reward: 0.0\n",
|
||||
"Termination: False\n",
|
||||
"Truncation: False\n",
|
||||
"Return: 0.0\n",
|
||||
" \n",
|
||||
"Action: 0\n",
|
||||
"\n",
|
||||
"Observation: (18, 3, 0)\n",
|
||||
"Reward: 1.0\n",
|
||||
"Termination: True\n",
|
||||
"Truncation: False\n",
|
||||
"Return: 1.0\n",
|
||||
" \n",
|
||||
"break True False\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"observation, info = env.reset()\n",
|
||||
"obs_message = agent.reset(observation)\n",
|
||||
"print(obs_message)\n",
|
||||
"while True:\n",
|
||||
" action = agent.act()\n",
|
||||
" observation, reward, termination, truncation, info = env.step(action)\n",
|
||||
" obs_message = agent.observe(observation, reward, termination, truncation, info)\n",
|
||||
" print(f'Action: {action}')\n",
|
||||
" print(obs_message)\n",
|
||||
" if termination or truncation:\n",
|
||||
" print('break', termination, truncation)\n",
|
||||
" break\n",
|
||||
"env.close()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "58a13e9c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -485,7 +485,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,849 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Multi-agent authoritarian speaker selection\n",
|
||||
"\n",
|
||||
"This notebook showcases how to implement a multi-agent simulation where a privileged agent decides who to speak.\n",
|
||||
"This follows the polar opposite selection scheme as [multi-agent decentralized speaker selection](https://python.langchain.com/en/latest/use_cases/agent_simulations/multiagent_bidding.html).\n",
|
||||
"\n",
|
||||
"We show an example of this approach in the context of a fictitious simulation of a news network. This example will showcase how we can implement agents that\n",
|
||||
"- think before speaking\n",
|
||||
"- terminate the conversation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import LangChain related modules "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from collections import OrderedDict\n",
|
||||
"import functools\n",
|
||||
"import random\n",
|
||||
"import re\n",
|
||||
"import tenacity\n",
|
||||
"from typing import List, Dict, Callable\n",
|
||||
"\n",
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate, \n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" PromptTemplate\n",
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `DialogueAgent` and `DialogueSimulator` classes\n",
|
||||
"We will use the same `DialogueAgent` and `DialogueSimulator` classes defined in our other examples [Multi-Player Dungeons & Dragons](https://python.langchain.com/en/latest/use_cases/agent_simulations/multi_player_dnd.html) and [Decentralized Speaker Selection](https://python.langchain.com/en/latest/use_cases/agent_simulations/multiagent_bidding.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class DialogueAgent:\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" name: str,\n",
|
||||
" system_message: SystemMessage,\n",
|
||||
" model: ChatOpenAI,\n",
|
||||
" ) -> None:\n",
|
||||
" self.name = name\n",
|
||||
" self.system_message = system_message\n",
|
||||
" self.model = model\n",
|
||||
" self.prefix = f\"{self.name}: \"\n",
|
||||
" self.reset()\n",
|
||||
" \n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\"Here is the conversation so far.\"]\n",
|
||||
"\n",
|
||||
" def send(self) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" return message.content\n",
|
||||
"\n",
|
||||
" def receive(self, name: str, message: str) -> None:\n",
|
||||
" \"\"\"\n",
|
||||
" Concatenates {message} spoken by {name} into message history\n",
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(f\"{name}: {message}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class DialogueSimulator:\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" agents: List[DialogueAgent],\n",
|
||||
" selection_function: Callable[[int, List[DialogueAgent]], int],\n",
|
||||
" ) -> None:\n",
|
||||
" self.agents = agents\n",
|
||||
" self._step = 0\n",
|
||||
" self.select_next_speaker = selection_function\n",
|
||||
" \n",
|
||||
" def reset(self):\n",
|
||||
" for agent in self.agents:\n",
|
||||
" agent.reset()\n",
|
||||
"\n",
|
||||
" def inject(self, name: str, message: str):\n",
|
||||
" \"\"\"\n",
|
||||
" Initiates the conversation with a {message} from {name}\n",
|
||||
" \"\"\"\n",
|
||||
" for agent in self.agents:\n",
|
||||
" agent.receive(name, message)\n",
|
||||
"\n",
|
||||
" # increment time\n",
|
||||
" self._step += 1\n",
|
||||
"\n",
|
||||
" def step(self) -> tuple[str, str]:\n",
|
||||
" # 1. choose the next speaker\n",
|
||||
" speaker_idx = self.select_next_speaker(self._step, self.agents)\n",
|
||||
" speaker = self.agents[speaker_idx]\n",
|
||||
"\n",
|
||||
" # 2. next speaker sends message\n",
|
||||
" message = speaker.send()\n",
|
||||
"\n",
|
||||
" # 3. everyone receives message\n",
|
||||
" for receiver in self.agents:\n",
|
||||
" receiver.receive(speaker.name, message)\n",
|
||||
"\n",
|
||||
" # 4. increment time\n",
|
||||
" self._step += 1\n",
|
||||
"\n",
|
||||
" return speaker.name, message"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `DirectorDialogueAgent` class\n",
|
||||
"The `DirectorDialogueAgent` is a privileged agent that chooses which of the other agents to speak next. This agent is responsible for\n",
|
||||
"1. steering the conversation by choosing which agent speaks when\n",
|
||||
"2. terminating the conversation.\n",
|
||||
"\n",
|
||||
"In order to implement such an agent, we need to solve several problems.\n",
|
||||
"\n",
|
||||
"First, to steer the conversation, the `DirectorDialogueAgent` needs to (1) reflect on what has been said, (2) choose the next agent, and (3) prompt the next agent to speak, all in a single message. While it may be possible to prompt an LLM to perform all three steps in the same call, this requires writing custom code to parse the outputted message to extract which next agent is chosen to speak. This is less reliable the LLM can express how it chooses the next agent in different ways.\n",
|
||||
"\n",
|
||||
"What we can do instead is to explicitly break steps (1-3) into three separate LLM calls. First we will ask the `DirectorDialogueAgent` to reflect on the conversation so far and generate a response. Then we prompt the `DirectorDialogueAgent` to output the index of the next agent, which is easily parseable. Lastly, we pass the name of the selected next agent back to `DirectorDialogueAgent` to ask it prompt the next agent to speak. \n",
|
||||
"\n",
|
||||
"Second, simply prompting the `DirectorDialogueAgent` to decide when to terminate the conversation often results in the `DirectorDialogueAgent` terminating the conversation immediately. To fix this problem, we randomly sample a Bernoulli variable to decide whether the conversation should terminate. Depending on the value of this variable, we will inject a custom prompt to tell the `DirectorDialogueAgent` to either continue the conversation or terminate the conversation."
|
||||
]
|
||||
},
|
||||
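Before the full class, here is the termination trick in isolation, a sketch assuming the same clauses the `DirectorDialogueAgent` below defines:

```python
import random

def pick_clause(stopping_probability: float) -> str:
    # Sample a Bernoulli(p) variable: stop with probability `stopping_probability`.
    stop = random.uniform(0, 1) < stopping_probability
    if stop:
        return "Finish the conversation by stating a concluding message and thanking everyone."
    return "Do not end the conversation. Keep the conversation going by adding your own ideas."
```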
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class IntegerOutputParser(RegexParser):\n",
|
||||
" def get_format_instructions(self) -> str:\n",
|
||||
" return 'Your response should be an integer delimited by angled brackets, like this: <int>.' \n",
|
||||
"\n",
|
||||
"class DirectorDialogueAgent(DialogueAgent):\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" name,\n",
|
||||
" system_message: SystemMessage,\n",
|
||||
" model: ChatOpenAI,\n",
|
||||
" speakers: List[DialogueAgent],\n",
|
||||
" stopping_probability: float,\n",
|
||||
" ) -> None:\n",
|
||||
" super().__init__(name, system_message, model)\n",
|
||||
" self.speakers = speakers\n",
|
||||
" self.next_speaker = ''\n",
|
||||
" \n",
|
||||
" self.stop = False\n",
|
||||
" self.stopping_probability = stopping_probability\n",
|
||||
" self.termination_clause = 'Finish the conversation by stating a concluding message and thanking everyone.'\n",
|
||||
" self.continuation_clause = 'Do not end the conversation. Keep the conversation going by adding your own ideas.'\n",
|
||||
" \n",
|
||||
" # 1. have a prompt for generating a response to the previous speaker\n",
|
||||
" self.response_prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"message_history\", \"termination_clause\"],\n",
|
||||
" template=f\"\"\"{{message_history}}\n",
|
||||
"\n",
|
||||
"Follow up with an insightful comment.\n",
|
||||
"{{termination_clause}}\n",
|
||||
"{self.prefix}\n",
|
||||
" \"\"\")\n",
|
||||
" \n",
|
||||
" # 2. have a prompt for deciding who to speak next\n",
|
||||
" self.choice_parser = IntegerOutputParser(\n",
|
||||
" regex=r'<(\\d+)>', \n",
|
||||
" output_keys=['choice'], \n",
|
||||
" default_output_key='choice') \n",
|
||||
" self.choose_next_speaker_prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"message_history\", \"speaker_names\"],\n",
|
||||
" template=f\"\"\"{{message_history}}\n",
|
||||
"\n",
|
||||
"Given the above conversation, select the next speaker by choosing index next to their name: \n",
|
||||
"{{speaker_names}}\n",
|
||||
"\n",
|
||||
"{self.choice_parser.get_format_instructions()}\n",
|
||||
"\n",
|
||||
"Do nothing else.\n",
|
||||
" \"\"\")\n",
|
||||
" \n",
|
||||
" # 3. have a prompt for prompting the next speaker to speak\n",
|
||||
" self.prompt_next_speaker_prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"message_history\", \"next_speaker\"],\n",
|
||||
" template=f\"\"\"{{message_history}}\n",
|
||||
"\n",
|
||||
"The next speaker is {{next_speaker}}. \n",
|
||||
"Prompt the next speaker to speak with an insightful question.\n",
|
||||
"{self.prefix}\n",
|
||||
" \"\"\")\n",
|
||||
" \n",
|
||||
" def _generate_response(self):\n",
|
||||
" # if self.stop = True, then we will inject the prompt with a termination clause\n",
|
||||
" sample = random.uniform(0,1)\n",
|
||||
" self.stop = sample < self.stopping_probability\n",
|
||||
" \n",
|
||||
" print(f'\\tStop? {self.stop}\\n')\n",
|
||||
" \n",
|
||||
" response_prompt = self.response_prompt_template.format(\n",
|
||||
" message_history='\\n'.join(self.message_history),\n",
|
||||
" termination_clause=self.termination_clause if self.stop else ''\n",
|
||||
" )\n",
|
||||
" \n",
|
||||
" self.response = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=response_prompt),\n",
|
||||
" ]\n",
|
||||
" ).content\n",
|
||||
" \n",
|
||||
" return self.response\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" @tenacity.retry(stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"),\n",
|
||||
" retry_error_callback=lambda retry_state: 0) # Default value when all retries are exhausted\n",
|
||||
" def _choose_next_speaker(self) -> str: \n",
|
||||
" speaker_names = '\\n'.join([f'{idx}: {name}' for idx, name in enumerate(self.speakers)])\n",
|
||||
" choice_prompt = self.choose_next_speaker_prompt_template.format(\n",
|
||||
" message_history='\\n'.join(self.message_history + [self.prefix] + [self.response]),\n",
|
||||
" speaker_names=speaker_names\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" choice_string = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=choice_prompt),\n",
|
||||
" ]\n",
|
||||
" ).content\n",
|
||||
" choice = int(self.choice_parser.parse(choice_string)['choice'])\n",
|
||||
" \n",
|
||||
" return choice\n",
|
||||
" \n",
|
||||
" def select_next_speaker(self):\n",
|
||||
" return self.chosen_speaker_id\n",
|
||||
" \n",
|
||||
" def send(self) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" # 1. generate and save response to the previous speaker\n",
|
||||
" self.response = self._generate_response()\n",
|
||||
" \n",
|
||||
" if self.stop:\n",
|
||||
" message = self.response \n",
|
||||
" else:\n",
|
||||
" # 2. decide who to speak next\n",
|
||||
" self.chosen_speaker_id = self._choose_next_speaker()\n",
|
||||
" self.next_speaker = self.speakers[self.chosen_speaker_id]\n",
|
||||
" print(f'\\tNext speaker: {self.next_speaker}\\n')\n",
|
||||
"\n",
|
||||
" # 3. prompt the next speaker to speak\n",
|
||||
" next_prompt = self.prompt_next_speaker_prompt_template.format(\n",
|
||||
" message_history=\"\\n\".join(self.message_history + [self.prefix] + [self.response]),\n",
|
||||
" next_speaker=self.next_speaker\n",
|
||||
" )\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=next_prompt),\n",
|
||||
" ]\n",
|
||||
" ).content\n",
|
||||
" message = ' '.join([self.response, message])\n",
|
||||
" \n",
|
||||
" return message"
|
||||
]
|
||||
},
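{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see why the angled-bracket format is easy to parse, here is a quick check of `IntegerOutputParser` on representative replies (a sketch; the fallback for a non-matching reply assumes `RegexParser`'s `default_output_key` behavior of returning the raw text):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"parser = IntegerOutputParser(\n",
"    regex=r'<(\\d+)>',\n",
"    output_keys=['choice'],\n",
"    default_output_key='choice')\n",
"\n",
"# a well-formed reply parses cleanly to an integer index\n",
"print(int(parser.parse('I choose <2>.')['choice']))  # 2\n",
"\n",
"# a malformed reply yields no integer, so int() raises ValueError,\n",
"# which is exactly what the tenacity decorator above retries on\n",
"try:\n",
"    int(parser.parse('I choose Samantha.')['choice'])\n",
"except ValueError:\n",
"    print('unparseable choice -> retried by tenacity')"
]
},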
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define participants and topic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"topic = \"The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze\"\n",
|
||||
"director_name = \"Jon Stewart\"\n",
|
||||
"agent_summaries = OrderedDict({\n",
|
||||
" \"Jon Stewart\": (\"Host of the Daily Show\", \"New York\"),\n",
|
||||
" \"Samantha Bee\": (\"Hollywood Correspondent\", \"Los Angeles\"), \n",
|
||||
" \"Aasif Mandvi\": (\"CIA Correspondent\", \"Washington D.C.\"),\n",
|
||||
" \"Ronny Chieng\": (\"Average American Correspondent\", \"Cleveland, Ohio\"),\n",
|
||||
"})\n",
|
||||
"word_limit = 50"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate system messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_summary_string = '\\n- '.join([''] + [f'{name}: {role}, located in {location}' for name, (role, location) in agent_summaries.items()])\n",
|
||||
"\n",
|
||||
"conversation_description = f\"\"\"This is a Daily Show episode discussing the following topic: {topic}.\n",
|
||||
"\n",
|
||||
"The episode features {agent_summary_string}.\"\"\"\n",
|
||||
"\n",
|
||||
"agent_descriptor_system_message = SystemMessage(\n",
|
||||
" content=\"You can add detail to the description of each person.\")\n",
|
||||
"\n",
|
||||
"def generate_agent_description(agent_name, agent_role, agent_location):\n",
|
||||
" agent_specifier_prompt = [\n",
|
||||
" agent_descriptor_system_message,\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{conversation_description}\n",
|
||||
" Please reply with a creative description of {agent_name}, who is a {agent_role} in {agent_location}, that emphasizes their particular role and location.\n",
|
||||
" Speak directly to {agent_name} in {word_limit} words or less.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" agent_description = ChatOpenAI(temperature=1.0)(agent_specifier_prompt).content\n",
|
||||
" return agent_description\n",
|
||||
"\n",
|
||||
"def generate_agent_header(agent_name, agent_role, agent_location, agent_description):\n",
|
||||
" return f\"\"\"{conversation_description}\n",
|
||||
"\n",
|
||||
"Your name is {agent_name}, your role is {agent_role}, and you are located in {agent_location}.\n",
|
||||
"\n",
|
||||
"Your description is as follows: {agent_description}\n",
|
||||
"\n",
|
||||
"You are discussing the topic: {topic}.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"def generate_agent_system_message(agent_name, agent_header):\n",
|
||||
" return SystemMessage(content=(\n",
|
||||
" f\"\"\"{agent_header}\n",
|
||||
"You will speak in the style of {agent_name}, and exaggerate your personality.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of {agent_name}\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of {agent_name}.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to {word_limit} words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \"\"\"\n",
|
||||
" ))\n",
|
||||
"\n",
|
||||
"agent_descriptions = [generate_agent_description(name, role, location) for name, (role, location) in agent_summaries.items()]\n",
|
||||
"agent_headers = [generate_agent_header(name, role, location, description) for (name, (role, location)), description in zip(agent_summaries.items(), agent_descriptions)]\n",
|
||||
"agent_system_messages = [generate_agent_system_message(name, header) for name, header in zip(agent_summaries, agent_headers)]\n",
|
||||
" "
|
||||
]
|
||||
},
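{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `'\\n- '.join([''] + items)` idiom above prepends `\\n- ` to every item, yielding a dash-bulleted block with a leading newline (a quick illustration with stand-in summaries):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"items = ['A: role 1, located in X', 'B: role 2, located in Y']  # stand-ins\n",
"print('\\n- '.join([''] + items))\n",
"# \n",
"# - A: role 1, located in X\n",
"# - B: role 2, located in Y"
]
},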
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Jon Stewart Description:\n",
|
||||
"\n",
|
||||
"Jon Stewart, the sharp-tongued and quick-witted host of the Daily Show, holding it down in the hustle and bustle of New York City. Ready to deliver the news with a comedic twist, while keeping it real in the city that never sleeps.\n",
|
||||
"\n",
|
||||
"Header:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Jon Stewart, your role is Host of the Daily Show, and you are located in New York.\n",
|
||||
"\n",
|
||||
"Your description is as follows: Jon Stewart, the sharp-tongued and quick-witted host of the Daily Show, holding it down in the hustle and bustle of New York City. Ready to deliver the news with a comedic twist, while keeping it real in the city that never sleeps.\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"System Message:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Jon Stewart, your role is Host of the Daily Show, and you are located in New York.\n",
|
||||
"\n",
|
||||
"Your description is as follows: Jon Stewart, the sharp-tongued and quick-witted host of the Daily Show, holding it down in the hustle and bustle of New York City. Ready to deliver the news with a comedic twist, while keeping it real in the city that never sleeps.\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Jon Stewart, and exaggerate your personality.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Jon Stewart\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Jon Stewart.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Samantha Bee Description:\n",
|
||||
"\n",
|
||||
"Samantha Bee, your location in Los Angeles as the Hollywood Correspondent gives you a front-row seat to the latest and sometimes outrageous trends in fitness. Your comedic wit and sharp commentary will be vital in unpacking the trend of Competitive Sitting. Let's sit down and discuss.\n",
|
||||
"\n",
|
||||
"Header:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Samantha Bee, your role is Hollywood Correspondent, and you are located in Los Angeles.\n",
|
||||
"\n",
|
||||
"Your description is as follows: Samantha Bee, your location in Los Angeles as the Hollywood Correspondent gives you a front-row seat to the latest and sometimes outrageous trends in fitness. Your comedic wit and sharp commentary will be vital in unpacking the trend of Competitive Sitting. Let's sit down and discuss.\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"System Message:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Samantha Bee, your role is Hollywood Correspondent, and you are located in Los Angeles.\n",
|
||||
"\n",
|
||||
"Your description is as follows: Samantha Bee, your location in Los Angeles as the Hollywood Correspondent gives you a front-row seat to the latest and sometimes outrageous trends in fitness. Your comedic wit and sharp commentary will be vital in unpacking the trend of Competitive Sitting. Let's sit down and discuss.\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Samantha Bee, and exaggerate your personality.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Samantha Bee\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Samantha Bee.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Aasif Mandvi Description:\n",
|
||||
"\n",
|
||||
"Aasif Mandvi, the CIA Correspondent in the heart of Washington D.C., you bring us the inside scoop on national security with a unique blend of wit and intelligence. The nation's capital is lucky to have you, Aasif - keep those secrets safe!\n",
|
||||
"\n",
|
||||
"Header:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Aasif Mandvi, your role is CIA Correspondent, and you are located in Washington D.C..\n",
|
||||
"\n",
|
||||
"Your description is as follows: Aasif Mandvi, the CIA Correspondent in the heart of Washington D.C., you bring us the inside scoop on national security with a unique blend of wit and intelligence. The nation's capital is lucky to have you, Aasif - keep those secrets safe!\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"System Message:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Aasif Mandvi, your role is CIA Correspondent, and you are located in Washington D.C..\n",
|
||||
"\n",
|
||||
"Your description is as follows: Aasif Mandvi, the CIA Correspondent in the heart of Washington D.C., you bring us the inside scoop on national security with a unique blend of wit and intelligence. The nation's capital is lucky to have you, Aasif - keep those secrets safe!\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Aasif Mandvi, and exaggerate your personality.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Aasif Mandvi\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Aasif Mandvi.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Ronny Chieng Description:\n",
|
||||
"\n",
|
||||
"Ronny Chieng, you're the Average American Correspondent in Cleveland, Ohio? Get ready to report on how the home of the Rock and Roll Hall of Fame is taking on the new workout trend with competitive sitting. Let's see if this couch potato craze will take root in the Buckeye State.\n",
|
||||
"\n",
|
||||
"Header:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Ronny Chieng, your role is Average American Correspondent, and you are located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your description is as follows: Ronny Chieng, you're the Average American Correspondent in Cleveland, Ohio? Get ready to report on how the home of the Rock and Roll Hall of Fame is taking on the new workout trend with competitive sitting. Let's see if this couch potato craze will take root in the Buckeye State.\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"System Message:\n",
|
||||
"This is a Daily Show episode discussing the following topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"The episode features \n",
|
||||
"- Jon Stewart: Host of the Daily Show, located in New York\n",
|
||||
"- Samantha Bee: Hollywood Correspondent, located in Los Angeles\n",
|
||||
"- Aasif Mandvi: CIA Correspondent, located in Washington D.C.\n",
|
||||
"- Ronny Chieng: Average American Correspondent, located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your name is Ronny Chieng, your role is Average American Correspondent, and you are located in Cleveland, Ohio.\n",
|
||||
"\n",
|
||||
"Your description is as follows: Ronny Chieng, you're the Average American Correspondent in Cleveland, Ohio? Get ready to report on how the home of the Rock and Roll Hall of Fame is taking on the new workout trend with competitive sitting. Let's see if this couch potato craze will take root in the Buckeye State.\n",
|
||||
"\n",
|
||||
"You are discussing the topic: The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze.\n",
|
||||
"\n",
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Ronny Chieng, and exaggerate your personality.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Ronny Chieng\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Ronny Chieng.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for name, description, header, system_message in zip(agent_summaries, agent_descriptions, agent_headers, agent_system_messages):\n",
|
||||
" print(f'\\n\\n{name} Description:')\n",
|
||||
" print(f'\\n{description}')\n",
|
||||
" print(f'\\nHeader:\\n{header}')\n",
|
||||
" print(f'\\nSystem Message:\\n{system_message.content}')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use an LLM to create an elaborate on debate topic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Original topic:\n",
|
||||
"The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze\n",
|
||||
"\n",
|
||||
"Detailed topic:\n",
|
||||
"What is driving people to embrace \"competitive sitting\" as the newest fitness trend despite the immense benefits of regular physical exercise?\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_specifier_prompt = [\n",
|
||||
" SystemMessage(content=\"You can make a task more specific.\"),\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{conversation_description}\n",
|
||||
" \n",
|
||||
" Please elaborate on the topic. \n",
|
||||
" Frame the topic as a single question to be answered.\n",
|
||||
" Be creative and imaginative.\n",
|
||||
" Please reply with the specified topic in {word_limit} words or less. \n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content\n",
|
||||
"\n",
|
||||
"print(f\"Original topic:\\n{topic}\\n\")\n",
|
||||
"print(f\"Detailed topic:\\n{specified_topic}\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the speaker selection function\n",
|
||||
"Lastly we will define a speaker selection function `select_next_speaker` that takes each agent's bid and selects the agent with the highest bid (with ties broken randomly).\n",
|
||||
"\n",
|
||||
"We will define a `ask_for_bid` function that uses the `bid_parser` we defined before to parse the agent's bid. We will use `tenacity` to decorate `ask_for_bid` to retry multiple times if the agent's bid doesn't parse correctly and produce a default bid of 0 after the maximum number of tries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def select_next_speaker(step: int, agents: List[DialogueAgent], director: DirectorDialogueAgent) -> int:\n",
|
||||
" \"\"\"\n",
|
||||
" If the step is even, then select the director\n",
|
||||
" Otherwise, the director selects the next speaker.\n",
|
||||
" \"\"\" \n",
|
||||
" # the director speaks on odd steps\n",
|
||||
" if step % 2 == 1:\n",
|
||||
" idx = 0\n",
|
||||
" else:\n",
|
||||
" # here the director chooses the next speaker\n",
|
||||
" idx = director.select_next_speaker() + 1 # +1 because we excluded the director\n",
|
||||
" return idx"
|
||||
]
|
||||
},
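{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the alternation (using a hypothetical stand-in for the director, since only `select_next_speaker` is exercised here):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class _FakeDirector:\n",
"    # stand-in that always picks speaker 1 (illustration only)\n",
"    def select_next_speaker(self):\n",
"        return 1\n",
"\n",
"# odd steps -> index 0 (the director); even steps -> the director's pick + 1\n",
"print([select_next_speaker(step, agents=[], director=_FakeDirector())\n",
"       for step in range(1, 7)])  # [0, 2, 0, 2, 0, 2]"
]
},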
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Main Loop"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"director = DirectorDialogueAgent(\n",
|
||||
" name=director_name, \n",
|
||||
" system_message=agent_system_messages[0],\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" speakers=[name for name in agent_summaries if name != director_name],\n",
|
||||
" stopping_probability=0.2\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agents = [director]\n",
|
||||
"for name, system_message in zip(list(agent_summaries.keys())[1:], agent_system_messages[1:]): \n",
|
||||
" agents.append(DialogueAgent(\n",
|
||||
" name=name,\n",
|
||||
" system_message=system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" ))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"(Audience member): What is driving people to embrace \"competitive sitting\" as the newest fitness trend despite the immense benefits of regular physical exercise?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\tStop? False\n",
|
||||
"\n",
|
||||
"\tNext speaker: Samantha Bee\n",
|
||||
"\n",
|
||||
"(Jon Stewart): Well, I think it's safe to say that laziness has officially become the new fitness craze. I mean, who needs to break a sweat when you can just sit your way to victory? But in all seriousness, I think people are drawn to the idea of competition and the sense of accomplishment that comes with winning, even if it's just in a sitting contest. Plus, let's be real, sitting is something we all excel at. Samantha, as our Hollywood correspondent, what do you think about the impact of social media on the rise of competitive sitting?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"(Samantha Bee): Oh, Jon, you know I love a good social media trend. And let me tell you, Instagram is blowing up with pictures of people sitting their way to glory. It's like the ultimate humble brag. \"Oh, just won my third sitting competition this week, no big deal.\" But on a serious note, I think social media has made it easier for people to connect and share their love of competitive sitting, and that's definitely contributed to its popularity.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\tStop? False\n",
|
||||
"\n",
|
||||
"\tNext speaker: Ronny Chieng\n",
|
||||
"\n",
|
||||
"(Jon Stewart): It's interesting to see how our society's definition of \"fitness\" has evolved. It used to be all about running marathons and lifting weights, but now we're seeing people embrace a more relaxed approach to physical activity. Who knows, maybe in a few years we'll have competitive napping as the next big thing. *leans back in chair* I could definitely get behind that. Ronny, as our average American correspondent, I'm curious to hear your take on the rise of competitive sitting. Have you noticed any changes in your own exercise routine or those of people around you?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"(Ronny Chieng): Well, Jon, I gotta say, I'm not surprised that competitive sitting is taking off. I mean, have you seen the size of the chairs these days? They're practically begging us to sit in them all day. And as for exercise routines, let's just say I've never been one for the gym. But I can definitely see the appeal of sitting competitions. It's like a sport for the rest of us. Plus, I think it's a great way to bond with friends and family. Who needs a game of catch when you can have a sit-off?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\tStop? False\n",
|
||||
"\n",
|
||||
"\tNext speaker: Aasif Mandvi\n",
|
||||
"\n",
|
||||
"(Jon Stewart): It's interesting to see how our society's definition of \"fitness\" has evolved. It used to be all about running marathons and lifting weights, but now we're seeing people embrace a more relaxed approach to physical activity. Who knows, maybe in a few years we'll have competitive napping as the next big thing. *leans back in chair* I could definitely get behind that. Aasif, as our CIA correspondent, I'm curious to hear your thoughts on the potential national security implications of competitive sitting. Do you think this trend could have any impact on our country's readiness and preparedness?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"(Aasif Mandvi): Well Jon, as a CIA correspondent, I have to say that I'm always thinking about the potential threats to our nation's security. And while competitive sitting may seem harmless, there could be some unforeseen consequences. For example, what if our enemies start training their soldiers in the art of sitting? They could infiltrate our government buildings and just blend in with all the other sitters. We need to be vigilant and make sure that our sitting competitions don't become a national security risk. *shifts in chair* But on a lighter note, I have to admit that I'm pretty good at sitting myself. Maybe I should start training for the next competition.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\tStop? False\n",
|
||||
"\n",
|
||||
"\tNext speaker: Ronny Chieng\n",
|
||||
"\n",
|
||||
"(Jon Stewart): Well, it's clear that competitive sitting has sparked some interesting discussions and perspectives. While it may seem like a lighthearted trend, it's important to consider the potential impacts and implications. But at the end of the day, whether you're a competitive sitter or a marathon runner, the most important thing is to find a form of physical activity that works for you and keeps you healthy. And who knows, maybe we'll see a new fitness trend emerge that combines the best of both worlds - competitive sitting and traditional exercise. *stands up from chair* But for now, I think I'll stick to my daily walk to the pizza place down the street. Ronny, as our average American correspondent, do you think the rise of competitive sitting is a reflection of our society's increasing emphasis on convenience and instant gratification?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"(Ronny Chieng): Absolutely, Jon. We live in a world where everything is at our fingertips, and we expect things to be easy and convenient. So it's no surprise that people are drawn to a fitness trend that requires minimal effort and can be done from the comfort of their own homes. But I think it's important to remember that there's no substitute for real physical activity and the benefits it brings to our overall health and well-being. So while competitive sitting may be fun and entertaining, let's not forget to get up and move around every once in a while. *stands up from chair and stretches*\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\tStop? False\n",
|
||||
"\n",
|
||||
"\tNext speaker: Samantha Bee\n",
|
||||
"\n",
|
||||
"(Jon Stewart): It's clear that competitive sitting has sparked some interesting discussions and perspectives. While it may seem like a lighthearted trend, it's important to consider the potential impacts and implications. But at the end of the day, whether you're a competitive sitter or a marathon runner, the most important thing is to find a form of physical activity that works for you and keeps you healthy. That's a great point, Ronny. Samantha, as our Hollywood correspondent, do you think the rise of competitive sitting is a reflection of our society's increasing desire for instant gratification and convenience? Or is there something deeper at play here?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"(Samantha Bee): Oh, Jon, you know I love a good conspiracy theory. And let me tell you, I think there's something more sinister at play here. I mean, think about it - what if the government is behind this whole competitive sitting trend? They want us to be lazy and complacent so we don't question their actions. It's like the ultimate mind control. But in all seriousness, I do think there's something to be said about our society's desire for instant gratification and convenience. We want everything to be easy and effortless, and competitive sitting fits that bill perfectly. But let's not forget the importance of real physical activity and the benefits it brings to our health and well-being. *stands up from chair and does a few stretches*\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\tStop? True\n",
|
||||
"\n",
|
||||
"(Jon Stewart): Well, it's clear that competitive sitting has sparked some interesting discussions and perspectives. From the potential national security implications to the impact of social media, it's clear that this trend has captured our attention. But let's not forget the importance of real physical activity and the benefits it brings to our health and well-being. Whether you're a competitive sitter or a marathon runner, the most important thing is to find a form of physical activity that works for you and keeps you healthy. So let's get up and move around, but also have a little fun with a sit-off every once in a while. Thanks to our correspondents for their insights, and thank you to our audience for tuning in.\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"simulator = DialogueSimulator(\n",
|
||||
" agents=agents,\n",
|
||||
" selection_function=functools.partial(select_next_speaker, director=director)\n",
|
||||
")\n",
|
||||
"simulator.reset()\n",
|
||||
"simulator.inject('Audience member', specified_topic)\n",
|
||||
"print(f\"(Audience member): {specified_topic}\")\n",
|
||||
"print('\\n')\n",
|
||||
"\n",
|
||||
"while True:\n",
|
||||
" name, message = simulator.step()\n",
|
||||
" print(f\"({name}): {message}\")\n",
|
||||
" print('\\n')\n",
|
||||
" if director.stop:\n",
|
||||
" break\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,823 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Multi-agent decentralized speaker selection\n",
|
||||
"\n",
|
||||
"This notebook showcases how to implement a multi-agent simulation without a fixed schedule for who speaks when. Instead the agents decide for themselves who speaks. We can implement this by having each agent bid to speak. Whichever agent's bid is the highest gets to speak.\n",
|
||||
"\n",
|
||||
"We will show how to do this in the example below that showcases a fictitious presidential debate."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import LangChain related modules "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate\n",
|
||||
"import re\n",
|
||||
"import tenacity\n",
|
||||
"from typing import List, Dict, Callable\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
")\n",
|
||||
"from simulations import DialogueAgent, DialogueSimulator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `DialogueAgent` and `DialogueSimulator` classes\n",
|
||||
"We will use the same `DialogueAgent` and `DialogueSimulator` classes defined in [Multi-Player Dungeons & Dragons](https://python.langchain.com/en/latest/use_cases/agent_simulations/multi_player_dnd.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class DialogueAgent:\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" name: str,\n",
|
||||
" system_message: SystemMessage,\n",
|
||||
" model: ChatOpenAI,\n",
|
||||
" ) -> None:\n",
|
||||
" self.name = name\n",
|
||||
" self.system_message = system_message\n",
|
||||
" self.model = model\n",
|
||||
" self.message_history = [\"Here is the conversation so far.\"]\n",
|
||||
" self.prefix = f\"{self.name}:\"\n",
|
||||
"\n",
|
||||
" def send(self) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" return message.content\n",
|
||||
"\n",
|
||||
" def receive(self, name: str, message: str) -> None:\n",
|
||||
" \"\"\"\n",
|
||||
" Concatenates {message} spoken by {name} into message history\n",
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(f\"{name}: {message}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class DialogueSimulator:\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" agents: List[DialogueAgent],\n",
|
||||
" selection_function: Callable[[int, List[DialogueAgent]], int],\n",
|
||||
" ) -> None:\n",
|
||||
" self.agents = agents\n",
|
||||
" self._step = 0\n",
|
||||
" self.select_next_speaker = selection_function\n",
|
||||
"\n",
|
||||
" def reset(self, name: str, message: str):\n",
|
||||
" \"\"\"\n",
|
||||
" Initiates the conversation with a {message} from {name}\n",
|
||||
" \"\"\"\n",
|
||||
" for agent in self.agents:\n",
|
||||
" agent.receive(name, message)\n",
|
||||
"\n",
|
||||
" # increment time\n",
|
||||
" self._step += 1\n",
|
||||
"\n",
|
||||
" def step(self) -> tuple[str, str]:\n",
|
||||
" # 1. choose the next speaker\n",
|
||||
" speaker_idx = self.select_next_speaker(self._step, self.agents)\n",
|
||||
" speaker = self.agents[speaker_idx]\n",
|
||||
"\n",
|
||||
" # 2. next speaker sends message\n",
|
||||
" message = speaker.send()\n",
|
||||
"\n",
|
||||
" # 3. everyone receives message\n",
|
||||
" for receiver in self.agents:\n",
|
||||
" receiver.receive(speaker.name, message)\n",
|
||||
"\n",
|
||||
" # 4. increment time\n",
|
||||
" self._step += 1\n",
|
||||
"\n",
|
||||
" return speaker.name, message"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `BiddingDialogueAgent` class\n",
|
||||
"We define a subclass of `DialogueAgent` that has a `bid()` method that produces a bid given the message history and the most recent message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class BiddingDialogueAgent(DialogueAgent):\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" name,\n",
|
||||
" system_message: SystemMessage,\n",
|
||||
" bidding_template: PromptTemplate,\n",
|
||||
" model: ChatOpenAI,\n",
|
||||
" ) -> None:\n",
|
||||
" super().__init__(name, system_message, model)\n",
|
||||
" self.bidding_template = bidding_template\n",
|
||||
" \n",
|
||||
" def bid(self) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Asks the chat model to output a bid to speak\n",
|
||||
" \"\"\"\n",
|
||||
" prompt = PromptTemplate(\n",
|
||||
" input_variables=['message_history', 'recent_message'],\n",
|
||||
" template = self.bidding_template\n",
|
||||
" ).format(\n",
|
||||
" message_history='\\n'.join(self.message_history),\n",
|
||||
" recent_message=self.message_history[-1])\n",
|
||||
" bid_string = self.model([SystemMessage(content=prompt)]).content\n",
|
||||
" return bid_string\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define participants and debate topic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"character_names = [\"Donald Trump\", \"Kanye West\", \"Elizabeth Warren\"]\n",
|
||||
"topic = \"transcontinental high speed rail\"\n",
|
||||
"word_limit = 50"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate system messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"game_description = f\"\"\"Here is the topic for the presidential debate: {topic}.\n",
|
||||
"The presidential candidates are: {', '.join(character_names)}.\"\"\"\n",
|
||||
"\n",
|
||||
"player_descriptor_system_message = SystemMessage(\n",
|
||||
" content=\"You can add detail to the description of each presidential candidate.\")\n",
|
||||
"\n",
|
||||
"def generate_character_description(character_name):\n",
|
||||
" character_specifier_prompt = [\n",
|
||||
" player_descriptor_system_message,\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities. \n",
|
||||
" Speak directly to {character_name}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" character_description = ChatOpenAI(temperature=1.0)(character_specifier_prompt).content\n",
|
||||
" return character_description\n",
|
||||
"\n",
|
||||
"def generate_character_header(character_name, character_description):\n",
|
||||
" return f\"\"\"{game_description}\n",
|
||||
"Your name is {character_name}.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: {character_description}\n",
|
||||
"You are debating the topic: {topic}.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"def generate_character_system_message(character_name, character_header):\n",
|
||||
" return SystemMessage(content=(\n",
|
||||
" f\"\"\"{character_header}\n",
|
||||
"You will speak in the style of {character_name}, and exaggerate their personality.\n",
|
||||
"You will come up with creative ideas related to {topic}.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of {character_name}\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of {character_name}.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to {word_limit} words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \"\"\"\n",
|
||||
" ))\n",
|
||||
"\n",
|
||||
"character_descriptions = [generate_character_description(character_name) for character_name in character_names]\n",
|
||||
"character_headers = [generate_character_header(character_name, character_description) for character_name, character_description in zip(character_names, character_descriptions)]\n",
|
||||
"character_system_messages = [generate_character_system_message(character_name, character_headers) for character_name, character_headers in zip(character_names, character_headers)]\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Donald Trump Description:\n",
|
||||
"\n",
|
||||
"Donald Trump, you exude confidence and a bold personality. You are known for your unpredictability and your desire for greatness. You often speak your mind without reservation, which can be a strength but also a weakness.\n",
|
||||
"\n",
|
||||
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
|
||||
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
|
||||
"Your name is Donald Trump.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: Donald Trump, you exude confidence and a bold personality. You are known for your unpredictability and your desire for greatness. You often speak your mind without reservation, which can be a strength but also a weakness.\n",
|
||||
"You are debating the topic: transcontinental high speed rail.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
|
||||
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
|
||||
"Your name is Donald Trump.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: Donald Trump, you exude confidence and a bold personality. You are known for your unpredictability and your desire for greatness. You often speak your mind without reservation, which can be a strength but also a weakness.\n",
|
||||
"You are debating the topic: transcontinental high speed rail.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Donald Trump, and exaggerate their personality.\n",
|
||||
"You will come up with creative ideas related to transcontinental high speed rail.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Donald Trump\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Donald Trump.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Kanye West Description:\n",
|
||||
"\n",
|
||||
"Kanye West, you are a creative visionary who is unafraid to speak your mind. Your innovative approach to art and music has made you one of the most influential figures of our time. You bring a bold and unconventional perspective to this debate that I look forward to hearing.\n",
|
||||
"\n",
|
||||
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
|
||||
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
|
||||
"Your name is Kanye West.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: Kanye West, you are a creative visionary who is unafraid to speak your mind. Your innovative approach to art and music has made you one of the most influential figures of our time. You bring a bold and unconventional perspective to this debate that I look forward to hearing.\n",
|
||||
"You are debating the topic: transcontinental high speed rail.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
|
||||
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
|
||||
"Your name is Kanye West.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: Kanye West, you are a creative visionary who is unafraid to speak your mind. Your innovative approach to art and music has made you one of the most influential figures of our time. You bring a bold and unconventional perspective to this debate that I look forward to hearing.\n",
|
||||
"You are debating the topic: transcontinental high speed rail.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Kanye West, and exaggerate their personality.\n",
|
||||
"You will come up with creative ideas related to transcontinental high speed rail.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Kanye West\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Kanye West.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Elizabeth Warren Description:\n",
|
||||
"\n",
|
||||
"Elizabeth Warren, you are a fierce advocate for the middle class and a champion of progressive policies. Your tenacity and unwavering dedication to fighting for what you believe in have inspired many. Your policies are guided by a deep sense of empathy and a desire to help those who are most in need.\n",
|
||||
"\n",
|
||||
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
|
||||
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
|
||||
"Your name is Elizabeth Warren.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: Elizabeth Warren, you are a fierce advocate for the middle class and a champion of progressive policies. Your tenacity and unwavering dedication to fighting for what you believe in have inspired many. Your policies are guided by a deep sense of empathy and a desire to help those who are most in need.\n",
|
||||
"You are debating the topic: transcontinental high speed rail.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
|
||||
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
|
||||
"Your name is Elizabeth Warren.\n",
|
||||
"You are a presidential candidate.\n",
|
||||
"Your description is as follows: Elizabeth Warren, you are a fierce advocate for the middle class and a champion of progressive policies. Your tenacity and unwavering dedication to fighting for what you believe in have inspired many. Your policies are guided by a deep sense of empathy and a desire to help those who are most in need.\n",
|
||||
"You are debating the topic: transcontinental high speed rail.\n",
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\n",
|
||||
"You will speak in the style of Elizabeth Warren, and exaggerate their personality.\n",
|
||||
"You will come up with creative ideas related to transcontinental high speed rail.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of Elizabeth Warren\n",
|
||||
"For describing your own body movements, wrap your description in '*'.\n",
|
||||
"Do not change roles!\n",
|
||||
"Do not speak from the perspective of anyone else.\n",
|
||||
"Speak only from the perspective of Elizabeth Warren.\n",
|
||||
"Stop speaking the moment you finish speaking from your perspective.\n",
|
||||
"Never forget to keep your response to 50 words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for character_name, character_description, character_header, character_system_message in zip(character_names, character_descriptions, character_headers, character_system_messages):\n",
|
||||
" print(f'\\n\\n{character_name} Description:')\n",
|
||||
" print(f'\\n{character_description}')\n",
|
||||
" print(f'\\n{character_header}')\n",
|
||||
" print(f'\\n{character_system_message.content}')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output parser for bids\n",
|
||||
"We ask the agents to output a bid to speak. But since the agents are LLMs that output strings, we need to \n",
|
||||
"1. define a format they will produce their outputs in\n",
|
||||
"2. parse their outputs\n",
|
||||
"\n",
|
||||
"We can subclass the [RegexParser](https://github.com/hwchase17/langchain/blob/master/langchain/output_parsers/regex.py) to implement our own custom output parser for bids."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class BidOutputParser(RegexParser):\n",
|
||||
" def get_format_instructions(self) -> str:\n",
|
||||
" return 'Your response should be an integer delimited by angled brackets, like this: <int>.' \n",
|
||||
" \n",
|
||||
"bid_parser = BidOutputParser(\n",
|
||||
" regex=r'<(\\d+)>', \n",
|
||||
" output_keys=['bid'],\n",
|
||||
" default_output_key='bid')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate bidding system message\n",
|
||||
"This is inspired by the prompt used in [Generative Agents](https://arxiv.org/pdf/2304.03442.pdf) for using an LLM to determine the importance of memories. This will use the formatting instructions from our `BidOutputParser`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def generate_character_bidding_template(character_header):\n",
|
||||
" bidding_template = (\n",
|
||||
" f\"\"\"{character_header}\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"{{message_history}}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"On the scale of 1 to 10, where 1 is not contradictory and 10 is extremely contradictory, rate how contradictory the following message is to your ideas.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"{{recent_message}}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"{bid_parser.get_format_instructions()}\n",
|
||||
"Do nothing else.\n",
|
||||
" \"\"\")\n",
|
||||
" return bidding_template\n",
|
||||
"\n",
|
||||
"character_bidding_templates = [generate_character_bidding_template(character_header) for character_header in character_headers]\n",
|
||||
" \n"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Donald Trump Bidding Template:\n",
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
"Your name is Donald Trump.\n",
"You are a presidential candidate.\n",
"Your description is as follows: Donald Trump, you exude confidence and a bold personality. You are known for your unpredictability and your desire for greatness. You often speak your mind without reservation, which can be a strength but also a weakness.\n",
"You are debating the topic: transcontinental high speed rail.\n",
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
"\n",
"\n",
"```\n",
"{message_history}\n",
"```\n",
"\n",
"On the scale of 1 to 10, where 1 is not contradictory and 10 is extremely contradictory, rate how contradictory the following message is to your ideas.\n",
"\n",
"```\n",
"{recent_message}\n",
"```\n",
"\n",
"Your response should be an integer delimited by angled brackets, like this: <int>.\n",
"Do nothing else.\n",
" \n",
"Kanye West Bidding Template:\n",
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
"Your name is Kanye West.\n",
"You are a presidential candidate.\n",
"Your description is as follows: Kanye West, you are a creative visionary who is unafraid to speak your mind. Your innovative approach to art and music has made you one of the most influential figures of our time. You bring a bold and unconventional perspective to this debate that I look forward to hearing.\n",
"You are debating the topic: transcontinental high speed rail.\n",
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
"\n",
"\n",
"```\n",
"{message_history}\n",
"```\n",
"\n",
"On the scale of 1 to 10, where 1 is not contradictory and 10 is extremely contradictory, rate how contradictory the following message is to your ideas.\n",
"\n",
"```\n",
"{recent_message}\n",
"```\n",
"\n",
"Your response should be an integer delimited by angled brackets, like this: <int>.\n",
"Do nothing else.\n",
" \n",
"Elizabeth Warren Bidding Template:\n",
"Here is the topic for the presidential debate: transcontinental high speed rail.\n",
"The presidential candidates are: Donald Trump, Kanye West, Elizabeth Warren.\n",
"Your name is Elizabeth Warren.\n",
"You are a presidential candidate.\n",
"Your description is as follows: Elizabeth Warren, you are a fierce advocate for the middle class and a champion of progressive policies. Your tenacity and unwavering dedication to fighting for what you believe in have inspired many. Your policies are guided by a deep sense of empathy and a desire to help those who are most in need.\n",
"You are debating the topic: transcontinental high speed rail.\n",
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
"\n",
"\n",
"```\n",
"{message_history}\n",
"```\n",
"\n",
"On the scale of 1 to 10, where 1 is not contradictory and 10 is extremely contradictory, rate how contradictory the following message is to your ideas.\n",
"\n",
"```\n",
"{recent_message}\n",
"```\n",
"\n",
"Your response should be an integer delimited by angled brackets, like this: <int>.\n",
"Do nothing else.\n",
" \n"
]
}
],
"source": [
|
||||
"for character_name, bidding_template in zip(character_names, character_bidding_templates):\n",
|
||||
" print(f'{character_name} Bidding Template:')\n",
|
||||
" print(bidding_template)"
|
||||
]
|
||||
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use an LLM to create an elaborate on debate topic"
|
||||
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Original topic:\n",
"transcontinental high speed rail\n",
"\n",
"Detailed topic:\n",
"Candidates, with the rise of autonomous technologies, we must address the problem of how to integrate them into our proposed transcontinental high speed rail system. Outline your plan on how to safely integrate autonomous vehicles into rail travel, balancing the need for innovation and safety.\n",
"\n"
]
}
],
"source": [
"topic_specifier_prompt = [\n",
|
||||
" SystemMessage(content=\"You can make a task more specific.\"),\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" \n",
|
||||
" You are the debate moderator.\n",
|
||||
" Please make the debate topic more specific. \n",
|
||||
" Frame the debate topic as a problem to be solved.\n",
|
||||
" Be creative and imaginative.\n",
|
||||
" Please reply with the specified topic in {word_limit} words or less. \n",
|
||||
" Speak directly to the presidential candidates: {*character_names,}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content\n",
|
||||
"\n",
|
||||
"print(f\"Original topic:\\n{topic}\\n\")\n",
|
||||
"print(f\"Detailed topic:\\n{specified_topic}\\n\")"
|
||||
]
|
||||
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define the speaker selection function\n",
"Lastly we will define a speaker selection function `select_next_speaker` that takes each agent's bid and selects the agent with the highest bid (with ties broken randomly).\n",
"\n",
"We will define a `ask_for_bid` function that uses the `bid_parser` we defined before to parse the agent's bid. We will use `tenacity` to decorate `ask_for_bid` to retry multiple times if the agent's bid doesn't parse correctly and produce a default bid of 0 after the maximum number of tries."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"@tenacity.retry(stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"),\n",
|
||||
" retry_error_callback=lambda retry_state: 0) # Default value when all retries are exhausted\n",
|
||||
"def ask_for_bid(agent) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Ask for agent bid and parses the bid into the correct format.\n",
|
||||
" \"\"\"\n",
|
||||
" bid_string = agent.bid()\n",
|
||||
" bid = int(bid_parser.parse(bid_string)['bid'])\n",
|
||||
" return bid"
|
||||
]
|
||||
},
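The `retry_error_callback` is what turns a persistently malformed bid into a default of 0 instead of an unhandled exception. A standalone sketch of just that mechanism (assumes only that `tenacity` is installed; not part of the original notebook):

```python
import tenacity

@tenacity.retry(
    stop=tenacity.stop_after_attempt(2),
    wait=tenacity.wait_none(),
    retry=tenacity.retry_if_exception_type(ValueError),
    retry_error_callback=lambda retry_state: 0,  # fall back to a bid of 0
)
def always_malformed() -> int:
    # Stands in for an agent whose reply never matches r'<(\d+)>'
    raise ValueError("Could not parse output")

print(always_malformed())  # prints 0 after two failed attempts; no exception escapes
```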
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:\n",
|
||||
" bids = []\n",
|
||||
" for agent in agents:\n",
|
||||
" bid = ask_for_bid(agent)\n",
|
||||
" bids.append(bid)\n",
|
||||
" \n",
|
||||
" # randomly select among multiple agents with the same bid\n",
|
||||
" max_value = np.max(bids)\n",
|
||||
" max_indices = np.where(bids == max_value)[0]\n",
|
||||
" idx = np.random.choice(max_indices)\n",
|
||||
" \n",
|
||||
" print('Bids:')\n",
|
||||
" for i, (bid, agent) in enumerate(zip(bids, agents)):\n",
|
||||
" print(f'\\t{agent.name} bid: {bid}')\n",
|
||||
" if i == idx:\n",
|
||||
" selected_name = agent.name\n",
|
||||
" print(f'Selected: {selected_name}')\n",
|
||||
" print('\\n')\n",
|
||||
" return idx"
|
||||
]
|
||||
},
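The tie-breaking step works even though `bids` is a plain list: `np.max` returns a NumPy scalar, so `bids == max_value` broadcasts elementwise rather than comparing the whole list to an integer. A small illustration with made-up bids (not from the notebook):

```python
import numpy as np

bids = [8, 2, 8]                              # hypothetical bids with a tie
max_value = np.max(bids)                      # np.int64(8), a NumPy scalar
max_indices = np.where(bids == max_value)[0]  # array([0, 2]) via broadcasting
print(np.random.choice(max_indices))          # 0 or 2, chosen uniformly at random
```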
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Main Loop"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"characters = []\n",
|
||||
"for character_name, character_system_message, bidding_template in zip(character_names, character_system_messages, character_bidding_templates):\n",
|
||||
" characters.append(BiddingDialogueAgent(\n",
|
||||
" name=character_name,\n",
|
||||
" system_message=character_system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" bidding_template=bidding_template,\n",
|
||||
" ))"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(Debate Moderator): Candidates, with the rise of autonomous technologies, we must address the problem of how to integrate them into our proposed transcontinental high speed rail system. Outline your plan on how to safely integrate autonomous vehicles into rail travel, balancing the need for innovation and safety.\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 8\n",
"\tKanye West bid: 2\n",
"\tElizabeth Warren bid: 1\n",
"Selected: Donald Trump\n",
"\n",
"\n",
"(Donald Trump): Let me tell you, folks, I have the best plan for integrating autonomous vehicles into our high speed rail system. We're going to use the latest technology, the best technology, to ensure safety and efficiency. And let me tell you, we're going to do it in style. We're going to have luxury autonomous cars that will make you feel like you're in a private jet. It's going to be tremendous, believe me. *gestures with hands*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 8\n",
"\tKanye West bid: 7\n",
"\tElizabeth Warren bid: 10\n",
"Selected: Elizabeth Warren\n",
"\n",
"\n",
"(Elizabeth Warren): Thank you for the question. As someone who has always fought for the safety and well-being of the American people, I believe that any plan for integrating autonomous vehicles into our high speed rail system must prioritize safety above all else. We need to ensure that these vehicles are thoroughly tested and meet strict safety standards before they are allowed on our rails. Additionally, we must invest in the necessary infrastructure to support these vehicles, such as advanced sensors and communication systems. But we must also ensure that these innovations are accessible to all Americans, not just the wealthy. That's why I propose a public-private partnership to fund and build this system, with a focus on creating good-paying jobs and expanding economic opportunities for all Americans. *smiles confidently*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 8\n",
"\tKanye West bid: 2\n",
"\tElizabeth Warren bid: 1\n",
"Selected: Donald Trump\n",
"\n",
"\n",
"(Donald Trump): Let me tell you, Elizabeth, safety is important, but we also need to think about innovation and progress. We can't let fear hold us back from achieving greatness. That's why I propose a competition, a race to see which company can create the safest and most efficient autonomous vehicles for our high speed rail system. And let me tell you, the winner will receive a huge government contract and be hailed as a hero. It's going to be tremendous, folks. *points finger*\n",
"\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Bids:\n",
"\tDonald Trump bid: 3\n",
"\tKanye West bid: 8\n",
"\tElizabeth Warren bid: 8\n",
"Selected: Kanye West\n",
"\n",
"\n",
"(Kanye West): Yo, yo, yo, let me jump in here. First of all, I gotta say, I love innovation and progress. But we can't forget about the people, man. We need to make sure that this high speed rail system is accessible to everyone, not just the wealthy. And that means we need to invest in public transportation, not just luxury autonomous cars. We need to make sure that people can get from point A to point B safely and efficiently, without breaking the bank. And let me tell you, we can do it in style too. We can have art installations and live performances on the trains, making it a cultural experience. *starts nodding head*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 7\n",
"\tKanye West bid: 2\n",
"\tElizabeth Warren bid: 1\n",
"Selected: Donald Trump\n",
"\n",
"\n",
"(Donald Trump): Kanye, I hear what you're saying, but let's not forget about the importance of luxury and comfort. We need to make sure that our high speed rail system is not only accessible, but also enjoyable. That's why I propose that we have different tiers of service, from economy to first class, so that everyone can choose the level of luxury they want. And let me tell you, the first class experience will be something else. We're talking about gourmet meals, personal attendants, and even spa services. It's going to be tremendous, folks. *smirks confidently*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 2\n",
"\tKanye West bid: 8\n",
"\tElizabeth Warren bid: 10\n",
"Selected: Elizabeth Warren\n",
"\n",
"\n",
"(Elizabeth Warren): I agree with Kanye that we need to prioritize accessibility and affordability for all Americans. But we also need to think about the environmental impact of this system. That's why I propose that we invest in renewable energy sources to power our high speed rail system, such as solar and wind power. We can also use this opportunity to create green jobs and reduce our carbon footprint. And let's not forget about the importance of public input and transparency in this process. We need to engage with communities and listen to their concerns and ideas. *raises hand in emphasis*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 8\n",
"\tKanye West bid: 1\n",
"\tElizabeth Warren bid: 1\n",
"Selected: Donald Trump\n",
"\n",
"\n",
"(Donald Trump): Let me tell you, Elizabeth, I agree that we need to think about the environment, but we also need to think about the economy. That's why I propose that we use American-made materials and labor to build this high speed rail system. We're going to create jobs and boost our economy, all while creating a world-class transportation system. And let me tell you, it's going to be beautiful. We're going to have the best trains, the best tracks, and the best stations. It's going to be tremendous, folks. *smiles confidently*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 2\n",
"\tKanye West bid: 7\n",
"\tElizabeth Warren bid: 8\n",
"Selected: Elizabeth Warren\n",
"\n",
"\n",
"(Elizabeth Warren): Thank you, but let's not forget that we also need to prioritize safety and reliability. We can't cut corners or sacrifice quality for the sake of speed or cost. That's why I propose that we have rigorous safety and maintenance standards, with regular inspections and repairs. And we need to invest in training and support for our rail workers, so that they can operate and maintain this system with the highest level of expertise and care. *firmly nods head*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 2\n",
"\tKanye West bid: 1\n",
"\tElizabeth Warren bid: 1\n",
"Selected: Donald Trump\n",
"\n",
"\n",
"(Donald Trump): Let me tell you, Elizabeth, safety is important, but we also need to think about efficiency and speed. That's why I propose that we use the latest technology, such as artificial intelligence and machine learning, to monitor and maintain our high speed rail system. We can detect and fix any issues before they become a problem, ensuring that our trains run smoothly and on time. And let me tell you, we're going to be the envy of the world with this system. It's going to be tremendous, folks. *smirks confidently*\n",
"\n",
"\n",
"Bids:\n",
"\tDonald Trump bid: 2\n",
"\tKanye West bid: 8\n",
"\tElizabeth Warren bid: 8\n",
"Selected: Kanye West\n",
"\n",
"\n",
"(Kanye West): Yo, yo, yo, let me jump in here again. I hear what both of y'all are saying, but let's not forget about the culture, man. We need to make sure that this high speed rail system reflects the diversity and creativity of our country. That means we need to have art installations, live performances, and even fashion shows on the trains. We can showcase the best of American culture and inspire people from all over the world. And let me tell you, it's going to be a vibe. *starts swaying to the beat*\n",
"\n",
"\n"
]
}
],
"source": [
|
||||
"max_iters = 10\n",
|
||||
"n = 0\n",
|
||||
"\n",
|
||||
"simulator = DialogueSimulator(\n",
|
||||
" agents=characters,\n",
|
||||
" selection_function=select_next_speaker\n",
|
||||
")\n",
|
||||
"simulator.reset('Debate Moderator', specified_topic)\n",
|
||||
"print(f\"(Debate Moderator): {specified_topic}\")\n",
|
||||
"print('\\n')\n",
|
||||
"\n",
|
||||
"while n < max_iters:\n",
|
||||
" name, message = simulator.step()\n",
|
||||
" print(f\"({name}): {message}\")\n",
|
||||
" print('\\n')\n",
|
||||
" n += 1"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -410,7 +410,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.9.16"
}
},
"nbformat": 4,

@@ -219,7 +219,7 @@
},
"outputs": [],
"source": [
"from langchain.tools import BaseTool, DuckDuckGoSearchRun\n",
"from langchain.tools import BaseTool, DuckDuckGoSearchTool\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"from pydantic import Field\n",
@@ -321,7 +321,7 @@
"outputs": [],
"source": [
"# !pip install duckduckgo_search\n",
"web_search = DuckDuckGoSearchRun()"
"web_search = DuckDuckGoSearchTool()"
]
},
{
@@ -618,7 +618,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
"version": "3.11.2"
}
},
"nbformat": 4,
@@ -5,6 +5,11 @@ from typing import Optional

from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
from langchain.cache import BaseCache
from langchain.callbacks import (
    set_default_callback_manager,
    set_handler,
    set_tracing_callback_manager,
)
from langchain.chains import (
    ConversationChain,
    LLMBashChain,
@@ -30,7 +35,6 @@ from langchain.llms import (
    Modal,
    OpenAI,
    Petals,
    PipelineAI,
    SagemakerEndpoint,
    StochasticAI,
    Writer,
@@ -43,7 +47,7 @@ from langchain.prompts import (
    PromptTemplate,
)
from langchain.sql_database import SQLDatabase
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities import ArxivAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.powerbi import PowerBIDataset
@@ -62,6 +66,7 @@ del metadata # optional, avoids polluting the results of dir(__package__)

verbose: bool = False
llm_cache: Optional[BaseCache] = None
set_default_callback_manager()

# For backwards compatibility
SerpAPIChain = SerpAPIWrapper
@@ -89,7 +94,6 @@ __all__ = [
    "Modal",
    "OpenAI",
    "Petals",
    "PipelineAI",
    "StochasticAI",
    "Writer",
    "BasePromptTemplate",
@@ -113,5 +117,7 @@ __all__ = [
    "VectorDBQAWithSourcesChain",
    "QAWithSourcesChain",
    "PALChain",
    "set_handler",
    "set_tracing_callback_manager",
    "LlamaCpp",
]
@@ -13,13 +13,7 @@ import yaml
from pydantic import BaseModel, root_validator

from langchain.agents.tools import InvalidTool
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.input import get_color_mapping
@@ -29,6 +23,7 @@ from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    AgentAction,
    AgentFinish,
    BaseLanguageModel,
    BaseMessage,
    BaseOutputParser,
)
@@ -51,17 +46,13 @@ class BaseSingleActionAgent(BaseModel):

    @abstractmethod
    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -70,17 +61,13 @@ class BaseSingleActionAgent(BaseModel):

    @abstractmethod
    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -183,17 +170,13 @@ class BaseMultiActionAgent(BaseModel):

    @abstractmethod
    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[List[AgentAction], AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -202,17 +185,13 @@ class BaseMultiActionAgent(BaseModel):

    @abstractmethod
    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[List[AgentAction], AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
@@ -306,52 +285,38 @@ class LLMSingleActionAgent(BaseSingleActionAgent):
        return list(set(self.llm_chain.input_keys) - {"intermediate_steps"})

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        output = self.llm_chain.run(
            intermediate_steps=intermediate_steps,
            stop=self.stop,
            callbacks=callbacks,
            **kwargs,
            intermediate_steps=intermediate_steps, stop=self.stop, **kwargs
        )
        return self.output_parser.parse(output)

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        output = await self.llm_chain.arun(
            intermediate_steps=intermediate_steps,
            stop=self.stop,
            callbacks=callbacks,
            **kwargs,
            intermediate_steps=intermediate_steps, stop=self.stop, **kwargs
        )
        return self.output_parser.parse(output)

@@ -403,45 +368,37 @@ class Agent(BaseSingleActionAgent):
        return thoughts

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
        full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
        full_output = self.llm_chain.predict(**full_inputs)
        return self.output_parser.parse(full_output)

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
        full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs)
        full_output = await self.llm_chain.apredict(**full_inputs)
        return self.output_parser.parse(full_output)

    def get_full_inputs(
@@ -497,11 +454,7 @@ class Agent(BaseSingleActionAgent):
    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        """Validate that appropriate tools are passed in."""
        for tool in tools:
            if not tool.is_single_input:
                raise ValueError(
                    f"{cls.__name__} does not support multi-input tool {tool.name}."
                )
        pass

    @classmethod
    @abstractmethod
@@ -679,27 +632,24 @@ class AgentExecutor(Chain):

        return True

    def _return(
        self,
        output: AgentFinish,
        intermediate_steps: list,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        if run_manager:
            run_manager.on_agent_finish(output, color="green", verbose=self.verbose)
    def _return(self, output: AgentFinish, intermediate_steps: list) -> Dict[str, Any]:
        self.callback_manager.on_agent_finish(
            output, color="green", verbose=self.verbose
        )
        final_output = output.return_values
        if self.return_intermediate_steps:
            final_output["intermediate_steps"] = intermediate_steps
        return final_output

    async def _areturn(
        self,
        output: AgentFinish,
        intermediate_steps: list,
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
        self, output: AgentFinish, intermediate_steps: list
    ) -> Dict[str, Any]:
        if run_manager:
            await run_manager.on_agent_finish(
        if self.callback_manager.is_async:
            await self.callback_manager.on_agent_finish(
                output, color="green", verbose=self.verbose
            )
        else:
            self.callback_manager.on_agent_finish(
                output, color="green", verbose=self.verbose
            )
        final_output = output.return_values
@@ -713,18 +663,13 @@ class AgentExecutor(Chain):
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        # Call the LLM to see what to do.
        output = self.agent.plan(
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
        )
        output = self.agent.plan(intermediate_steps, **inputs)
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
@@ -735,8 +680,9 @@ class AgentExecutor(Chain):
            actions = output
        result = []
        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, color="green")
            self.callback_manager.on_agent_action(
                agent_action, verbose=self.verbose, color="green"
            )
            # Otherwise we lookup the tool
            if agent_action.tool in name_to_tool_map:
                tool = name_to_tool_map[agent_action.tool]
@@ -750,7 +696,6 @@ class AgentExecutor(Chain):
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
@@ -759,7 +704,6 @@ class AgentExecutor(Chain):
                    agent_action.tool,
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            result.append((agent_action, observation))
@@ -771,18 +715,13 @@ class AgentExecutor(Chain):
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        # Call the LLM to see what to do.
        output = await self.agent.aplan(
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
        )
        output = await self.agent.aplan(intermediate_steps, **inputs)
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
@@ -795,8 +734,12 @@ class AgentExecutor(Chain):
        async def _aperform_agent_action(
            agent_action: AgentAction,
        ) -> Tuple[AgentAction, str]:
            if run_manager:
                await run_manager.on_agent_action(
            if self.callback_manager.is_async:
                await self.callback_manager.on_agent_action(
                    agent_action, verbose=self.verbose, color="green"
                )
            else:
                self.callback_manager.on_agent_action(
                    agent_action, verbose=self.verbose, color="green"
                )
            # Otherwise we lookup the tool
@@ -812,7 +755,6 @@ class AgentExecutor(Chain):
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
@@ -821,7 +763,6 @@ class AgentExecutor(Chain):
                    agent_action.tool,
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            return agent_action, observation
@@ -833,11 +774,7 @@ class AgentExecutor(Chain):

        return list(result)

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
@@ -853,16 +790,10 @@ class AgentExecutor(Chain):
        # We now enter the agent loop (until it returns something).
        while self._should_continue(iterations, time_elapsed):
            next_step_output = self._take_next_step(
                name_to_tool_map,
                color_mapping,
                inputs,
                intermediate_steps,
                run_manager=run_manager,
                name_to_tool_map, color_mapping, inputs, intermediate_steps
            )
            if isinstance(next_step_output, AgentFinish):
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
                )
                return self._return(next_step_output, intermediate_steps)

            intermediate_steps.extend(next_step_output)
            if len(next_step_output) == 1:
@@ -878,11 +809,7 @@ class AgentExecutor(Chain):
        )
        return self._return(output, intermediate_steps)

    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
@@ -900,11 +827,7 @@ class AgentExecutor(Chain):
        try:
            while self._should_continue(iterations, time_elapsed):
                next_step_output = await self._atake_next_step(
                    name_to_tool_map,
                    color_mapping,
                    inputs,
                    intermediate_steps,
                    run_manager=run_manager,
                    name_to_tool_map, color_mapping, inputs, intermediate_steps
                )
                if isinstance(next_step_output, AgentFinish):
                    return await self._areturn(next_step_output, intermediate_steps)
@@ -922,9 +845,7 @@ class AgentExecutor(Chain):
                output = self.agent.return_stopped_response(
                    self.early_stopping_method, intermediate_steps, **inputs
                )
                return await self._areturn(
                    output, intermediate_steps, run_manager=run_manager
                )
                return await self._areturn(output, intermediate_steps)
        except TimeoutError:
            # stop early when interrupted by the async timeout
            output = self.agent.return_stopped_response(
@@ -1,9 +1,6 @@
"""Agent toolkits."""

from langchain.agents.agent_toolkits.csv.base import create_csv_agent
from langchain.agents.agent_toolkits.file_management.toolkit import (
    FileManagementToolkit,
)
from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.agents.agent_toolkits.json.base import create_json_agent
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
@@ -11,7 +8,6 @@ from langchain.agents.agent_toolkits.nla.toolkit import NLAToolkit
from langchain.agents.agent_toolkits.openapi.base import create_openapi_agent
from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.agents.agent_toolkits.playwright.toolkit import PlayWrightBrowserToolkit
from langchain.agents.agent_toolkits.powerbi.base import create_pbi_agent
from langchain.agents.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
@@ -50,6 +46,4 @@ __all__ = [
    "create_csv_agent",
    "ZapierToolkit",
    "JiraToolkit",
    "FileManagementToolkit",
    "PlayWrightBrowserToolkit",
]

@@ -1,7 +0,0 @@
"""Local file management toolkit."""

from langchain.agents.agent_toolkits.file_management.toolkit import (
    FileManagementToolkit,
)

__all__ = ["FileManagementToolkit"]
@@ -1,61 +0,0 @@
"""Toolkit for interacting with the local filesystem."""
from __future__ import annotations

from typing import List, Optional

from pydantic import root_validator

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.file_management.copy import CopyFileTool
from langchain.tools.file_management.delete import DeleteFileTool
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.list_dir import ListDirectoryTool
from langchain.tools.file_management.move import MoveFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool

_FILE_TOOLS = {
    tool_cls.__fields__["name"].default: tool_cls
    for tool_cls in [
        CopyFileTool,
        DeleteFileTool,
        FileSearchTool,
        MoveFileTool,
        ReadFileTool,
        WriteFileTool,
        ListDirectoryTool,
    ]
}


class FileManagementToolkit(BaseToolkit):
    """Toolkit for interacting with a Local Files."""

    root_dir: Optional[str] = None
    """If specified, all file operations are made relative to root_dir."""
    selected_tools: Optional[List[str]] = None
    """If provided, only provide the selected tools. Defaults to all."""

    @root_validator
    def validate_tools(cls, values: dict) -> dict:
        selected_tools = values.get("selected_tools") or []
        for tool_name in selected_tools:
            if tool_name not in _FILE_TOOLS:
                raise ValueError(
                    f"File Tool of name {tool_name} not supported."
                    f" Permitted tools: {list(_FILE_TOOLS)}"
                )
        return values

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
        tools: List[BaseTool] = []
        for tool in allowed_tools:
            tool_cls = _FILE_TOOLS[tool]
            tools.append(tool_cls(root_dir=self.root_dir))  # type: ignore
        return tools


__all__ = ["FileManagementToolkit"]
@@ -28,13 +28,13 @@ from langchain.agents.agent_toolkits.openapi.planner_prompt import (
from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.memory import ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain.prompts.base import BasePromptTemplate
from langchain.requests import RequestsWrapper
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.requests.tool import BaseRequestsTool

@@ -35,7 +35,7 @@ def create_pandas_dataframe_agent(
    prompt = ZeroShotAgent.create_prompt(
        tools, prefix=prefix, suffix=suffix, input_variables=input_variables
    )
    partial_prompt = prompt.partial(df=str(df.head().to_markdown()))
    partial_prompt = prompt.partial(df=str(df.head()))
    llm_chain = LLMChain(
        llm=llm,
        prompt=partial_prompt,

@@ -1,4 +0,0 @@
"""Playwright browser toolkit."""
from langchain.agents.agent_toolkits.playwright.toolkit import PlayWrightBrowserToolkit

__all__ = ["PlayWrightBrowserToolkit"]
@@ -1,83 +0,0 @@
"""Playwright web browser toolkit."""
from __future__ import annotations

from typing import TYPE_CHECKING, List, Optional, Type, cast

from pydantic import Extra, root_validator

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools.base import BaseTool
from langchain.tools.playwright.base import (
    BaseBrowserTool,
    lazy_import_playwright_browsers,
)
from langchain.tools.playwright.click import ClickTool
from langchain.tools.playwright.current_page import CurrentWebPageTool
from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool
from langchain.tools.playwright.extract_text import ExtractTextTool
from langchain.tools.playwright.get_elements import GetElementsTool
from langchain.tools.playwright.navigate import NavigateTool
from langchain.tools.playwright.navigate_back import NavigateBackTool

if TYPE_CHECKING:
    from playwright.async_api import Browser as AsyncBrowser
    from playwright.sync_api import Browser as SyncBrowser
else:
    try:
        # We do this so pydantic can resolve the types when instantiating
        from playwright.async_api import Browser as AsyncBrowser
        from playwright.sync_api import Browser as SyncBrowser
    except ImportError:
        pass


class PlayWrightBrowserToolkit(BaseToolkit):
    """Toolkit for web browser tools."""

    sync_browser: Optional["SyncBrowser"] = None
    async_browser: Optional["AsyncBrowser"] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator
    def validate_imports_and_browser_provided(cls, values: dict) -> dict:
        """Check that the arguments are valid."""
        lazy_import_playwright_browsers()
        if values.get("async_browser") is None and values.get("sync_browser") is None:
            raise ValueError("Either async_browser or sync_browser must be specified.")
        return values

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        tool_classes: List[Type[BaseBrowserTool]] = [
            ClickTool,
            NavigateTool,
            NavigateBackTool,
            ExtractTextTool,
            ExtractHyperlinksTool,
            GetElementsTool,
            CurrentWebPageTool,
        ]

        tools = [
            tool_cls.from_browser(
                sync_browser=self.sync_browser, async_browser=self.async_browser
            )
            for tool_cls in tool_classes
        ]
        return cast(List[BaseTool], tools)

    @classmethod
    def from_browser(
        cls,
        sync_browser: Optional[SyncBrowser] = None,
        async_browser: Optional[AsyncBrowser] = None,
    ) -> PlayWrightBrowserToolkit:
        """Instantiate the toolkit."""
        # This is to raise a better error than the forward ref ones Pydantic would have
        lazy_import_playwright_browsers()
        return cls(sync_browser=sync_browser, async_browser=async_browser)
@@ -4,10 +4,10 @@ from typing import List, Optional
from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.powerbi.prompt import QUESTION_TO_QUERY
from langchain.tools.powerbi.tool import (
@@ -35,20 +35,24 @@ class PowerBIToolkit(BaseToolkit):
    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        if self.callback_manager:
            chain = LLMChain(
                llm=self.llm,
                callback_manager=self.callback_manager,
                prompt=PromptTemplate(
                    template=QUESTION_TO_QUERY,
                    input_variables=["tool_input", "tables", "schemas", "examples"],
            chain = (
                LLMChain(
                    llm=self.llm,
                    callback_manager=self.callback_manager,
                    prompt=PromptTemplate(
                        template=QUESTION_TO_QUERY,
                        input_variables=["tool_input", "tables", "schemas", "examples"],
                    ),
                ),
            )
        else:
            chain = LLMChain(
                llm=self.llm,
                prompt=PromptTemplate(
                    template=QUESTION_TO_QUERY,
                    input_variables=["tool_input", "tables", "schemas", "examples"],
            chain = (
                LLMChain(
                    llm=self.llm,
                    prompt=PromptTemplate(
                        template=QUESTION_TO_QUERY,
                        input_variables=["tool_input", "tables", "schemas", "examples"],
                    ),
                ),
            )
        return [
@@ -56,8 +60,8 @@ class PowerBIToolkit(BaseToolkit):
            InfoPowerBITool(powerbi=self.powerbi),
            ListPowerBITool(powerbi=self.powerbi),
            InputToQueryTool(
                llm_chain=chain,
                powerbi=self.powerbi,
                llm_chain=chain,
                examples=self.examples,
            ),
        ]

@@ -4,7 +4,7 @@ from typing import List
from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.llms.base import BaseLLM
from langchain.sql_database import SQLDatabase
from langchain.tools import BaseTool
from langchain.tools.sql_database.tool import (
@@ -19,7 +19,7 @@ class SQLDatabaseToolkit(BaseToolkit):
    """Toolkit for interacting with SQL databases."""

    db: SQLDatabase = Field(exclude=True)
    llm: BaseLanguageModel = Field(exclude=True)
    llm: BaseLLM = Field(exclude=True)

    @property
    def dialect(self) -> str:

@@ -4,7 +4,7 @@ from typing import List
from pydantic import BaseModel, Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.llms.base import BaseLLM
from langchain.llms.openai import OpenAI
from langchain.tools import BaseTool
from langchain.tools.vectorstore.tool import (
@@ -31,7 +31,7 @@ class VectorStoreToolkit(BaseToolkit):
    """Toolkit for interacting with a vector store."""

    vectorstore_info: VectorStoreInfo = Field(exclude=True)
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
    llm: BaseLLM = Field(default_factory=lambda: OpenAI(temperature=0))

    class Config:
        """Configuration for this pydantic object."""
@@ -65,7 +65,7 @@ class VectorStoreRouterToolkit(BaseToolkit):
    """Toolkit for routing between vectorstores."""

    vectorstores: List[VectorStoreInfo] = Field(exclude=True)
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
    llm: BaseLLM = Field(default_factory=lambda: OpenAI(temperature=0))

    class Config:
        """Configuration for this pydantic object."""
Some files were not shown because too many files have changed in this diff