mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-08 10:09:46 +00:00

Compare commits (120 commits):

6e0d3880df, 6ec5780547, 47d37db2d2, 4f364db9a9, 030ce9f506, 8990122d5d,
52d6bf04d0, 910da8518f, 2f27ef92fe, 75149d6d38, fab7994b74, eb80d6e0e4,
b5667bed9e, b3be83c750, 50626a10ee, 6e1b5b8f7e, eec9b1b306, ea142f6a32,
12f868b292, 31f9ecfc19, 273e9bf296, f155d9d3ec, d3d4503ce2, 1f93c5cf69,
15b5a08f4b, ff4a25b841, 2212520a6c, d08f940336, 2280a2cb2f, ce5d97bcb3,
8fa1764c60, f299bd1416, 064be93edf, 86822d1cc2, a581bce379, 2ffc643086,
2136dc94bb, a92344f476, b706966ebc, 1c22657256, 6f02286805, 3674074eb0,
a7e09d46c5, fa2e546b76, c592b12043, 9555bbd5bb, 0ca1641b14, d5b4393bb2,
7b6ff7fe00, 76c7b1f677, 5aa8ece211, f6d24d5740, b1c4480d7c, b6ba989f2f,
04acda55ec, 8e5c4ac867, df8702fead, d5d50c39e6, 1f18698b2a, ef4945af6b,
7de2ada3ea, 262d4cb9a8, 951c158106, 85e4dd7fc3, b1b4a4065a, 08f23c95d9,
3cf493b089, e635c86145, 779790167e, 3161ced4bc, 3d6fcb85dc, 3701b2901e,
280cb4160d, 80d8db5f60, 1a8790d808, 34840f3aee, 8685d53adc, 2f6833d433,
dd90fd02d5, 07766a69f3, aa854988bf, 96ebe98dc2, 45f05fc939, cf9c3f54f7,
fbc0c85b90, 276940fd9b, cdff6c8181, cd45adbea2, aff44d0a98, 8a95fdaee1,
5d8dc83ede, b157e0c1c3, 40e9488055, 55efbb8a7e, d6bbf395af, 606605925d,
f93c011456, 3c24684522, b84d190fd0, aad4bff098, 3ea6d9c4d2, ced412e1c1,
1279c8de39, c7779c800a, 6f4f771897, 4a327dd1d6, d4edd3c312, e72074f78a,
0b29e68c17, 4d7fdb8957, 656efe6ef3, 362586fe8b, 63aa28e2a6, c3dfbdf0da,
a2280f321f, 4e13cef05a, e5c1659864, 2d098e8869, 8965a2f0af, e222ea4ee8
.github/CONTRIBUTING.md (vendored): 2 changes

@@ -73,6 +73,8 @@ poetry install -E all

This will install all requirements for running the package, examples, linting, formatting, tests, and coverage. Note that the `-E all` flag will install all optional dependencies necessary for integration testing.

❗Note: If you're running Poetry 1.4.1 and receive a `WheelFileValidationError` for `debugpy` during installation, you can try either downgrading to Poetry 1.4.0 or disabling "modern installation" (`poetry config installer.modern-installation false`) and re-installing the requirements. See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.

Now you should be able to run the common tasks in the following section.

## ✅ Common Tasks
.gitignore (vendored): 6 changes

@@ -135,3 +135,9 @@ dmypy.json

# macOS display setting files
.DS_Store

# Wandb directory
wandb/

# asdf tool versions
.tool-versions
@@ -23,7 +23,7 @@ with open("../pyproject.toml") as f:

# -- Project information -----------------------------------------------------

project = "🦜🔗 LangChain"
- copyright = "2022, Harrison Chase"
+ copyright = "2023, Harrison Chase"
author = "Harrison Chase"

version = data["tool"]["poetry"]["version"]
@@ -34,7 +34,8 @@ search = GoogleSerperAPIWrapper()

tools = [
    Tool(
        name="Intermediate Answer",
-       func=search.run
+       func=search.run,
        description="useful for when you need to ask with search"
    )
]
docs/ecosystem/milvus.md (new file): 20 lines

@@ -0,0 +1,20 @@

# Milvus

This page covers how to use the Milvus ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Milvus wrappers.

## Installation and Setup
- Install the Python SDK with `pip install pymilvus`

## Wrappers

### VectorStore

There exists a wrapper around Milvus indexes, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores import Milvus
```

For a more detailed walkthrough of the Milvus wrapper, see [this notebook](../modules/indexes/vectorstore_examples/milvus.ipynb)
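A minimal end-to-end sketch of indexing and querying with this wrapper, assuming a Milvus server reachable at `localhost:19530` and an OpenAI API key for embeddings; the `connection_args` values are placeholders for your own deployment:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Milvus

texts = [
    "LangChain integrates with many vectorstores.",
    "Milvus is one of them.",
]

# Embed the texts and index them in Milvus (connection values are placeholders).
vector_db = Milvus.from_texts(
    texts,
    OpenAIEmbeddings(),
    connection_args={"host": "localhost", "port": "19530"},
)

# Semantic search over the indexed texts.
docs = vector_db.similarity_search("Which vectorstores does LangChain support?", k=1)
print(docs[0].page_content)
```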
docs/ecosystem/pgvector.md (new file): 29 lines

@@ -0,0 +1,29 @@

# PGVector

This page covers how to use the Postgres [PGVector](https://github.com/pgvector/pgvector) ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific PGVector wrappers.

## Installation
- Install the Python package with `pip install pgvector`

## Setup
1. The first step is to create a database with the `pgvector` extension installed.

   Follow the steps at [PGVector Installation Steps](https://github.com/pgvector/pgvector#installation) to install the database and the extension. The Docker image is the easiest way to get started.

## Wrappers

### VectorStore

There exists a wrapper around Postgres vector databases, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores.pgvector import PGVector
```

### Usage

For a more detailed walkthrough of the PGVector wrapper, see [this notebook](../modules/indexes/vectorstore_examples/pgvector.ipynb)
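A minimal sketch of building a store against a local Postgres, assuming the `pgvector` extension has been enabled in the target database (`CREATE EXTENSION vector;`) and that the wrapper's `from_documents` helper accepts these parameters; the connection string is a placeholder:

```python
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.pgvector import PGVector

# Placeholder DSN for a local Postgres where `CREATE EXTENSION vector;` has
# been run in the target database.
CONNECTION_STRING = "postgresql+psycopg2://postgres:postgres@localhost:5432/vectordb"

db = PGVector.from_documents(
    embedding=OpenAIEmbeddings(),
    documents=[Document(page_content="LangChain can persist embeddings in Postgres via pgvector.")],
    collection_name="demo",
    connection_string=CONNECTION_STRING,
)
print(db.similarity_search("Where are embeddings stored?", k=1)[0].page_content)
```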
@@ -25,9 +25,25 @@ from langchain.llms import PromptLayerOpenAI

```python
llm = PromptLayerOpenAI(pl_tags=["langchain-requests", "chatbot"])
```

To get the PromptLayer request id, use the argument `return_pl_id` when instantiating the LLM:
```python
from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(return_pl_id=True)
```
This will add the PromptLayer request ID in the `generation_info` field of the `Generation` returned when using `.generate` or `.agenerate`.

For example:
```python
llm_results = llm.generate(["hello world"])
for res in llm_results.generations:
    print("pl request id: ", res[0].generation_info["pl_request_id"])
```
You can use the PromptLayer request ID to add a prompt, score, or other metadata to your request. [Read more about it here](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).

This LLM is identical to the [OpenAI LLM](./openai), except that
- all your requests will be logged to your PromptLayer account
- you can add `pl_tags` when instantiating to tag your requests on PromptLayer
- you can add `return_pl_id` when instantiating to return a PromptLayer request id to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).

- PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](../modules/chat/examples/promptlayer_chat_openai.ipynb)
+ PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](../modules/chat/examples/promptlayer_chat_openai.ipynb) and `PromptLayerOpenAIChat`
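The request ID can then be fed back to PromptLayer's own client to attach a score, mirroring the chat example further down this page; this sketch assumes the `promptlayer` package is installed and its API key configured:

```python
import promptlayer  # assumes PROMPTLAYER_API_KEY is configured
from langchain.llms import PromptLayerOpenAI

llm = PromptLayerOpenAI(return_pl_id=True)
llm_results = llm.generate(["hello world"])

for res in llm_results.generations:
    pl_request_id = res[0].generation_info["pl_request_id"]
    # Attach a score (0-100) to the logged request for later analysis.
    promptlayer.track.score(request_id=pl_request_id, score=100)
```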
docs/ecosystem/qdrant.md (new file): 20 lines

@@ -0,0 +1,20 @@

# Qdrant

This page covers how to use the Qdrant ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Qdrant wrappers.

## Installation and Setup
- Install the Python SDK with `pip install qdrant-client`

## Wrappers

### VectorStore

There exists a wrapper around Qdrant indexes, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores import Qdrant
```

For a more detailed walkthrough of the Qdrant wrapper, see [this notebook](../modules/indexes/vectorstore_examples/qdrant.ipynb)
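A minimal sketch of indexing and querying, assuming a Qdrant server on `localhost` and an OpenAI key for embeddings; constructor kwargs vary across versions, so treat the connection arguments as placeholders:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

# `host` is a placeholder; extra kwargs are forwarded to the Qdrant client.
qdrant = Qdrant.from_texts(
    ["Qdrant stores vectors and payloads for similarity search."],
    OpenAIEmbeddings(),
    host="localhost",
)
print(qdrant.similarity_search("What does Qdrant store?", k=1)[0].page_content)
```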
@@ -17,9 +17,12 @@ This page is broken into two parts: installation and setup, and then references

 - `poppler-utils`
 - `tesseract-ocr`
 - `libreoffice`
-- If you are parsing PDFs, run the following to install the `detectron2` model, which
+- If you are parsing PDFs using the `"hi_res"` strategy, run the following to install the `detectron2` model, which
   `unstructured` uses for layout detection:
   - `pip install "detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.6#egg=detectron2"`
+- If `detectron2` is not installed, `unstructured` will fall back to processing PDFs
+  using the `"fast"` strategy, which uses `pdfminer` directly and doesn't require
+  `detectron2`.

 ## Wrappers
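For context, a sketch of loading a PDF through the `unstructured` wrapper with the lightweight strategy; whether the loader forwards `strategy` depends on the installed `unstructured` and LangChain versions, so treat that keyword as an assumption:

```python
from langchain.document_loaders import UnstructuredPDFLoader

# "fast" parses with pdfminer and avoids the detectron2 dependency;
# forwarding `strategy` through the loader is assumed here and may depend
# on the installed versions.
loader = UnstructuredPDFLoader("example.pdf", strategy="fast")  # placeholder path
docs = loader.load()
print(docs[0].page_content[:200])
```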
docs/ecosystem/wandb_tracking.ipynb (new file): 625 lines

# Weights & Biases

This notebook goes over how to track your LangChain experiments into one centralized Weights and Biases dashboard. To learn more about prompt engineering and the callback, please refer to this Report, which explains both alongside the resultant dashboards you can expect to see.

Run in Colab: https://colab.research.google.com/drive/1DXH4beT4HFaRKy_Vm4PoxhXVDRf7Ym8L?usp=sharing

View Report: https://wandb.ai/a-sh0ts/langchain_callback_demo/reports/Prompt-Engineering-LLMs-with-LangChain-and-W-B--VmlldzozNjk1NTUw#👋-how-to-build-a-callback-in-langchain-for-better-prompt-engineering

```python
!pip install wandb
!pip install pandas
!pip install textstat
!pip install spacy
!python -m spacy download en_core_web_sm
```

```python
import os
os.environ["WANDB_API_KEY"] = ""
# os.environ["OPENAI_API_KEY"] = ""
# os.environ["SERPAPI_API_KEY"] = ""
```

```python
from datetime import datetime
from langchain.callbacks import WandbCallbackHandler, StdOutCallbackHandler
from langchain.callbacks.base import CallbackManager
from langchain.llms import OpenAI
```

```
Callback Handler that logs to Weights and Biases.

Parameters:
    job_type (str): The type of job.
    project (str): The project to log to.
    entity (str): The entity to log to.
    tags (list): The tags to log.
    group (str): The group to log to.
    name (str): The name of the run.
    notes (str): The notes to log.
    visualize (bool): Whether to visualize the run.
    complexity_metrics (bool): Whether to log complexity metrics.
    stream_logs (bool): Whether to stream callback actions to W&B
```

```
Default values for WandbCallbackHandler(...)

visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
```

NOTE: For beta workflows we have made the default analysis based on textstat and the visualizations based on spacy.

```python
"""Main function.

This function is used to try the callback handler.
Scenarios:
1. OpenAI LLM
2. Chain with multiple SubChains on multiple generations
3. Agent with Tools
"""
session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
wandb_callback = WandbCallbackHandler(
    job_type="inference",
    project="langchain_callback_demo",
    group=f"minimal_{session_group}",
    name="llm",
    tags=["test"],
)
manager = CallbackManager([StdOutCallbackHandler(), wandb_callback])
llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)
```

[Output: wandb login confirmation, run data saved locally, and links to the `llm` run at https://wandb.ai/harrison-chase/langchain_callback_demo/runs/e47j1914, plus a warning that the wandb callback is currently in beta and issues should be reported to https://github.com/wandb/wandb/issues with the tag `langchain`.]

```
# Defaults for WandbCallbackHandler.flush_tracker(...)

reset: bool = True,
finish: bool = False,
```

The `flush_tracker` function is used to log LangChain sessions to Weights & Biases. It takes in the LangChain module or agent, and logs at minimum the prompts and generations alongside the serialized form of the LangChain module to the specified Weights & Biases project. By default we reset the session as opposed to concluding the session outright.

```python
# SCENARIO 1 - LLM
llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
wandb_callback.flush_tracker(llm, name="simple_sequential")
```

[Output: the `llm` run finishes (5 W&B files, 2 media files, 5 artifact files synced) and a new run `simple_sequential` starts at https://wandb.ai/harrison-chase/langchain_callback_demo/runs/jyxma7hu.]

```python
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
```

```python
# SCENARIO 2 - Chain
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)

test_prompts = [
    {
        "title": "documentary about good video games that push the boundary of game design"
    },
    {"title": "cocaine bear vs heroin wolf"},
    {"title": "the best in class mlops tooling"},
]
synopsis_chain.apply(test_prompts)
wandb_callback.flush_tracker(synopsis_chain, name="agent")
```

[Output: the `simple_sequential` run finishes (4 W&B files, 2 media files, 6 artifact files synced) and a new run `agent` starts at https://wandb.ai/harrison-chase/langchain_callback_demo/runs/wzy59zjq.]

```python
from langchain.agents import initialize_agent, load_tools
```

```python
# SCENARIO 3 - Agent with Tools
tools = load_tools(["serpapi", "llm-math"], llm=llm, callback_manager=manager)
agent = initialize_agent(
    tools,
    llm,
    agent="zero-shot-react-description",
    callback_manager=manager,
    verbose=True,
)
agent.run(
    "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
)
wandb_callback.flush_tracker(agent, reset=False, finish=True)
```

```
> Entering new AgentExecutor chain...
 I need to find out who Leo DiCaprio's girlfriend is and then calculate her age raised to the 0.43 power.
Action: Search
Action Input: "Leo DiCaprio girlfriend"
Observation: DiCaprio had a steady girlfriend in Camila Morrone. He had been with the model turned actress for nearly five years, as they were first said to be dating at the end of 2017. And the now 26-year-old Morrone is no stranger to Hollywood.
Thought: I need to calculate her age raised to the 0.43 power.
Action: Calculator
Action Input: 26^0.43
Observation: Answer: 4.059182145592686
Thought: I now know the final answer.
Final Answer: Leo DiCaprio's girlfriend is Camila Morrone and her current age raised to the 0.43 power is 4.059182145592686.

> Finished chain.
```

[Output: the `agent` run finishes (5 W&B files, 2 media files, 7 artifact files synced).]
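The handler's analysis features default to off; a variant of the constructor above that turns them on, with flags taken from the parameter list documented earlier (values illustrative):

```python
# Same constructor as above, with the optional analysis features enabled:
# per-generation visualizations, textstat complexity metrics, and streaming
# of each callback action to W&B as it happens.
wandb_callback = WandbCallbackHandler(
    job_type="inference",
    project="langchain_callback_demo",
    name="llm_with_metrics",
    visualize=True,
    complexity_metrics=True,
    stream_logs=True,
)
```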
@@ -158,14 +158,14 @@ Open Source

 ---

-.. link-button:: https://github.com/jerryjliu/gpt_index
+.. link-button:: https://github.com/jerryjliu/llama_index
     :type: url
-    :text: GPT Index
+    :text: LlamaIndex
     :classes: stretched-link btn-lg

 +++

-GPT Index is a project consisting of a set of data structures that are created using GPT-3 and can be traversed using GPT-3 in order to answer queries.
+LlamaIndex (formerly GPT Index) is a project consisting of a set of data structures that are created using GPT-3 and can be traversed using GPT-3 in order to answer queries.

 ---
@@ -322,5 +322,14 @@ Proprietary

By Zahid Khawaja, this demo utilizes question answering to answer questions about a given website. A followup added this for `YouTube videos <https://twitter.com/chillzaza_/status/1593739682013220865?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ>`_, and then another followup added it for `Wikipedia <https://twitter.com/chillzaza_/status/1594847151238037505?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ>`_.

---

.. link-button:: https://mynd.so
    :type: url
    :text: Mynd
    :classes: stretched-link btn-lg

+++

A journaling app for self-care that uses AI to uncover insights and patterns over time.
@@ -97,6 +97,8 @@ The above modules can be used in a variety of ways. LangChain also provides guid

- `Summarization <./use_cases/summarization.html>`_: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation.

- `Querying Tabular Data <./use_cases/tabular.html>`_: If you want to understand how to use LLMs to query data that is stored in a tabular format (csvs, SQL, dataframes, etc) you should read this page.

- `Evaluation <./use_cases/evaluation.html>`_: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.

- `Generate similar examples <./use_cases/generate_examples.html>`_: Generating similar examples to a given input. This is a common use case for many applications, and LangChain provides some prompts/chains for assisting in this.

@@ -117,6 +119,8 @@ The above modules can be used in a variety of ways. LangChain also provides guid

   ./use_cases/combine_docs.md
   ./use_cases/question_answering.md
   ./use_cases/summarization.md
   ./use_cases/tabular.rst
   ./use_cases/extraction.md
   ./use_cases/evaluation.rst
   ./use_cases/model_laboratory.ipynb
@@ -22,7 +22,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 20,
+ "execution_count": 1,
  "id": "2e87c10a",
  "metadata": {},
  "outputs": [],
@@ -30,13 +30,14 @@
  "from langchain.embeddings.openai import OpenAIEmbeddings\n",
  "from langchain.vectorstores import Chroma\n",
  "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain import OpenAI, VectorDBQA\n",
+ "from langchain.llms import OpenAI\n",
+ "from langchain.chains import RetrievalQA\n",
  "llm = OpenAI(temperature=0)"
 ]
},
{
 "cell_type": "code",
- "execution_count": 37,
+ "execution_count": 2,
  "id": "f2675861",
  "metadata": {},
  "outputs": [
@@ -62,17 +63,17 @@
 },
 {
  "cell_type": "code",
- "execution_count": 38,
+ "execution_count": 4,
  "id": "bc5403d4",
  "metadata": {},
  "outputs": [],
  "source": [
-  "state_of_union = VectorDBQA.from_chain_type(llm=llm, chain_type=\"stuff\", vectorstore=docsearch)"
+  "state_of_union = RetrievalQA.from_chain_type(llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever())"
 ]
},
{
 "cell_type": "code",
- "execution_count": 39,
+ "execution_count": 5,
  "id": "1431cded",
  "metadata": {},
  "outputs": [],
@@ -82,7 +83,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 40,
+ "execution_count": 6,
  "id": "915d3ff3",
  "metadata": {},
  "outputs": [],
@@ -92,7 +93,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 41,
+ "execution_count": 7,
  "id": "96a2edf8",
  "metadata": {},
  "outputs": [
@@ -109,7 +110,7 @@
  "docs = loader.load()\n",
  "ruff_texts = text_splitter.split_documents(docs)\n",
  "ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name=\"ruff\")\n",
- "ruff = VectorDBQA.from_chain_type(llm=llm, chain_type=\"stuff\", vectorstore=ruff_db)"
+ "ruff = RetrievalQA.from_chain_type(llm=llm, chain_type=\"stuff\", retriever=ruff_db.as_retriever())"
 ]
},
{
@@ -264,9 +265,9 @@
 "id": "9161ba91",
 "metadata": {},
 "source": [
- "You can also set `return_direct=True` if you intend to use the agent as a router and just want to directly return the result of the VectorDBQaChain.\n",
+ "You can also set `return_direct=True` if you intend to use the agent as a router and just want to directly return the result of the RetrievalQAChain.\n",
  "\n",
- "Notice that in the above examples the agent did some extra work after querying the VectorDBQAChain. You can avoid that and just return the result directly."
+ "Notice that in the above examples the agent did some extra work after querying the RetrievalQAChain. You can avoid that and just return the result directly."
 ]
},
{
docs/modules/agents/examples/human_tools.ipynb (new file): 132 lines

# Human as a tool

Humans are AGI, so they can certainly be used as a tool to help out an AI agent when it is confused.

```python
import sys
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent

llm = ChatOpenAI(temperature=0.0)
math_llm = OpenAI(temperature=0.0)
tools = load_tools(
    ["human", "llm-math"],
    llm=math_llm,
)

agent_chain = initialize_agent(
    tools,
    llm,
    agent="zero-shot-react-description",
    verbose=True,
)
```

In the above code you can see the tool takes input directly from the command line.
You can customize `prompt_func` and `input_func` according to your needs.

```python
agent_chain.run("What is Eric Zhu's birthday?")
# Answer with "last week"
```

```
> Entering new AgentExecutor chain...
I don't know Eric Zhu, so I should ask a human for guidance.
Action: Human
Action Input: "Do you know when Eric Zhu's birthday is?"

Do you know when Eric Zhu's birthday is?
last week

Observation: last week
Thought: That's not very helpful. I should ask for more information.
Action: Human
Action Input: "Do you know the specific date of Eric Zhu's birthday?"

Do you know the specific date of Eric Zhu's birthday?
august 1st

Observation: august 1st
Thought: Now that I have the date, I can check if it's a leap year or not.
Action: Calculator
Action Input: "Is 2021 a leap year?"
Observation: Answer: False
Thought: I have all the information I need to answer the original question.
Final Answer: Eric Zhu's birthday is on August 1st and it is not a leap year in 2021.

> Finished chain.
```

Result: "Eric Zhu's birthday is on August 1st and it is not a leap year in 2021."
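The note above mentions `prompt_func` and `input_func`; a sketch of swapping in a multi-line input function, assuming `load_tools` forwards these callables to the human tool:

```python
from langchain.agents import load_tools
from langchain.llms import OpenAI


def get_input() -> str:
    """Collect multi-line input from the terminal until the user types 'q'."""
    print("Insert your text. Enter 'q' to end.")
    contents = []
    while True:
        line = input()
        if line == "q":
            break
        contents.append(line)
    return "\n".join(contents)


# `input_func` is assumed to be forwarded to the human tool by load_tools.
tools = load_tools(["human", "llm-math"], llm=OpenAI(temperature=0.0), input_func=get_input)
```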
@@ -61,7 +61,8 @@
 "tools = [\n",
 "    Tool(\n",
 "        name=\"Intermediate Answer\",\n",
- "        func=search.run\n",
+ "        func=search.run,\n",
 "        description=\"useful for when you need to ask with search\"\n",
 "    )\n",
 "]\n",
 "\n",
@@ -24,11 +24,13 @@
 "tools = [\n",
 "    Tool(\n",
 "        name=\"Search\",\n",
- "        func=docstore.search\n",
+ "        func=docstore.search,\n",
 "        description=\"useful for when you need to ask with search\"\n",
 "    ),\n",
 "    Tool(\n",
 "        name=\"Lookup\",\n",
- "        func=docstore.lookup\n",
+ "        func=docstore.lookup,\n",
 "        description=\"useful for when you need to ask with lookup\"\n",
 "    )\n",
 "]\n",
 "\n",
@@ -52,7 +52,8 @@
 "tools = [\n",
 "    Tool(\n",
 "        name=\"Intermediate Answer\",\n",
- "        func=search.run\n",
+ "        func=search.run,\n",
 "        description=\"useful for when you need to ask with search\"\n",
 "    )\n",
 "]\n",
 "\n",
@@ -13,3 +13,4 @@ For more detailed information on tools, and different types of tools in LangChai

Toolkits are groups of tools that are best used together.
They allow you to logically group and initialize a set of tools that share a particular resource (such as a database connection or JSON object).
They can be used to construct an agent for a specific use case, as sketched below.
+ For more detailed information on toolkits and their use cases, see [this documentation](how_to_guides.rst#agent-toolkits) (the "Agent Toolkits" section).
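As a rough illustration of the pattern, a sketch using the SQL toolkit, assuming the `agent_toolkits` module available in LangChain versions of this era and a local SQLite file (the path is a placeholder):

```python
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

# The shared resource (one DB connection) backs every tool in the toolkit.
db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # placeholder path
toolkit = SQLDatabaseToolkit(db=db)

agent_executor = create_sql_agent(
    llm=OpenAI(temperature=0),
    toolkit=toolkit,
    verbose=True,
)
agent_executor.run("How many tables does the database contain?")
```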
@@ -145,3 +145,10 @@ Below is a list of all supported tools and relevant information:

- Requires LLM: No
- Extra Parameters: `top_k_results`

**podcast-api**

- Tool Name: Podcast API
- Tool Description: Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.
- Notes: A natural language connection to the Listen Notes Podcast API (`https://www.PodcastAPI.com`), specifically the `/search/` endpoint.
- Requires LLM: Yes
- Extra Parameters: `listen_api_key` (your API key to access this endpoint)
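Given the documented tool name and extra parameter, loading the tool through `load_tools` presumably looks like this (the key value is a placeholder):

```python
from langchain.agents import load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# `listen_api_key` is the tool's documented extra parameter; use your own key.
tools = load_tools(["podcast-api"], llm=llm, listen_api_key="xxx")
```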
@@ -149,6 +149,33 @@
  "chain.run(\"Search for 'Avatar'\")"
 ]
},
+{
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+  "## Listen API Example"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "import os\n",
+  "from langchain.llms import OpenAI\n",
+  "from langchain.chains.api import podcast_docs\n",
+  "from langchain.chains import APIChain\n",
+  "\n",
+  "# Get api key here: https://www.listennotes.com/api/pricing/\n",
+  "listen_api_key = 'xxx'\n",
+  "\n",
+  "llm = OpenAI(temperature=0)\n",
+  "headers = {\"X-ListenAPI-Key\": listen_api_key}\n",
+  "chain = APIChain.from_llm_and_api_docs(llm, podcast_docs.PODCAST_DOCS, headers=headers, verbose=True)\n",
+  "chain.run(\"Search for 'silicon valley bank' podcast episodes, audio length is more than 30 minutes, return only 1 results\")\n"
+ ]
+},
{
 "cell_type": "code",
 "execution_count": null,
@@ -173,7 +200,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.10.9"
+ "version": "3.9.1"
 }
},
"nbformat": 4,
@@ -532,7 +532,7 @@
 "id": "5fc6f507",
 "metadata": {},
 "source": [
- "Note how our custom table definition and sample rows for `Track` overrides the `sample_rows_in_table_info` parameter. Tables that are not overriden by `custom_table_info`, in this example `Playlist`, will have their table info gathered automatically as usual."
+ "Note how our custom table definition and sample rows for `Track` overrides the `sample_rows_in_table_info` parameter. Tables that are not overridden by `custom_table_info`, in this example `Playlist`, will have their table info gathered automatically as usual."
 ]
},
{
@@ -679,7 +679,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.10.9"
+ "version": "3.9.1"
 }
},
"nbformat": 4,
@@ -9,7 +9,7 @@
 "\n",
 "This notebook goes over how to set up a chat model to chat with a vector database.\n",
 "\n",
- "This notebook is very similar to the example of using an LLM in the ChatVectorDBChain. The only differences here are (1) using a ChatModel, and (2) passing in a ChatPromptTemplate (optimized for chat models)."
+ "This notebook is very similar to the example of using an LLM in the ConversationalRetrievalChain. The only differences here are (1) using a ChatModel, and (2) passing in a ChatPromptTemplate (optimized for chat models)."
 ]
},
{
@@ -24,7 +24,7 @@
 "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.vectorstores import Chroma\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.chains import ChatVectorDBChain"
+ "from langchain.chains import ConversationalRetrievalChain"
 ]
},
{
@@ -157,7 +157,7 @@
 "id": "3c96b118",
 "metadata": {},
 "source": [
- "We now initialize the ChatVectorDBChain"
+ "We now initialize the ConversationalRetrievalChain"
 ]
},
{
@@ -169,7 +169,7 @@
 },
 "outputs": [],
 "source": [
- "qa = ChatVectorDBChain.from_llm(ChatOpenAI(temperature=0), vectorstore, qa_prompt=prompt)"
+ "qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), vectorstore, qa_prompt=prompt)"
 ]
},
{
@@ -205,7 +205,7 @@
 {
  "data": {
   "text/plain": [
-   "\"The President nominated Circuit Court of Appeals Judge Ketanji Brown Jackson to serve on the United States Supreme Court. He described her as one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and a consensus builder. He also mentioned that she has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
+   "\"The President nominated Circuit Court of Appeals Judge Ketanji Brown Jackson to serve on the United States Supreme Court. He described her as one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and a consensus builder. She has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
  ]
 },
 "execution_count": 9,
@@ -227,7 +227,7 @@
},
{
 "cell_type": "code",
- "execution_count": 12,
+ "execution_count": 10,
 "id": "00b4cf00",
 "metadata": {
  "tags": []
@@ -241,7 +241,7 @@
},
{
 "cell_type": "code",
- "execution_count": 13,
+ "execution_count": 11,
 "id": "f01828d1",
 "metadata": {
  "tags": []
@@ -250,10 +250,10 @@
 {
  "data": {
   "text/plain": [
-   "'The context does not provide information about the predecessor of Ketanji Brown Jackson.'"
+   "\"The President mentioned Circuit Court of Appeals Judge Ketanji Brown Jackson as the nominee for the United States Supreme Court. He described her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence. The President did not mention any specific sources of support for Judge Jackson, but he did note that advancing immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce.\""
  ]
 },
- "execution_count": 13,
+ "execution_count": 11,
 "metadata": {},
 "output_type": "execute_result"
}
@@ -267,14 +267,14 @@
 "id": "2324cdc6-98bf-4708-b8cd-02a98b1e5b67",
 "metadata": {},
 "source": [
- "## Chat Vector DB with streaming to `stdout`\n",
+ "## ConversationalRetrievalChain with streaming to `stdout`\n",
  "\n",
  "Output from the chain will be streamed to `stdout` token by token in this example."
 ]
},
{
 "cell_type": "code",
- "execution_count": 15,
+ "execution_count": 12,
 "id": "2efacec3-2690-4b05-8de3-a32fd2ac3911",
 "metadata": {
  "tags": []
@@ -285,7 +285,7 @@
 "from langchain.llms import OpenAI\n",
 "from langchain.callbacks.base import CallbackManager\n",
 "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
- "from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT\n",
+ "from langchain.chains.chat_index.prompts import CONDENSE_QUESTION_PROMPT\n",
 "from langchain.chains.question_answering import load_qa_chain\n",
 "\n",
 "# Construct a ChatVectorDBChain with a streaming llm for combine docs\n",
@@ -296,12 +296,12 @@
 "question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
 "doc_chain = load_qa_chain(streaming_llm, chain_type=\"stuff\", prompt=prompt)\n",
 "\n",
- "qa = ChatVectorDBChain(vectorstore=vectorstore, combine_docs_chain=doc_chain, question_generator=question_generator)"
+ "qa = ConversationalRetrievalChain(retriever=vectorstore.as_retriever(), combine_docs_chain=doc_chain, question_generator=question_generator)\n"
 ]
},
{
 "cell_type": "code",
- "execution_count": 16,
+ "execution_count": 13,
 "id": "fd6d43f4-7428-44a4-81bc-26fe88a98762",
 "metadata": {
  "tags": []
@@ -323,7 +323,7 @@
},
{
 "cell_type": "code",
- "execution_count": 17,
+ "execution_count": 14,
 "id": "5ab38978-f3e8-4fa7-808c-c79dec48379a",
 "metadata": {
  "tags": []
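For reference, the migrated `ConversationalRetrievalChain` built above is driven with an explicit chat history; a minimal usage sketch (question strings are illustrative):

```python
# The chain expects the question plus an explicit chat history;
# each turn is appended as a (question, answer) tuple.
chat_history = []
query = "What did the president say about Ketanji Brown Jackson?"
result = qa({"question": query, "chat_history": chat_history})
chat_history.append((query, result["answer"]))

# The follow-up is condensed with the history before retrieval.
followup = "Did he mention who she succeeded?"
result = qa({"question": followup, "chat_history": chat_history})
print(result["answer"])
```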
@@ -123,6 +123,40 @@
 "id": "05e9e2fe",
 "metadata": {},
 "source": []
},
+{
+ "attachments": {},
+ "cell_type": "markdown",
+ "id": "c43803d1",
+ "metadata": {},
+ "source": [
+  "## Using PromptLayer Track\n",
+  "If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantiating the PromptLayer LLM to get the request id. "
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b7d4db01",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "chat = PromptLayerChatOpenAI(return_pl_id=True)\n",
+  "chat_results = chat.generate([[HumanMessage(content=\"I am a cat and I want\")]])\n",
+  "\n",
+  "for res in chat_results.generations:\n",
+  "    pl_request_id = res[0].generation_info[\"pl_request_id\"]\n",
+  "    promptlayer.track.score(request_id=pl_request_id, score=100)"
+ ]
+},
+{
+ "cell_type": "markdown",
+ "id": "13e56507",
+ "metadata": {},
+ "source": [
+  "Using this allows you to track the performance of your model in the PromptLayer dashboard. If you are using a prompt template, you can attach a template to a request as well.\n",
+  "Overall, this gives you the opportunity to track the performance of different templates and models in the PromptLayer dashboard."
+ ]
+}
],
"metadata": {
@@ -141,11 +175,11 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.8.8"
+ "version": "3.8.8 (default, Apr 13 2021, 12:59:45) \n[Clang 10.0.0 ]"
 },
 "vscode": {
  "interpreter": {
-  "hash": "c4fe2cd85a8d9e8baaec5340ce66faff1c77581a9f43e6c45e85e09b6fced008"
+  "hash": "8a5edab282632443219e051e4ade2d1d5bbc671c781051bf1437897cbdfea0f1"
  }
 }
},
|
||||
"id": "07c1e3b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Vector DB Question/Answering\n",
|
||||
"# Retrieval Question/Answering\n",
|
||||
"\n",
|
||||
"This example showcases using a chat model to do question answering over a vector database.\n",
|
||||
"\n",
|
||||
"This notebook is very similar to the example of using an LLM in the ChatVectorDBChain. The only differences here are (1) using a ChatModel, and (2) passing in a ChatPromptTemplate (optimized for chat models)."
|
||||
"This notebook is very similar to the example of using an LLM in the RetrievalQA. The only differences here are (1) using a ChatModel, and (2) passing in a ChatPromptTemplate (optimized for chat models)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 7,
|
||||
"id": "82525493",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -22,7 +22,7 @@
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.chains import VectorDBQA"
|
||||
"from langchain.chains import RetrievalQA"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -100,28 +100,28 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 8,
|
||||
"id": "3018f865",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_type_kwargs = {\"prompt\": prompt}\n",
|
||||
"qa = VectorDBQA.from_chain_type(llm=ChatOpenAI(), chain_type=\"stuff\", vectorstore=docsearch, chain_type_kwargs=chain_type_kwargs)"
|
||||
"qa = RetrievalQA.from_chain_type(llm=ChatOpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), chain_type_kwargs=chain_type_kwargs)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"id": "032a47f8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"The President nominated Ketanji Brown Jackson as a Judge for the United States Supreme Court. He described her as one of the nation's top legal minds and a former top litigator in private practice, a former federal public defender, and a consensus builder.\""
|
||||
"\"The president nominated Ketanji Brown Jackson to serve on the United States Supreme Court. He referred to her as one of our nation's top legal minds, a former federal public defender, a consensus builder, and from a family of public school educators and police officers. Since she's been nominated, she has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
|
||||
@@ -5,11 +5,11 @@
"id": "efc5be67",
"metadata": {},
"source": [
- "# VectorDB Question Answering with Sources\n",
+ "# Retrieval Question Answering with Sources\n",
"\n",
- "This notebook goes over how to do question-answering with sources with a chat model over a vector database. It does this by using the `VectorDBQAWithSourcesChain`, which does the lookup of the documents from a vector database. \n",
+ "This notebook goes over how to do question-answering with sources with a chat model over a vector database. It does this by using the `RetrievalQAWithSourcesChain`, which does the lookup of the documents from a vector database. \n",
"\n",
- "This notebook is very similar to the example of using an LLM in the ChatVectorDBChain. The only differences here are (1) using a ChatModel, and (2) passing in a ChatPromptTemplate (optimized for chat models)."
+ "This notebook is very similar to the example of using an LLM in the RetrievalQAWithSources. The only differences here are (1) using a ChatModel, and (2) passing in a ChatPromptTemplate (optimized for chat models)."
]
},
{
@@ -51,6 +51,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Running Chroma using direct local API.\n",
"Using DuckDB in-memory for database. Data will be transient.\n",
+ "Running Chroma using direct local API.\n",
+ "Using DuckDB in-memory for database. Data will be transient.\n"
]
@@ -62,12 +64,12 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 9,
"id": "8aa571ae",
"metadata": {},
"outputs": [],
"source": [
- "from langchain.chains import VectorDBQAWithSourcesChain"
+ "from langchain.chains import RetrievalQAWithSourcesChain"
]
},
{
@@ -101,7 +103,7 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 6,
"id": "ed00e906",
"metadata": {},
"outputs": [],
@@ -130,23 +132,23 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 10,
"id": "aa859d4c",
"metadata": {},
"outputs": [],
"source": [
"chain_type_kwargs = {\"prompt\": prompt}\n",
- "chain = VectorDBQAWithSourcesChain.from_chain_type(\n",
+ "chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
" ChatOpenAI(temperature=0), \n",
" chain_type=\"stuff\", \n",
- " vectorstore=docsearch,\n",
+ " retriever=docsearch.as_retriever(),\n",
" chain_type_kwargs=chain_type_kwargs\n",
")"
]
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 11,
"id": "8ba36fa7",
"metadata": {},
"outputs": [
@@ -154,10 +156,10 @@
"data": {
"text/plain": [
"{'answer': 'The President honored Justice Stephen Breyer, an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court, for his dedicated service to the country. \\n',\n",
- " 'sources': '30-pl'}"
+ " 'sources': '31-pl'}"
]
},
- "execution_count": 19,
+ "execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -168,25 +170,11 @@
},
{
"cell_type": "code",
- "execution_count": 11,
- "id": "c91fdc8a",
+ "execution_count": null,
+ "id": "8308fbf7",
"metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "{'answer': ' The president honored Justice Stephen Breyer for his service.\\n',\n",
- " 'sources': '30-pl'}"
- ]
- },
- "execution_count": 11,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "qa({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
- ]
+ "outputs": [],
+ "source": []
}
],
"metadata": {
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "522686de",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -36,7 +36,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -56,7 +56,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"id": "76a6e7b0-e927-4bfb-a414-1332a4149106",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -68,7 +68,7 @@
|
||||
"AIMessage(content=\"J'aime programmer.\", additional_kwargs={})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -87,7 +87,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 4,
|
||||
"id": "ce16ad78-8e6f-48cd-954e-98be75eb5836",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -99,7 +99,7 @@
|
||||
"AIMessage(content=\"J'aime programmer.\", additional_kwargs={})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -122,7 +122,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"id": "2b21fc52-74b6-4950-ab78-45d12c68fb4d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -131,10 +131,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output=None)"
|
||||
"LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -150,7 +150,39 @@
|
||||
" HumanMessage(content=\"Translate this sentence from English to French. I love artificial intelligence.\")\n",
|
||||
" ],\n",
|
||||
"]\n",
|
||||
"chat.generate(batch_messages)"
|
||||
"result = chat.generate(batch_messages)\n",
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2960f50f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can recover things like token usage from this LLMResult"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a6186bee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'token_usage': {'prompt_tokens': 71,\n",
|
||||
" 'completion_tokens': 18,\n",
|
||||
" 'total_tokens': 89}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result.llm_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
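The new cells above boil down to a small pattern worth calling out; a sketch under the same assumptions as the notebook (an OpenAI key in the environment):

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(temperature=0)
batch_messages = [
    [SystemMessage(content="You are a helpful assistant that translates English to French."),
     HumanMessage(content="Translate this sentence from English to French. I love programming.")],
    [SystemMessage(content="You are a helpful assistant that translates English to French."),
     HumanMessage(content="Translate this sentence from English to French. I love artificial intelligence.")],
]
result = chat.generate(batch_messages)
# Provider metadata lives on the LLMResult, not on the individual generations:
print(result.llm_output["token_usage"])  # e.g. {'prompt_tokens': 71, ...}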
docs/modules/document_loaders/examples/blackboard.ipynb (new file, 38 lines)
@@ -0,0 +1,38 @@
+ {
+ "cells": [
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Blackboard\n",
+ "\n",
+ "This covers how to load data from a Blackboard Learn instance."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.document_loaders import BlackboardLoader\n",
+ "\n",
+ "loader = BlackboardLoader(\n",
+ " blackboard_course_url=\"https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1\",\n",
+ " bbrouter=\"expires:12345...\",\n",
+ " load_all_recursively=True,\n",
+ ")\n",
+ "documents = loader.load()"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
File diff suppressed because one or more lines are too long
@@ -1,5 +1,8 @@
+ <!DOCTYPE html>
<html>
+ <head>
+ <title>Test Title</title>
+ </head>
<body>

<h1>My First Heading</h1>
docs/modules/document_loaders/examples/figma.ipynb (new file, 79 lines)
@@ -0,0 +1,79 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "33205b12",
+ "metadata": {},
+ "source": [
+ "# Figma\n",
+ "\n",
+ "This notebook covers how to load data from the Figma REST API into a format that can be ingested into LangChain."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "90b69c94",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from langchain.document_loaders import FigmaFileLoader"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "13deb0f5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loader = FigmaFileLoader(\n",
+ " os.environ.get('ACCESS_TOKEN'),\n",
+ " os.environ.get('NODE_IDS'),\n",
+ " os.environ.get('FILE_KEY')\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9ccc1e2f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loader.load()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3e64cac2",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
@@ -48,9 +48,7 @@
"outputs": [
{
"data": {
- "text/plain": [
- "[Document(page_content='My First Heading\\n\\nMy first paragraph.', lookup_str='', metadata={'source': 'example_data/fake-content.html'}, lookup_index=0)]"
- ]
+ "text/plain": "[Document(page_content='My First Heading\\n\\nMy first paragraph.', lookup_str='', metadata={'source': 'example_data/fake-content.html'}, lookup_index=0)]"
},
"execution_count": 4,
"metadata": {},
@@ -61,13 +59,57 @@
"data"
]
},
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Loading HTML with BeautifulSoup4\n",
+ "\n",
+ "We can also use BeautifulSoup4 to load HTML documents using the `BSHTMLLoader`. This will extract the text from the html into `page_content`, and the page title as `title` into `metadata`."
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 16,
"id": "79b1bce4",
"metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "from langchain.document_loaders import BSHTMLLoader"
+ ]
},
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "outputs": [
+ {
+ "data": {
+ "text/plain": "[Document(page_content='\\n\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', lookup_str='', metadata={'source': 'example_data/fake-content.html', 'title': 'Test Title'}, lookup_index=0)]"
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "loader = BSHTMLLoader(\"example_data/fake-content.html\")\n",
+ "data = loader.load()\n",
+ "data"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [],
+ "metadata": {
+ "collapsed": false
+ }
+ }
],
"metadata": {
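A compact restatement of what the new cells add, hedged as a sketch (requires `beautifulsoup4` to be installed; the example file ships with the docs):

from langchain.document_loaders import BSHTMLLoader

# BSHTMLLoader parses the file with BeautifulSoup4: the visible text goes
# into page_content, and the <title> tag into the document metadata.
loader = BSHTMLLoader("example_data/fake-content.html")
data = loader.load()
print(data[0].metadata["title"])  # -> 'Test Title'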
@@ -1,145 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "markdown",
- "id": "34c90eed",
- "metadata": {},
- "source": [
- "# Microsoft Word\n",
- "\n",
- "This notebook shows how to load text from Microsoft word documents."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "28ded768",
- "metadata": {},
- "outputs": [],
- "source": [
- "from langchain.document_loaders import UnstructuredDocxLoader"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "f1f26035",
- "metadata": {},
- "outputs": [],
- "source": [
- "loader = UnstructuredDocxLoader('example_data/fake.docx')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "2c87dde9",
- "metadata": {},
- "outputs": [],
- "source": [
- "data = loader.load()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "0e4a884c",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "[Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'example_data/fake.docx'}, lookup_index=0)]"
- ]
- },
- "execution_count": 4,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "data"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "5d1472e9",
- "metadata": {},
- "source": [
- "## Retain Elements\n",
- "\n",
- "Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "93abf60b",
- "metadata": {},
- "outputs": [],
- "source": [
- "loader = UnstructuredDocxLoader('example_data/fake.docx', mode=\"elements\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "c35cdbcc",
- "metadata": {},
- "outputs": [],
- "source": [
- "data = loader.load()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "fae2d730",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "[Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'example_data/fake.docx'}, lookup_index=0)]"
- ]
- },
- "execution_count": 4,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "data"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "961a7b1d",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
@@ -158,7 +158,72 @@
},
{
"cell_type": "markdown",
- "id": "7874d01d",
+ "id": "672733fd",
"metadata": {},
"source": [
+ "## Define a Partitioning Strategy\n",
+ "\n",
+ "Unstructured document loaders allow users to pass in a `strategy` parameter that lets `unstructured` know how to partition the document. Currently supported strategies are `\"hi_res\"` (the default) and `\"fast\"`. Hi res partitioning strategies are more accurate, but take longer to process. Fast strategies partition the document more quickly, but trade off accuracy. Not all document types have separate hi res and fast partitioning strategies. For those document types, the `strategy` kwarg is ignored. In some cases, the high res strategy will fall back to fast if there is a dependency missing (i.e. a model for document partitioning). You can see how to apply a strategy to an `UnstructuredFileLoader` below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "767238a4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.document_loaders import UnstructuredFileLoader"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "9518b425",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loader = UnstructuredFileLoader(\"layout-parser-paper-fast.pdf\", strategy=\"fast\", mode=\"elements\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "645f29e9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "docs = loader.load()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "60685353",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Document(page_content='1', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
+ " Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
+ " Document(page_content='0', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
+ " Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
+ " Document(page_content='n', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'Title'}, lookup_index=0)]"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "docs[:5]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8de9ef16",
+ "metadata": {},
+ "source": [
"## PDF Example\n",
@@ -166,7 +231,6 @@
"Processing PDF documents works exactly the same way. Unstructured detects the file type and extracts the same types of `elements`. "
]
},
-
{
"cell_type": "code",
"execution_count": 1,
@@ -225,7 +289,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "8ca8a648",
+ "id": "f52b04cb",
"metadata": {},
"outputs": [],
"source": []
@@ -247,7 +311,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.1"
+ "version": "3.8.13"
}
},
"nbformat": 4,
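Since the new section is the substance of this diff, here is a hedged sketch of the trade-off it describes (the PDF filename is the one used in the notebook):

from langchain.document_loaders import UnstructuredFileLoader

# "fast" skips the expensive layout-detection model, trading accuracy for
# speed; "hi_res" is the default. For document types without a separate
# fast path, the strategy kwarg is simply ignored.
fast_loader = UnstructuredFileLoader(
    "layout-parser-paper-fast.pdf", strategy="fast", mode="elements"
)
fast_docs = fast_loader.load()

hi_res_loader = UnstructuredFileLoader("layout-parser-paper-fast.pdf", mode="elements")
hi_res_docs = hi_res_loader.load()  # slower, usually better element categories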
@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
- "loader = UnstructuredWordDocumentLoader(\"fake.docx\")"
+ "loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\")"
]
},
{
@@ -78,7 +78,7 @@
"metadata": {},
"outputs": [],
"source": [
- "loader = UnstructuredWordDocumentLoader(\"fake.docx\", mode=\"elements\")"
+ "loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\", mode=\"elements\")"
]
},
{
@@ -21,8 +21,6 @@ There are a lot of different document loaders that LangChain supports. Below are

`GoogleDrive <./examples/googledrive.html>`_: A walkthrough of how to load data from Google drive.

- `Microsoft Word <./examples/microsoft_word.html>`_: A walkthrough of how to load data from Microsoft Word files.
-
`Obsidian <./examples/obsidian.html>`_: A walkthrough of how to load data from an Obsidian file dump.

`Roam <./examples/roam.html>`_: A walkthrough of how to load data from a Roam file export.

@@ -59,6 +57,28 @@ There are a lot of different document loaders that LangChain supports. Below are

`iFixit <./examples/ifixit.html>`_: A walkthrough of how to search and load data like guides, technical Q&A's, and device wikis from iFixit.com

+ `Notebook <./examples/notebook.html>`_: A walkthrough of how to load data from an .ipynb notebook.
+
+ `Copypaste <./examples/copypaste.html>`_: A walkthrough of how to load a document object from something you just want to copy and paste.
+
+ `CSV <./examples/csv.html>`_: A walkthrough of how to load data from a .csv file.
+
+ `Facebook Chat <./examples/facebook_chat.html>`_: A walkthrough of how to load data from a Facebook Chat json file.
+
+ `Image <./examples/image.html>`_: A walkthrough of how to load images such as JPGs and PNGs into a document format that can be used downstream.
+
+ `Markdown <./examples/markdown.html>`_: A walkthrough of how to load data from a markdown file.
+
+ `SRT <./examples/srt.html>`_: A walkthrough of how to load data from a subtitle (`.srt`) file.
+
+ `Telegram <./examples/telegram.html>`_: A walkthrough of how to load data from a Telegram Chat json file.
+
+ `URL <./examples/url.html>`_: A walkthrough of how to load HTML documents from a list of URLs into a document format that we can use downstream.
+
+ `Word Document <./examples/word_document.html>`_: A walkthrough of how to load data from Microsoft Word files.
+
+ `Blackboard <./examples/blackboard.html>`_: A walkthrough of how to load data from a Blackboard course.
+
.. toctree::
:maxdepth: 1
:glob:
@@ -3,16 +3,24 @@ Indexes

Indexes refer to ways to structure documents so that LLMs can best interact with them.
- This module contains utility functions for working with documents, different types of indexes, and then examples for using those indexes in chains.
+ LangChain provides common indices for working with data (most prominently support for vector databases).
+ For more complicated index structures, it is worth checking out `GPTIndex <https://gpt-index.readthedocs.io/en/latest/index.html>`_.
+
+ The most common way that indexes are used in chains is in a "retrieval" step.
+ This step refers to taking a user's query and returning the most relevant documents.
+ We draw this distinction because (1) an index can be used for other things besides retrieval, and (2) retrieval can use other logic besides an index to find relevant documents.
+ We therefore have a concept of a "Retriever" interface - this is the interface that most chains work with.
+
+ Most of the time when we talk about indexes and retrieval we are talking about indexing and retrieving unstructured data (like text documents).
+ For interacting with structured data (SQL tables, etc) or APIs, please see the corresponding use case sections for links to relevant functionality.
+ The primary index and retrieval types supported by LangChain are currently centered around vector databases, and therefore much of the functionality dives deep on those topics.

The following sections of documentation are provided:

- - `Getting Started <./indexes/getting_started.html>`_: An overview of all the functionality LangChain provides for working with indexes.
+ - `Getting Started <./indexes/getting_started.html>`_: An overview of the base "Retriever" interface, and then all the functionality LangChain provides for working with indexes.

- `Key Concepts <./indexes/key_concepts.html>`_: A conceptual guide going over the various concepts related to indexes and the tools needed to create them.

- - `How-To Guides <./indexes/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use all the relevant tools, the different types of vector databases, and how to use indexes in chains.
+ - `How-To Guides <./indexes/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use all the relevant tools, the different types of vector databases, different types of retrievers, and how to use retrievers and indexes in chains.


.. toctree::
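The "Retriever" interface the new text introduces is deliberately small; a hedged sketch of its shape as of this release (the example texts and query are illustrative):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

docsearch = Chroma.from_texts(["harrison worked at kensho"], OpenAIEmbeddings())

# as_retriever() adapts a VectorStore to the generic Retriever interface,
# which is all that chains like RetrievalQA depend on:
retriever = docsearch.as_retriever()
docs = retriever.get_relevant_documents("where did harrison work?")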
@@ -5,9 +5,9 @@
"id": "134a0785",
"metadata": {},
"source": [
- "# Chat Vector DB\n",
+ "# Chat Index\n",
"\n",
- "This notebook goes over how to set up a chain to chat with a vector database. The only difference between this chain and the [VectorDBQAChain](./vector_db_qa.ipynb) is that this allows for passing in of a chat history which can be used to allow for follow up questions."
+ "This notebook goes over how to set up a chain to chat with an index. The only difference between this chain and the [RetrievalQAChain](./vector_db_qa.ipynb) is that this allows for passing in of a chat history which can be used to allow for follow up questions."
]
},
{
@@ -23,7 +23,7 @@
"from langchain.vectorstores import Chroma\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.llms import OpenAI\n",
- "from langchain.chains import ChatVectorDBChain"
+ "from langchain.chains import ConversationalRetrievalChain"
]
},
{
@@ -109,7 +109,7 @@
"id": "3c96b118",
"metadata": {},
"source": [
- "We now initialize the ChatVectorDBChain"
+ "We now initialize the ConversationalRetrievalChain"
]
},
{
@@ -121,7 +121,7 @@
},
"outputs": [],
"source": [
- "qa = ChatVectorDBChain.from_llm(OpenAI(temperature=0), vectorstore)"
+ "qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore)"
]
},
{
@@ -220,22 +220,22 @@
"metadata": {},
"source": [
"## Return Source Documents\n",
- "You can also easily return source documents from the ChatVectorDBChain. This is useful for when you want to inspect what documents were returned."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "562769c6",
- "metadata": {},
- "outputs": [],
- "source": [
- "qa = ChatVectorDBChain.from_llm(OpenAI(temperature=0), vectorstore, return_source_documents=True)"
+ "You can also easily return source documents from the ConversationalRetrievalChain. This is useful for when you want to inspect what documents were returned."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "562769c6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore, return_source_documents=True)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "ea478300",
"metadata": {},
"outputs": [],
@@ -247,17 +247,17 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 13,
"id": "4cb75b4e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "Document(page_content='In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \\n\\nWe cannot let this happen. \\n\\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0)"
+ "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0)"
]
},
- "execution_count": 15,
+ "execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -271,13 +271,13 @@
"id": "4f49beab",
"metadata": {},
"source": [
- "## Chat Vector DB with `search_distance`\n",
+ "## ConversationalRetrievalChain with `search_distance`\n",
"If you are using a vector store that supports filtering by search distance, you can add a threshold value parameter."
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 14,
"id": "5ed8d612",
"metadata": {},
"outputs": [],
@@ -287,12 +287,12 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 15,
"id": "6a7b3459",
"metadata": {},
"outputs": [],
"source": [
- "qa = ChatVectorDBChain.from_llm(OpenAI(temperature=0), vectorstore, return_source_documents=True)\n",
+ "qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore, return_source_documents=True)\n",
"chat_history = []\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"result = qa({\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs})"
@@ -303,25 +303,25 @@
"id": "99b96dae",
"metadata": {},
"source": [
- "## Chat Vector DB with `map_reduce`\n",
- "We can also use different types of combine document chains with the Chat Vector DB chain."
+ "## ConversationalRetrievalChain with `map_reduce`\n",
+ "We can also use different types of combine document chains with the ConversationalRetrievalChain chain."
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 16,
"id": "e53a9d66",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.chains.question_answering import load_qa_chain\n",
- "from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT"
+ "from langchain.chains.chat_index.prompts import CONDENSE_QUESTION_PROMPT"
]
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 19,
"id": "bf205e35",
"metadata": {},
"outputs": [],
@@ -330,8 +330,8 @@
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
"doc_chain = load_qa_chain(llm, chain_type=\"map_reduce\")\n",
"\n",
- "chain = ChatVectorDBChain(\n",
- " vectorstore=vectorstore,\n",
+ "chain = ConversationalRetrievalChain(\n",
+ " retriever=vectorstore.as_retriever(),\n",
" question_generator=question_generator,\n",
" combine_docs_chain=doc_chain,\n",
")"
@@ -339,7 +339,7 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 20,
"id": "78155887",
"metadata": {},
"outputs": [],
@@ -351,7 +351,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 21,
"id": "e54b5fa2",
"metadata": {},
"outputs": [
@@ -361,7 +361,7 @@
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, from a family of public school educators and police officers, a consensus builder, and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
- "execution_count": 11,
+ "execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
@@ -375,14 +375,14 @@
"id": "a2fe6b14",
"metadata": {},
"source": [
- "## Chat Vector DB with Question Answering with sources\n",
+ "## ConversationalRetrievalChain with Question Answering with sources\n",
"\n",
"You can also use this chain with the question answering with sources chain."
]
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 22,
"id": "d1058fd2",
"metadata": {},
"outputs": [],
@@ -392,7 +392,7 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 23,
"id": "a6594482",
"metadata": {},
"outputs": [],
@@ -401,8 +401,8 @@
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
"doc_chain = load_qa_with_sources_chain(llm, chain_type=\"map_reduce\")\n",
"\n",
- "chain = ChatVectorDBChain(\n",
- " vectorstore=vectorstore,\n",
+ "chain = ConversationalRetrievalChain(\n",
+ " retriever=vectorstore.as_retriever(),\n",
" question_generator=question_generator,\n",
" combine_docs_chain=doc_chain,\n",
")"
@@ -410,7 +410,7 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 24,
"id": "e2badd21",
"metadata": {},
"outputs": [],
@@ -422,7 +422,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 25,
"id": "edb31fe5",
"metadata": {},
"outputs": [
@@ -432,7 +432,7 @@
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, from a family of public school educators and police officers, a consensus builder, and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\nSOURCES: ../../state_of_the_union.txt\""
]
},
- "execution_count": 16,
+ "execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
@@ -446,14 +446,14 @@
"id": "2324cdc6-98bf-4708-b8cd-02a98b1e5b67",
"metadata": {},
"source": [
- "## Chat Vector DB with streaming to `stdout`\n",
+ "## ConversationalRetrievalChain with streaming to `stdout`\n",
"\n",
"Output from the chain will be streamed to `stdout` token by token in this example."
]
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 26,
"id": "2efacec3-2690-4b05-8de3-a32fd2ac3911",
"metadata": {
"tags": []
@@ -463,7 +463,7 @@
"from langchain.chains.llm import LLMChain\n",
"from langchain.callbacks.base import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
- "from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT\n",
+ "from langchain.chains.chat_index.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"# Construct a ChatVectorDBChain with a streaming llm for combine docs\n",
@@ -474,12 +474,13 @@
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
"doc_chain = load_qa_chain(streaming_llm, chain_type=\"stuff\", prompt=QA_PROMPT)\n",
"\n",
- "qa = ChatVectorDBChain(vectorstore=vectorstore, combine_docs_chain=doc_chain, question_generator=question_generator)"
+ "qa = ConversationalRetrievalChain(\n",
+ " retriever=vectorstore.as_retriever(), combine_docs_chain=doc_chain, question_generator=question_generator)"
]
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 27,
"id": "fd6d43f4-7428-44a4-81bc-26fe88a98762",
"metadata": {
"tags": []
@@ -501,7 +502,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 28,
"id": "5ab38978-f3e8-4fa7-808c-c79dec48379a",
"metadata": {
"tags": []
@@ -532,7 +533,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 29,
"id": "a7ba9d8c",
"metadata": {},
"outputs": [],
@@ -542,12 +543,12 @@
" for human, ai in inputs:\n",
" res.append(f\"Human:{human}\\nAI:{ai}\")\n",
" return \"\\n\".join(res)\n",
- "qa = ChatVectorDBChain.from_llm(OpenAI(temperature=0), vectorstore, get_chat_history=get_chat_history)"
+ "qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore, get_chat_history=get_chat_history)"
]
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 30,
"id": "a3e33c0d",
"metadata": {},
"outputs": [],
@@ -559,7 +560,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 31,
"id": "936dc62f",
"metadata": {},
"outputs": [
@@ -569,7 +570,7 @@
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. He also said that she is a consensus builder and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
- "execution_count": 11,
+ "execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
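Because the rename touches nearly every cell above, a condensed sketch of the chain's core loop may help. It mirrors the notebook's own calls; `vectorstore` is the Chroma index the notebook builds at the top.

from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain

qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore)

chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = qa({"question": query, "chat_history": chat_history})

# Feeding the first exchange back in is what enables follow-up questions:
chat_history = [(query, result["answer"])]
result = qa({"question": "Did he mention who she succeeded", "chat_history": chat_history})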
@@ -178,16 +178,16 @@
"text": [
"\n",
"\n",
- "\u001B[1m> Entering new GraphQAChain chain...\u001B[0m\n",
+ "\u001b[1m> Entering new GraphQAChain chain...\u001b[0m\n",
"Entities Extracted:\n",
- "\u001B[32;1m\u001B[1;3m Intel\u001B[0m\n",
+ "\u001b[32;1m\u001b[1;3m Intel\u001b[0m\n",
"Full Context:\n",
- "\u001B[32;1m\u001B[1;3mIntel is going to build $20 billion semiconductor \"mega site\"\n",
+ "\u001b[32;1m\u001b[1;3mIntel is going to build $20 billion semiconductor \"mega site\"\n",
"Intel is building state-of-the-art factories\n",
"Intel is creating 10,000 new good-paying jobs\n",
- "Intel is helping build Silicon Valley\u001B[0m\n",
+ "Intel is helping build Silicon Valley\u001b[0m\n",
"\n",
- "\u001B[1m> Finished chain.\u001B[0m\n"
+ "\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -205,10 +205,76 @@
"chain.run(\"what is Intel going to build?\")"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "410aafa0",
+ "metadata": {},
+ "source": [
+ "## Save the graph\n",
+ "We can also save and load the graph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "bc72cca0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "graph.write_to_gml(\"graph.gml\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "652760ad",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.indexes.graph import NetworkxEntityGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "eae591fe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loaded_graph = NetworkxEntityGraph.from_gml(\"graph.gml\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "9439d419",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[('Intel', '$20 billion semiconductor \"mega site\"', 'is going to build'),\n",
+ " ('Intel', 'state-of-the-art factories', 'is building'),\n",
+ " ('Intel', '10,000 new good-paying jobs', 'is creating'),\n",
+ " ('Intel', 'Silicon Valley', 'is helping build'),\n",
+ " ('Field of dreams',\n",
+ " \"America's future will be built\",\n",
+ " 'is the ground on which')]"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "loaded_graph.get_triples()"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
- "id": "f70b9ada",
+ "id": "045796cf",
"metadata": {},
"outputs": [],
"source": []
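The persistence hunk above amounts to a simple round trip; a hedged sketch, assuming `graph` is the NetworkxEntityGraph the notebook builds from the speech text:

from langchain.indexes.graph import NetworkxEntityGraph

# GML is plain text, so the extracted triples survive a write/read cycle.
graph.write_to_gml("graph.gml")
loaded_graph = NetworkxEntityGraph.from_gml("graph.gml")
print(loaded_graph.get_triples())  # same triples as graph.get_triples()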
@@ -5,9 +5,9 @@
"id": "07c1e3b9",
"metadata": {},
"source": [
- "# Vector DB Question/Answering\n",
+ "# Retrieval Question/Answering\n",
"\n",
- "This example showcases question answering over a vector database."
+ "This example showcases question answering over an index."
]
},
{
@@ -20,7 +20,8 @@
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.vectorstores import Chroma\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain import OpenAI, VectorDBQA"
+ "from langchain.llms import OpenAI\n",
+ "from langchain.chains import RetrievalQA"
]
},
{
@@ -56,7 +57,7 @@
"metadata": {},
"outputs": [],
"source": [
- "qa = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", vectorstore=docsearch)"
+ "qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
]
},
{
@@ -68,7 +69,7 @@
{
"data": {
"text/plain": [
- "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice and federal public defender, from a family of public school educators and police officers, a consensus builder, and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
+ "\" The president said that she is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. He also said that she is a consensus builder and has received a broad range of support, from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
"execution_count": 4,
@@ -87,7 +88,7 @@
"metadata": {},
"source": [
"## Chain Type\n",
- "You can easily specify different chain types to load and use in the VectorDBQA chain. For a more detailed walkthrough of these types, please see [this notebook](question_answering.ipynb).\n",
+ "You can easily specify different chain types to load and use in the RetrievalQA chain. For a more detailed walkthrough of these types, please see [this notebook](question_answering.ipynb).\n",
"\n",
"There are two ways to load different chain types. First, you can specify the chain type argument in the `from_chain_type` method. This allows you to pass in the name of the chain type you want to use. For example, in the below we change the chain type to `map_reduce`."
]
@@ -99,7 +100,7 @@
"metadata": {},
"outputs": [],
"source": [
- "qa = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", vectorstore=docsearch)"
+ "qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", retriever=docsearch.as_retriever())"
]
},
{
@@ -111,7 +112,7 @@
{
"data": {
"text/plain": [
- "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, from a family of public school educators and police officers, a consensus builder, and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
+ "\" The president said that Judge Ketanji Brown Jackson is one of our nation's top legal minds, a former top litigator in private practice and a former federal public defender, from a family of public school educators and police officers, a consensus builder and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
"execution_count": 6,
@@ -129,24 +130,24 @@
"id": "60368f38",
"metadata": {},
"source": [
- "The above way allows you to really simply change the chain_type, but it doesn't provide much flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](question_answering.ipynb)) and then pass that directly to the VectorDBQA chain with the `combine_documents_chain` parameter. For example:"
+ "The above way allows you to really simply change the chain_type, but it doesn't provide much flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](question_answering.ipynb)) and then pass that directly to the RetrievalQA chain with the `combine_documents_chain` parameter. For example:"
]
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 9,
"id": "7b403f0d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.question_answering import load_qa_chain\n",
"qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
- "qa = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch)"
+ "qa = RetrievalQA(combine_documents_chain=qa_chain, retriever=docsearch.as_retriever())"
]
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 10,
"id": "9e04a9ac",
"metadata": {},
"outputs": [
@@ -156,7 +157,7 @@
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. He also said that she is a consensus builder and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
- "execution_count": 19,
+ "execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -177,7 +178,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 11,
"id": "a45232a2",
"metadata": {},
"outputs": [],
@@ -196,28 +197,28 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 13,
"id": "9b5c8d1d",
"metadata": {},
"outputs": [],
"source": [
"chain_type_kwargs = {\"prompt\": PROMPT}\n",
- "qa = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", vectorstore=docsearch, chain_type_kwargs=chain_type_kwargs)"
+ "qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), chain_type_kwargs=chain_type_kwargs)"
]
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 14,
"id": "26ee7671",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "\" Il Presidente ha detto che Ketanji Brown Jackson è uno dei pensatori legali più importanti del nostro Paese, che continuerà l'eccellente eredità di giustizia Breyer. È un ex principale litigante in pratica privata, un ex difensore federale pubblico e appartiene a una famiglia di insegnanti e poliziotti delle scuole pubbliche. È un costruttore di consenso che ha ricevuto un ampio supporto da parte di Fraternal Order of Police e giudici designati da democratici e repubblicani.\""
+ "\" Il presidente ha detto che Ketanji Brown Jackson è una delle menti legali più importanti del paese, che continuerà l'eccellenza di Justice Breyer e che ha ricevuto un ampio sostegno, da Fraternal Order of Police a ex giudici nominati da democratici e repubblicani.\""
]
},
- "execution_count": 8,
+ "execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -238,17 +239,17 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 15,
"id": "af093aba",
"metadata": {},
"outputs": [],
"source": [
- "qa = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", vectorstore=docsearch, return_source_documents=True)"
+ "qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), return_source_documents=True)"
]
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 16,
"id": "eac11321",
"metadata": {},
"outputs": [],
@@ -259,17 +260,17 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 17,
"id": "7d75945a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "\" The president said that Ketanji Brown Jackson is one of our nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
+ "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice and a former federal public defender from a family of public school educators and police officers, and that she has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
- "execution_count": 10,
+ "execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -280,20 +281,20 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 18,
"id": "35b4f31f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "[Document(page_content='In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \\n\\nWe cannot let this happen. \\n\\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={}, lookup_index=0),\n",
- " Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', lookup_str='', metadata={}, lookup_index=0),\n",
- " Document(page_content='And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \\n\\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \\n\\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \\n\\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \\n\\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \\n\\nFirst, beat the opioid epidemic.', lookup_str='', metadata={}, lookup_index=0),\n",
- " Document(page_content='As I’ve told Xi Jinping, it is never a good bet to bet against the American people. \\n\\nWe’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \\n\\nAnd we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. \\n\\nWe’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. \\n\\n4,000 projects have already been announced. \\n\\nAnd tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. \\n\\nWhen we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs.', lookup_str='', metadata={}, lookup_index=0)]"
+ "[Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
+ " Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
+ " Document(page_content='And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \\n\\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \\n\\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \\n\\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \\n\\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \\n\\nFirst, beat the opioid epidemic.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
+ " Document(page_content='Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \\n\\nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. \\n\\nThat ends on my watch. \\n\\nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \\n\\nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \\n\\nLet’s pass the Paycheck Fairness Act and paid leave. \\n\\nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \\n\\nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
"id": "efc5be67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# VectorDB Question Answering with Sources\n",
|
||||
"# Retrieval Question Answering with Sources\n",
|
||||
"\n",
|
||||
"This notebook goes over how to do question-answering with sources over a vector database. It does this by using the `VectorDBQAWithSourcesChain`, which does the lookup of the documents from a vector database. "
|
||||
"This notebook goes over how to do question-answering with sources over an Index. It does this by using the `RetrievalQAWithSourcesChain`, which does the lookup of the documents from an Index. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -41,7 +41,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 3,
|
||||
"id": "0e745d99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -50,8 +50,7 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n",
|
||||
"Exiting: Cleaning up .chroma directory\n"
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -61,40 +60,40 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 4,
|
||||
"id": "8aa571ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import VectorDBQAWithSourcesChain"
|
||||
"from langchain.chains import RetrievalQAWithSourcesChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 5,
|
||||
"id": "aa859d4c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI\n",
|
||||
"\n",
|
||||
"chain = VectorDBQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"stuff\", vectorstore=docsearch)"
|
||||
"chain = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 6,
|
||||
"id": "8ba36fa7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer': ' The president thanked Justice Breyer for his service and mentioned his legacy of excellence.\\n',\n",
|
||||
" 'sources': '30-pl'}"
|
||||
"{'answer': ' The president honored Justice Breyer for his service and mentioned his legacy of excellence.\\n',\n",
|
||||
" 'sources': '31-pl'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -109,35 +108,35 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chain Type\n",
|
||||
"You can easily specify different chain types to load and use in the VectorDBQAWithSourcesChain chain. For a more detailed walkthrough of these types, please see [this notebook](qa_with_sources.ipynb).\n",
|
||||
"You can easily specify different chain types to load and use in the RetrievalQAWithSourcesChain chain. For a more detailed walkthrough of these types, please see [this notebook](qa_with_sources.ipynb).\n",
|
||||
"\n",
|
||||
"There are two ways to load different chain types. First, you can specify the chain type argument in the `from_chain_type` method. This allows you to pass in the name of the chain type you want to use. For example, in the below we change the chain type to `map_reduce`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 7,
|
||||
"id": "8b35b30a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = VectorDBQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"map_reduce\", vectorstore=docsearch)"
|
||||
"chain = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"map_reduce\", retriever=docsearch.as_retriever())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 8,
|
||||
"id": "58bd424f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer': ' The president honored Justice Stephen Breyer for his service.\\n',\n",
|
||||
" 'sources': '30-pl'}"
|
||||
"{'answer': ' The president said \"Justice Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\"\\n',\n",
|
||||
" 'sources': '31-pl'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -151,19 +150,19 @@
|
||||
"id": "21e14eed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above way allows you to really simply change the chain_type, but it does provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](qa_with_sources.ipynb)) and then pass that directly to the the VectorDBQA chain with the `combine_documents_chain` parameter. For example:"
|
||||
"The above way allows you to really simply change the chain_type, but it does provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](qa_with_sources.ipynb)) and then pass that directly to the the RetrievalQAWithSourcesChain chain with the `combine_documents_chain` parameter. For example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 10,
|
||||
"id": "af35f0c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n",
|
||||
"qa_chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
|
||||
"qa = VectorDBQAWithSourcesChain(combine_documents_chain=qa_chain, vectorstore=docsearch)"
|
||||
"qa = RetrievalQAWithSourcesChain(combine_documents_chain=qa_chain, retriever=docsearch.as_retriever())"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -175,8 +174,8 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer': ' The president honored Justice Stephen Breyer for his service.\\n',\n",
|
||||
" 'sources': '30-pl'}"
|
||||
"{'answer': ' The president honored Justice Breyer for his service and mentioned his legacy of excellence.\\n',\n",
|
||||
" 'sources': '31-pl'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
@@ -187,6 +186,14 @@
|
||||
"source": [
|
||||
"qa({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3c594296",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"import requests\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Chromama\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"import pathlib\n",
|
||||
|
||||
@@ -19,20 +19,20 @@ to pass to the language model. This is implemented in LangChain as the `StuffDoc
|
||||
|
||||
**Cons:** Most LLMs have a context length, and for large documents (or many documents) this will not work as it will result in a prompt larger than the context length.
|
||||
|
||||
The main downside of this method is that it only works one smaller pieces of data. Once you are working
|
||||
The main downside of this method is that it only works on smaller pieces of data. Once you are working
|
||||
with many pieces of data, this approach is no longer feasible. The next two approaches are designed to help deal with that.
|
||||
|
||||
## Map Reduce
|
||||
This method involves an initial prompt on each chunk of data (for summarization tasks, this
|
||||
This method involves running an initial prompt on each chunk of data (for summarization tasks, this
|
||||
could be a summary of that chunk; for question-answering tasks, it could be an answer based solely on that chunk).
|
||||
Then a different prompt is run to combine all the initial outputs. This is implemented in LangChain as the `MapReduceDocumentsChain`.
|
||||
|
||||
**Pros:** Can scale to larger documents (and more documents) than `StuffDocumentsChain`. The calls to the LLM on individual documents are independent and can therefore be parallelized.
|
||||
|
||||
**Cons:** Requires many more calls to the LLM than `StuffDocumentsChain`. Loses some information during the final combining call.
|
||||
**Cons:** Requires many more calls to the LLM than `StuffDocumentsChain`. Loses some information during the final combining call.
|
||||
|
||||
## Refine
|
||||
This method involves an initial prompt on the first chunk of data, generating some output.
|
||||
This method involves running an initial prompt on the first chunk of data, generating some output.
|
||||
For the remaining documents, that output is passed in, along with the next document,
|
||||
asking the LLM to refine the output based on the new document.
|
||||
|
||||
@@ -46,6 +46,6 @@ This method involves running an initial prompt on each chunk of data, that not o
|
||||
task but also gives a score for how certain it is in its answer. The responses are then
|
||||
ranked according to this score, and the highest score is returned.
|
||||
|
||||
**Pros:** Similar pros as `MapReduceDocumentsChain`. Compared to `MapReduceDocumentsChain`, it requires fewer calls.
|
||||
**Pros:** Similar pros as `MapReduceDocumentsChain`. Requires fewer calls compared to `MapReduceDocumentsChain`.
|
||||
|
||||
**Cons:** Cannot combine information between documents. This means it is most useful when you expect there to be a single simple answer in a single document.
|
||||
|
||||
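To make the four options concrete, here is a minimal sketch of selecting each chain type by name with the `load_qa_chain` question-answering loader (a sketch; `docs` and `query` are placeholders you would supply):

```python
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain

llm = OpenAI(temperature=0)

# "stuff": put all documents into a single prompt (limited by context length)
chain = load_qa_chain(llm, chain_type="stuff")

# "map_reduce": prompt each chunk independently, then combine the initial outputs
chain = load_qa_chain(llm, chain_type="map_reduce")

# "refine": iteratively refine a running answer, one document at a time
chain = load_qa_chain(llm, chain_type="refine")

# "map_rerank": score each per-chunk answer and return the highest-scoring one
chain = load_qa_chain(llm, chain_type="map_rerank")

# docs is a list of Documents, query is the question string
chain.run(input_documents=docs, question=query)
```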
@@ -76,6 +76,129 @@
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bb61bbeb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's load the OpenAI Embedding class with first generation models (e.g. text-search-ada-doc-001/text-search-ada-query-001). Note: These are not recommended models - see [here](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c0b072cc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a56b70f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings(model_name=\"ada\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "14aefb64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3c39ed33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e3221db6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3852491",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## AzureOpenAI\n",
|
||||
"\n",
|
||||
"Let's load the OpenAI Embedding class with environment variables set to indicate to use Azure endpoints."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1b40f827",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# set the environment variables needed for openai package to know to reach out to azure\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n",
|
||||
"os.environ[\"OPENAI_API_BASE\"] = \"https://<your-endpoint.openai.azure.com/\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your AzureOpenAI key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bb36d16c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings(model=\"your-embeddings-deployment-name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "228abcbb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "60dd7fad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "83bc1a72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "42f76e43",
|
||||
@@ -86,6 +209,12 @@
|
||||
"Let's load the Cohere Embedding class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ca9e2b3a",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -103,7 +232,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = CohereEmbeddings(cohere_api_key= cohere_api_key)"
|
||||
"embeddings = CohereEmbeddings(cohere_api_key=cohere_api_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -290,7 +419,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeddings = HuggingFaceInstructEmbeddings(query_instruction=\"Represent the query for retrieval: \")"
|
||||
"embeddings = HuggingFaceInstructEmbeddings(\n",
|
||||
" query_instruction=\"Represent the query for retrieval: \"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -332,9 +463,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import (\n",
|
||||
" SelfHostedEmbeddings, \n",
|
||||
" SelfHostedHuggingFaceEmbeddings, \n",
|
||||
" SelfHostedHuggingFaceInstructEmbeddings\n",
|
||||
" SelfHostedEmbeddings,\n",
|
||||
" SelfHostedHuggingFaceEmbeddings,\n",
|
||||
" SelfHostedHuggingFaceInstructEmbeddings,\n",
|
||||
")\n",
|
||||
"import runhouse as rh"
|
||||
]
|
||||
@@ -353,7 +484,7 @@
|
||||
"# gpu = rh.cluster(name='rh-a10x', instance_type='g5.2xlarge', provider='aws')\n",
|
||||
"\n",
|
||||
"# For an existing cluster\n",
|
||||
"# gpu = rh.cluster(ips=['<ip of the cluster>'], \n",
|
||||
"# gpu = rh.cluster(ips=['<ip of the cluster>'],\n",
|
||||
"# ssh_creds={'ssh_user': '...', 'ssh_private_key':'<path_to_key>'},\n",
|
||||
"# name='my-cluster')"
|
||||
]
|
||||
@@ -424,16 +555,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_pipeline():\n",
|
||||
" from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline # Must be inside the function in notebooks\n",
|
||||
" from transformers import (\n",
|
||||
" AutoModelForCausalLM,\n",
|
||||
" AutoTokenizer,\n",
|
||||
" pipeline,\n",
|
||||
" ) # Must be inside the function in notebooks\n",
|
||||
"\n",
|
||||
" model_id = \"facebook/bart-base\"\n",
|
||||
" tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
" model = AutoModelForCausalLM.from_pretrained(model_id)\n",
|
||||
" return pipeline(\"feature-extraction\", model=model, tokenizer=tokenizer)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def inference_fn(pipeline, prompt):\n",
|
||||
" # Return last hidden state of the model\n",
|
||||
" if isinstance(prompt, list):\n",
|
||||
" return [emb[0][-1] for emb in pipeline(prompt)] \n",
|
||||
" return [emb[0][-1] for emb in pipeline(prompt)]\n",
|
||||
" return pipeline(prompt)[0][-1]"
|
||||
]
|
||||
},
|
||||
@@ -445,10 +582,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = SelfHostedEmbeddings(\n",
|
||||
" model_load_fn=get_pipeline, \n",
|
||||
" model_load_fn=get_pipeline,\n",
|
||||
" hardware=gpu,\n",
|
||||
" model_reqs=[\"./\", \"torch\", \"transformers\"],\n",
|
||||
" inference_fn=inference_fn\n",
|
||||
" inference_fn=inference_fn,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -514,12 +651,101 @@
|
||||
"doc_results = embeddings.embed_documents([\"foo\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1f83f273",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## SageMaker Endpoint Embeddings\n",
|
||||
"\n",
|
||||
"Let's load the SageMaker Endpoints Embeddings class. The class can be used if you host, e.g. your own Hugging Face model on SageMaker.\n",
|
||||
"\n",
|
||||
"For instrucstions on how to do this, please see [here](https://www.philschmid.de/custom-inference-huggingface-sagemaker)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "88d366bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip3 install langchain boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "1e9b926a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Dict\n",
|
||||
"from langchain.embeddings import SagemakerEndpointEmbeddings\n",
|
||||
"from langchain.llms.sagemaker_endpoint import ContentHandlerBase\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ContentHandler(ContentHandlerBase):\n",
|
||||
" content_type = \"application/json\"\n",
|
||||
" accepts = \"application/json\"\n",
|
||||
"\n",
|
||||
" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:\n",
|
||||
" input_str = json.dumps({\"inputs\": prompt, **model_kwargs})\n",
|
||||
" return input_str.encode('utf-8')\n",
|
||||
" \n",
|
||||
" def transform_output(self, output: bytes) -> str:\n",
|
||||
" response_json = json.loads(output.read().decode(\"utf-8\"))\n",
|
||||
" return response_json[\"embeddings\"]\n",
|
||||
"\n",
|
||||
"content_handler = ContentHandler()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"embeddings = SagemakerEndpointEmbeddings(\n",
|
||||
" # endpoint_name=\"endpoint-name\", \n",
|
||||
" # credentials_profile_name=\"credentials-profile-name\", \n",
|
||||
" endpoint_name=\"huggingface-pytorch-inference-2023-03-21-16-14-03-834\", \n",
|
||||
" region_name=\"us-east-1\", \n",
|
||||
" content_handler=content_handler\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fe9797b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(\"foo\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "76f1b752",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_results = embeddings.embed_documents([\"foo\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fff99b21",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aaad49f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
@@ -543,7 +769,7 @@
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "ce6f9b0d7cdac41515b0e0c38d0e6e153a2edce81d579281cb1ab99da6e8ea6d"
|
||||
"hash": "7377c2ccc78bc62c2683122d48c8cd1fb85a53850a1b1fc29736ed39852c9885"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -176,6 +176,77 @@
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a2f572e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Latex Text Splitter\n",
|
||||
"\n",
|
||||
"LatexTextSplitter splits text along Latex headings, headlines, enumerations and more. It's implemented as a simple subclass of RecursiveCharacterSplitter with Latex-specific separators. See the source code to see the Latex syntax expected by default.\n",
|
||||
"\n",
|
||||
"1. How the text is split: by list of latex specific tags\n",
|
||||
"2. How the chunk size is measured: by length function passed in (defaults to number of characters)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c2503917",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import LatexTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e46b753b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"latex_text = \"\"\"\n",
|
||||
"\\documentclass{article}\n",
|
||||
"\n",
|
||||
"\\begin{document}\n",
|
||||
"\n",
|
||||
"\\maketitle\n",
|
||||
"\n",
|
||||
"\\section{Introduction}\n",
|
||||
"Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.\n",
|
||||
"\n",
|
||||
"\\subsection{History of LLMs}\n",
|
||||
"The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.\n",
|
||||
"\n",
|
||||
"\\subsection{Applications of LLMs}\n",
|
||||
"LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. They can also be used in academia for research in linguistics, psychology, and computational linguistics.\n",
|
||||
"\n",
|
||||
"\\end{document}\n",
|
||||
"\"\"\"\n",
|
||||
"latex_splitter = LatexTextSplitter(chunk_size=400, chunk_overlap=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "73b5bd33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = latex_splitter.create_documents([latex_text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e1c7fbd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c350765d",
|
||||
|
||||
@@ -1,20 +1,71 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fcc8bb1c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Getting Started\n",
|
||||
"\n",
|
||||
"LangChain primary focuses on constructing indexes with the goal of using them as a Retriever. In order to best understand what this means, it's worth highlighting what the base Retriever interface is. The `BaseRetriever` class in LangChain is as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b09ac324",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from abc import ABC, abstractmethod\n",
|
||||
"from typing import List\n",
|
||||
"from langchain.schema import Document\n",
|
||||
"\n",
|
||||
"class BaseRetriever(ABC):\n",
|
||||
" @abstractmethod\n",
|
||||
" def get_relevant_documents(self, query: str) -> List[Document]:\n",
|
||||
" \"\"\"Get texts relevant for a query.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" query: string to find relevant tests for\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" List of relevant documents\n",
|
||||
" \"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e19d4adb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's that simple! The `get_relevant_documents` method can be implemented however you see fit.\n",
|
||||
"\n",
|
||||
"Of course, we also help construct what we think useful Retrievers are. The main type of Retriever that we focus on is a Vectorstore retriever. We will focus on that for the rest of this guide.\n",
|
||||
"\n",
|
||||
"In order to understand what a vectorstore retriever is, it's important to understand what a Vectorstore is. So let's look at that."
|
||||
]
|
||||
},
|
||||
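To make "however you see fit" concrete, here is a hedged toy implementation (purely illustrative, not part of the library): a retriever that does simple keyword matching over an in-memory list of documents.

```python
class KeywordRetriever(BaseRetriever):
    """Illustrative only: return documents whose text contains the query."""

    def __init__(self, documents: List[Document]):
        self.documents = documents

    def get_relevant_documents(self, query: str) -> List[Document]:
        # Case-insensitive substring match stands in for real relevance scoring
        return [
            doc
            for doc in self.documents
            if query.lower() in doc.page_content.lower()
        ]


docs = [Document(page_content="foo bar"), Document(page_content="baz")]
KeywordRetriever(docs).get_relevant_documents("FOO")  # -> the "foo bar" document
```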
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2244801b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Getting Started\n",
|
||||
"By default, LangChain uses [Chroma](../../ecosystem/chroma.md) as the vectorstore to index and search embeddings. To walk through this tutorial, we'll first need to install `chromadb`.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install chromadb\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This example showcases question answering over documents.\n",
|
||||
"We have chosen this as the example for getting started because it nicely combines a lot of different elements (Text splitters, embeddings, vectorstores) and then also shows how to use them in a chain.\n",
|
||||
"\n",
|
||||
"Question answering over documents consists of three steps:\n",
|
||||
"Question answering over documents consists of four steps:\n",
|
||||
"\n",
|
||||
"1. Create an index\n",
|
||||
"2. Create a question answering chain\n",
|
||||
"3. Ask questions!\n",
|
||||
"2. Create a Retriever from that index\n",
|
||||
"3. Create a question answering chain\n",
|
||||
"4. Ask questions!\n",
|
||||
"\n",
|
||||
"Each of the steps has multiple sub steps and potential configurations. In this notebook we will primarily focus on (1). We will start by showing the one-liner for doing so, but then break down what is actually going on.\n",
|
||||
"\n",
|
||||
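Here is the promised compact end-to-end sketch of all four steps (a sketch, assuming `chromadb` is installed and an OpenAI API key is set; `VectorstoreIndexCreator` is the one-liner discussed below, and the loader path mirrors the cells that follow):

```python
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import OpenAI

loader = TextLoader('../state_of_the_union.txt')

# Step 1: create an index
index = VectorstoreIndexCreator().from_loaders([loader])

# Step 2: create a Retriever from that index
retriever = index.vectorstore.as_retriever()

# Step 3: create a question answering chain
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=retriever)

# Step 4: ask questions!
qa.run("What did the president say about Ketanji Brown Jackson")
```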
@@ -23,12 +74,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 3,
|
||||
"id": "8d369452",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import VectorDBQA\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -37,12 +88,12 @@
|
||||
"id": "07c1e3b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next in the generic setup, let's specify the document loader we want to use."
|
||||
"Next in the generic setup, let's specify the document loader we want to use. You can download the `state_of_the_union.txt` file [here](https://github.com/hwchase17/langchain/blob/master/docs/modules/state_of_the_union.txt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"id": "33958a86",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -63,7 +114,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 5,
|
||||
"id": "403fc231",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -73,7 +124,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 6,
|
||||
"id": "57a8a199",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -100,7 +151,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"id": "23d0d234",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -110,7 +161,7 @@
|
||||
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. He also said that she is a consensus builder and has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -122,7 +173,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 8,
|
||||
"id": "ae46b239",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -134,7 +185,7 @@
|
||||
" 'sources': '../state_of_the_union.txt'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -154,17 +205,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"id": "b04f3c10",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<langchain.vectorstores.chroma.Chroma at 0x113a3a700>"
|
||||
"<langchain.vectorstores.chroma.Chroma at 0x119aa5940>"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -173,6 +224,35 @@
|
||||
"index.vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "297ccfa4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we then want to access the VectorstoreRetriever, we can do that with:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "b8fef77d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"VectorStoreRetriever(vectorstore=<langchain.vectorstores.chroma.Chroma object at 0x119aa5940>, search_kwargs={})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"index.vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2cb6d2eb",
|
||||
@@ -195,7 +275,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 11,
|
||||
"id": "54270abc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -213,7 +293,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 12,
|
||||
"id": "afecb8cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -233,7 +313,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"id": "9eaaa735",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -252,7 +332,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 14,
|
||||
"id": "5c7049db",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -270,38 +350,55 @@
|
||||
"db = Chroma.from_documents(texts, embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0ef85a6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So that's creating the index. Then, we expose this index in a retriever interface."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "13495c77",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = db.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "30c4e5c6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So that's creating the index.\n",
|
||||
"Then, as before, we create a chain and use it to answer questions!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 16,
|
||||
"id": "3018f865",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", vectorstore=db)"
|
||||
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=retriever)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 17,
|
||||
"id": "032a47f8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The President said that Ketanji Brown Jackson is one of the nation's top legal minds and a consensus builder, with a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. She is a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers.\""
|
||||
"\" The President said that Judge Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. He said she is a consensus builder and has received a broad range of support from organizations such as the Fraternal Order of Police and former judges appointed by Democrats and Republicans.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
|
||||
@@ -46,12 +46,16 @@ In the below guides, we cover different types of vectorstores and how to use the
|
||||
|
||||
`Milvus <./vectorstore_examples/milvus.html>`_: A walkthrough of how to use the Milvus vectorstore wrapper.
|
||||
|
||||
`Open Search <./vectorstore_examples/opensearch.html>`_: A walkthrough of how to use the OpenSearch wrapper.
|
||||
|
||||
`Pinecone <./vectorstore_examples/pinecone.html>`_: A walkthrough of how to use the Pinecone vectorstore wrapper.
|
||||
|
||||
`Qdrant <./vectorstore_examples/qdrant.html>`_: A walkthrough of how to use the Qdrant vectorstore wrapper.
|
||||
|
||||
`Weaviate <./vectorstore_examples/weaviate.html>`_: A walkthrough of how to use the Weaviate vectorstore wrapper.
|
||||
|
||||
`PGVector <./vectorstore_examples/pgvector.html>`_: A walkthrough of how to use the PGVector (Postgres Vector DB) vectorstore wrapper.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
@@ -63,6 +67,29 @@ In the below guides, we cover different types of vectorstores and how to use the
|
||||
vectorstore_examples/*
|
||||
|
||||
|
||||
Retrievers
|
||||
------------
|
||||
|
||||
|
||||
The retriever interface is a generic interface that makes it easy to combine documents with
|
||||
language models. This interface exposes a `get_relevant_documents` method which takes in a query
|
||||
(a string) and returns a list of documents.
|
||||
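Given any retriever, usage is then a single call (a sketch; the query string is illustrative)::

    retriever = db.as_retriever()  # e.g. from a Chroma vectorstore built as in the guides below
    docs = retriever.get_relevant_documents("What did the president say about Justice Breyer")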
|
||||
`Vectorstore Retriever <./retriever_examples/vectorstore-retriever.html>`_: A walkthrough of how to use a VectorStore as a Retriever.
|
||||
|
||||
`ChatGPT Plugin Retriever <./retriever_examples/chatgpt-plugin-retriever.html>`_: A walkthrough of how to use the ChatGPT Plugin Retriever within the LangChain framework.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:glob:
|
||||
:caption: Retrievers
|
||||
:name: retrievers
|
||||
:hidden:
|
||||
|
||||
retriever_examples/*
|
||||
|
||||
|
||||
Chains
|
||||
------
|
||||
|
||||
@@ -94,4 +121,4 @@ The examples here are all end-to-end chains that use indexes or utils covered ab
|
||||
:name: chains
|
||||
:hidden:
|
||||
|
||||
./chain_examples/*
|
||||
./chain_examples/*
|
||||
|
||||
@@ -0,0 +1,148 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1edb9e6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatGPT Plugin Retriever\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the ChatGPT Retriever Plugin within LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "074b0004",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create\n",
|
||||
"\n",
|
||||
"First, let's go over how to create the ChatGPT Retriever Plugin.\n",
|
||||
"\n",
|
||||
"To set up the ChatGPT Retriever Plugin, please follow instructions [here](https://github.com/openai/chatgpt-retrieval-plugin).\n",
|
||||
"\n",
|
||||
"You can also create the ChatGPT Retriever Plugin from LangChain document loaders. The below code walks through how to do that."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "bbe89ca0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# STEP 1: Load\n",
|
||||
"\n",
|
||||
"# Load documents using LangChain's DocumentLoaders\n",
|
||||
"# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\n",
|
||||
"\n",
|
||||
"from langchain.document_loaders.csv_loader import CSVLoader\n",
|
||||
"loader = CSVLoader(file_path='../../document_loaders/examples/example_data/mlb_teams_2012.csv')\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# STEP 2: Convert\n",
|
||||
"\n",
|
||||
"# Convert Document to format expected by https://github.com/openai/chatgpt-retrieval-plugin\n",
|
||||
"from typing import List\n",
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"def write_json(path: str, documents: List[Document])-> None:\n",
|
||||
" results = [{\"text\": doc.page_content} for doc in documents]\n",
|
||||
" with open(path, \"w\") as f:\n",
|
||||
" json.dump(results, f, indent=2)\n",
|
||||
"\n",
|
||||
"write_json(\"foo.json\", data)\n",
|
||||
"\n",
|
||||
"# STEP 3: Use\n",
|
||||
"\n",
|
||||
"# Ingest this as you would any other json file in https://github.com/openai/chatgpt-retrieval-plugin/tree/main/scripts/process_json\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0474661d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the ChatGPT Retriever Plugin\n",
|
||||
"\n",
|
||||
"Okay, so we've created the ChatGPT Retriever Plugin, but how do we actually use it?\n",
|
||||
"\n",
|
||||
"The below code walks through how to do that."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39d6074e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import ChatGPTPluginRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "33fd23d1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = ChatGPTPluginRetriever(url=\"http://0.0.0.0:8000\", bearer_token=\"foo\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "16250bdf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content=\"This is Alice's phone number: 123-456-7890\", lookup_str='', metadata={'id': '456_0', 'metadata': {'source': 'email', 'source_id': '567', 'url': None, 'created_at': '1609592400.0', 'author': 'Alice', 'document_id': '456'}, 'embedding': None, 'score': 0.925571561}, lookup_index=0),\n",
|
||||
" Document(page_content='This is a document about something', lookup_str='', metadata={'id': '123_0', 'metadata': {'source': 'file', 'source_id': 'https://example.com/doc1', 'url': 'https://example.com/doc1', 'created_at': '1609502400.0', 'author': 'Alice', 'document_id': '123'}, 'embedding': None, 'score': 0.6987589}, lookup_index=0),\n",
|
||||
" Document(page_content='Team: Angels \"Payroll (millions)\": 154.49 \"Wins\": 89', lookup_str='', metadata={'id': '59c2c0c1-ae3f-4272-a1da-f44a723ea631_0', 'metadata': {'source': None, 'source_id': None, 'url': None, 'created_at': None, 'author': None, 'document_id': '59c2c0c1-ae3f-4272-a1da-f44a723ea631'}, 'embedding': None, 'score': 0.697888613}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"alice's phone number\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c8b5794b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,93 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fc0db1bc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# VectorStore Retriever\n",
|
||||
"\n",
|
||||
"The index - and therefor the retriever - that LangChain has the most support for is a VectorStoreRetriever. As the name suggests, this retriever is backed heavily by a VectorStore.\n",
|
||||
"\n",
|
||||
"Once you construct a VectorStore, its very easy to construct a retriever. Let's walk through an example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5831703b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../state_of_the_union.txt')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "9fbcc58f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"db = Chroma.from_documents(texts, embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "0cbfb1af",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = db.as_retriever()"
|
||||
]
|
||||
},
|
||||
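With the retriever in hand, it can be queried directly (a usage sketch; the query string is illustrative):

```python
# Fetch the chunks most relevant to the question
docs = retriever.get_relevant_documents("What did the president say about Ketanji Brown Jackson")
print(docs[0].page_content)
```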
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fc12700b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -62,10 +62,6 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
@@ -200,10 +196,104 @@
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "57da60d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Merging\n",
|
||||
"You can also merge two FAISS vectorstores"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "6dfd2b78",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db1 = FAISS.from_texts([\"foo\"], embeddings)\n",
|
||||
"db2 = FAISS.from_texts([\"bar\"], embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "29960da7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'e0b74348-6c93-4893-8764-943139ec1d17': Document(page_content='foo', lookup_str='', metadata={}, lookup_index=0)}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db1.docstore._dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "83392605",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'bdc50ae3-a1bb-4678-9260-1b0979578f40': Document(page_content='bar', lookup_str='', metadata={}, lookup_index=0)}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db2.docstore._dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "a3fcc1c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db1.merge_from(db2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "41c51f89",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'e0b74348-6c93-4893-8764-943139ec1d17': Document(page_content='foo', lookup_str='', metadata={}, lookup_index=0),\n",
|
||||
" 'd5211050-c777-493d-8825-4800e74cfdb6': Document(page_content='bar', lookup_str='', metadata={}, lookup_index=0)}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db1.docstore._dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc8b71f7",
|
||||
"id": "f80b60de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
|
||||
194
docs/modules/indexes/vectorstore_examples/pgvector.ipynb
Normal file
@@ -0,0 +1,194 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PGVector\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the Postgres vector database (PGVector)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## Loading Environment Variables\n",
|
||||
"from typing import List, Tuple\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores.pgvector import PGVector\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.docstore.document import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## PGVector needs the connection string to the database.\n",
|
||||
"## We will load it from the environment variables.\n",
|
||||
"import os\n",
|
||||
"CONNECTION_STRING = PGVector.connection_string_from_db_params(\n",
|
||||
" driver=os.environ.get(\"PGVECTOR_DRIVER\", \"psycopg2\"),\n",
|
||||
" host=os.environ.get(\"PGVECTOR_HOST\", \"localhost\"),\n",
|
||||
" port=int(os.environ.get(\"PGVECTOR_PORT\", \"5432\")),\n",
|
||||
" database=os.environ.get(\"PGVECTOR_DATABASE\", \"postgres\"),\n",
|
||||
" user=os.environ.get(\"PGVECTOR_USER\", \"postgres\"),\n",
|
||||
" password=os.environ.get(\"PGVECTOR_PASSWORD\", \"postgres\"),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Example\n",
|
||||
"# postgresql+psycopg2://username:password@localhost:5432/database_name"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity search with score"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search with Euclidean Distance (Default)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The PGVector Module will try to create a table with the name of the collection. So, make sure that the collection name is unique and the user has the \n",
|
||||
"# permission to create a table.\n",
|
||||
"\n",
|
||||
"db = PGVector.from_documents(\n",
|
||||
" embedding=embeddings,\n",
|
||||
" documents=docs,\n",
|
||||
" collection_name=\"state_of_the_union\",\n",
|
||||
" connection_string=CONNECTION_STRING,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs_with_score: List[Tuple[Document, float]] = db.similarity_search_with_score(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076628081132506\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076628081132506\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076804780049968\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076804780049968\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for doc, score in docs_with_score:\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" print(\"Score: \", score)\n",
|
||||
" print(doc.page_content)\n",
|
||||
" print(\"-\" * 80)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
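For reference, the `## Example` comment in the connection-string cell above spells out the format PGVector expects. A minimal sketch (assuming the same `PGVECTOR_*` environment variables and defaults) of assembling that string by hand rather than through the helper used in the notebook:

```python
import os

# Hand-rolled equivalent of CONNECTION_STRING above; the PGVECTOR_* variables
# and their defaults mirror the notebook's keyword arguments.
CONNECTION_STRING = (
    "postgresql+psycopg2://"
    f"{os.environ.get('PGVECTOR_USER', 'postgres')}"
    f":{os.environ.get('PGVECTOR_PASSWORD', 'postgres')}"
    f"@{os.environ.get('PGVECTOR_HOST', 'localhost')}"
    f":{os.environ.get('PGVECTOR_PORT', '5432')}"
    f"/{os.environ.get('PGVECTOR_DATABASE', 'postgres')}"
)
```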
191
docs/modules/indexes/vectorstore_examples/redis.ipynb
Normal file
@@ -0,0 +1,191 @@
{
"cells": [
{
"cell_type": "markdown",
"source": [
"# Redis\n",
"\n",
"This notebook shows how to use functionality related to the Redis database."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 1,
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores.redis import Redis"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 3,
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 4,
"outputs": [],
"source": [
"rds = Redis.from_documents(docs, embeddings, redis_url=\"redis://localhost:6379\", index_name='link')"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 5,
"outputs": [
{
"data": {
"text/plain": "'b564189668a343648996bd5a1d353d4e'"
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rds.index_name"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 6,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
"\n",
"We cannot let this happen. \n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"results = rds.similarity_search(query)\n",
"print(results[0].page_content)"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 7,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['doc:333eadf75bd74be393acafa8bca48669']\n"
]
}
],
"source": [
"print(rds.add_texts([\"Ankush went to Princeton\"]))"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 8,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Ankush went to Princeton\n"
]
}
],
"source": [
"query = \"Princeton\"\n",
"results = rds.similarity_search(query)\n",
"print(results[0].page_content)"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"#Query\n",
"rds = Redis.from_existing_index(embeddings, redis_url=\"redis://localhost:6379\", index_name='link')\n",
"\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"results = rds.similarity_search(query)\n",
"print(results[0].page_content)"
],
"metadata": {
"collapsed": false
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
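Pulling the Redis pieces together, a minimal end-to-end sketch (assuming a local Redis instance with RediSearch at `redis://localhost:6379`, and using only calls demonstrated in this notebook):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.redis import Redis

embeddings = OpenAIEmbeddings()

# Reconnect to the index built earlier, add a document, and query it.
rds = Redis.from_existing_index(
    embeddings, redis_url="redis://localhost:6379", index_name="link"
)
rds.add_texts(["Ankush went to Princeton"])  # returns the new doc ids
for doc in rds.similarity_search("Princeton", k=2):
    print(doc.page_content)
```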
@@ -119,10 +119,39 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "05e9e2fe",
"metadata": {},
"source": []
"source": [
"## Using PromptLayer Track\n",
"If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantiating the PromptLayer LLM to get the request id. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1a7315b9",
"metadata": {},
"outputs": [],
"source": [
"llm = PromptLayerOpenAI(return_pl_id=True)\n",
"llm_results = llm.generate([\"Tell me a joke\"])\n",
"\n",
"for res in llm_results.generations:\n",
" pl_request_id = res[0].generation_info[\"pl_request_id\"]\n",
" promptlayer.track.score(request_id=pl_request_id, score=100)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "7eb19139",
"metadata": {},
"source": [
"Using this allows you to track the performance of your model in the PromptLayer dashboard. If you are using a prompt template, you can attach a template to a request as well.\n",
"Overall, this gives you the opportunity to track the performance of different templates and models in the PromptLayer dashboard."
]
}
],
"metadata": {
@@ -145,7 +174,7 @@
},
"vscode": {
"interpreter": {
"hash": "c4fe2cd85a8d9e8baaec5340ce66faff1c77581a9f43e6c45e85e09b6fced008"
"hash": "8a5edab282632443219e051e4ade2d1d5bbc671c781051bf1437897cbdfea0f1"
}
}
},
131
docs/modules/llms/integrations/sagemaker.ipynb
Normal file
@@ -0,0 +1,131 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# SageMakerEndpoint\n",
"\n",
"This notebook goes over how to use an LLM hosted on a SageMaker endpoint."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip3 install langchain boto3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"example_doc_1 = \"\"\"\n",
"Peter and Elizabeth took a taxi to attend the night party in the city. While in the party, Elizabeth collapsed and was rushed to the hospital.\n",
"Since she was diagnosed with a brain injury, the doctor told Peter to stay beside her until she gets well.\n",
"Therefore, Peter stayed with her at the hospital for 3 days without leaving.\n",
"\"\"\"\n",
"\n",
"docs = [\n",
" Document(\n",
" page_content=example_doc_1,\n",
" )\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Dict\n",
"\n",
"from langchain import PromptTemplate, SagemakerEndpoint\n",
"from langchain.llms.sagemaker_endpoint import ContentHandlerBase\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"import json\n",
"\n",
"query = \"\"\"How long was Elizabeth hospitalized?\n",
"\"\"\"\n",
"\n",
"prompt_template = \"\"\"Use the following pieces of context to answer the question at the end.\n",
"\n",
"{context}\n",
"\n",
"Question: {question}\n",
"Answer:\"\"\"\n",
"PROMPT = PromptTemplate(\n",
" template=prompt_template, input_variables=[\"context\", \"question\"]\n",
")\n",
"\n",
"class ContentHandler(ContentHandlerBase):\n",
" content_type = \"application/json\"\n",
" accepts = \"application/json\"\n",
"\n",
" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:\n",
" input_str = json.dumps({\"prompt\": prompt, **model_kwargs})\n",
" return input_str.encode('utf-8')\n",
" \n",
" def transform_output(self, output: bytes) -> str:\n",
" response_json = json.loads(output.read().decode(\"utf-8\"))\n",
" return response_json[0][\"generated_text\"]\n",
"\n",
"content_handler = ContentHandler()\n",
"\n",
"chain = load_qa_chain(\n",
" llm=SagemakerEndpoint(\n",
" endpoint_name=\"endpoint-name\", \n",
" credentials_profile_name=\"credentials-profile-name\", \n",
" region_name=\"us-west-2\", \n",
" model_kwargs={\"temperature\":1e-10},\n",
" content_handler=content_handler\n",
" ),\n",
" prompt=PROMPT\n",
")\n",
"\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)\n",
"\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
},
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
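For reference, a sketch of calling the endpoint directly, without wrapping it in a QA chain (the endpoint name, AWS profile, and region are placeholders, as in the notebook; `content_handler` is the `ContentHandler` defined above):

```python
llm = SagemakerEndpoint(
    endpoint_name="endpoint-name",  # placeholder
    credentials_profile_name="credentials-profile-name",  # placeholder
    region_name="us-west-2",
    model_kwargs={"temperature": 1e-10},
    content_handler=content_handler,  # serializes the request, parses the response
)
print(llm("How long was Elizabeth hospitalized?"))
```

An LLM object is directly callable with a prompt string, so this reuses the same `ContentHandler` to shape the request payload and extract `generated_text` from the response.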
@@ -30,36 +30,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ChatMessageHistory"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4404d509",
"metadata": {},
"outputs": [],
"source": [
"history = ChatMessageHistory()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "78c1a67b",
"metadata": {},
"outputs": [],
"source": [
"history.add_user_message(\"hi!\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "525ce606",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ChatMessageHistory\n",
"\n",
"history = ChatMessageHistory()\n",
"\n",
"history.add_user_message(\"hi!\")\n",
"\n",
"history.add_ai_message(\"whats up?\")"
]
},
@@ -331,6 +307,99 @@
"conversation.predict(input=\"Tell me about yourself.\")"
]
},
{
"cell_type": "markdown",
"id": "fb68bb9e",
"metadata": {},
"source": [
"## Saving Message History\n",
"\n",
"You may often want to save messages, and then load them to use again. This can be done easily by first converting the messages to normal Python dictionaries, saving those (as JSON or something) and then loading them. Here is an example of doing that."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b5acbc4b",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"from langchain.memory import ChatMessageHistory\n",
"from langchain.schema import messages_from_dict, messages_to_dict\n",
"\n",
"history = ChatMessageHistory()\n",
"\n",
"history.add_user_message(\"hi!\")\n",
"\n",
"history.add_ai_message(\"whats up?\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "7812ee21",
"metadata": {},
"outputs": [],
"source": [
"dicts = messages_to_dict(history.messages)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "3ed6e6a0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'type': 'human', 'data': {'content': 'hi!', 'additional_kwargs': {}}},\n",
" {'type': 'ai', 'data': {'content': 'whats up?', 'additional_kwargs': {}}}]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dicts"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cdf4ebd2",
"metadata": {},
"outputs": [],
"source": [
"new_messages = messages_from_dict(dicts)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "9724e24b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='hi!', additional_kwargs={}),\n",
" AIMessage(content='whats up?', additional_kwargs={})]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"new_messages"
]
},
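To round this out, a small sketch of actually persisting the dictionaries to disk and restoring them, as the markdown above suggests (the `messages.json` path is a hypothetical placeholder):

```python
import json

from langchain.schema import messages_from_dict, messages_to_dict

# Persist the conversation...
with open("messages.json", "w") as f:
    json.dump(messages_to_dict(history.messages), f)

# ...and restore it later.
with open("messages.json") as f:
    restored_messages = messages_from_dict(json.load(f))
```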
{
"cell_type": "markdown",
"id": "7826c210",
@@ -9,7 +9,7 @@ both at a short term but also at a long term level. The concept of "Memory" exis
One of the simpler forms of memory occurs in chatbots, where they remember previous conversations.
There are a few different ways to accomplish this:
- Buffer: This is just passing in the past `N` interactions in as context. `N` can be chosen based on a fixed number, the length of the interactions, or other!
- Summary: This involves summarizing previous conversations and passing that summary in, instead of the raw dialouge itself. Compared to `Buffer`, this compresses information: meaning it is more lossy, but also less likely to run into context length limits.
- Summary: This involves summarizing previous conversations and passing that summary in, instead of the raw dialogue itself. Compared to `Buffer`, this compresses information: meaning it is more lossy, but also less likely to run into context length limits.
- Combination: A combination of the above two approaches, where you compute a summary but also pass in some previous interactions directly!

## Entity Memory
288
docs/modules/memory/types/token_buffer.ipynb
Normal file
@@ -0,0 +1,288 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ff4be5f3",
"metadata": {},
"source": [
"## ConversationTokenBufferMemory\n",
"\n",
"`ConversationTokenBufferMemory` keeps a buffer of recent interactions in memory, and uses token length rather than number of interactions to determine when to flush interactions.\n",
"\n",
"Let's first walk through how to use the utilities."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "da3384db",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationTokenBufferMemory\n",
"from langchain.llms import OpenAI\n",
"llm = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e00d4938",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n",
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2fe28a28",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': 'Human: not much you\\nAI: not much'}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "markdown",
"id": "cf57b97a",
"metadata": {},
"source": [
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3422a3a8",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10, return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
]
},
{
"cell_type": "markdown",
"id": "a6d2569f",
"metadata": {},
"source": [
"## Using in a chain\n",
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ebd68c10",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi, what's up?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Hi there! I'm doing great, just enjoying the day. How about you?\""
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import ConversationChain\n",
"conversation_with_summary = ConversationChain(\n",
" llm=llm, \n",
" # We set a very low max_token_limit for the purposes of testing.\n",
" memory=ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60),\n",
" verbose=True\n",
")\n",
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "86207a61",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"Human: Hi, what's up?\n",
"AI: Hi there! I'm doing great, just enjoying the day. How about you?\n",
"Human: Just working on writing some documentation!\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' Sounds like a productive day! What kind of documentation are you writing?'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"Just working on writing some documentation!\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "76a0ab39",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"Human: Hi, what's up?\n",
"AI: Hi there! I'm doing great, just enjoying the day. How about you?\n",
"Human: Just working on writing some documentation!\n",
"AI: Sounds like a productive day! What kind of documentation are you writing?\n",
"Human: For LangChain! Have you heard of it?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Yes, I have heard of LangChain! It is a decentralized language-learning platform that connects native speakers and learners in real time. Is that the documentation you're writing about?\""
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"For LangChain! Have you heard of it?\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "8c669db1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"Human: For LangChain! Have you heard of it?\n",
"AI: Yes, I have heard of LangChain! It is a decentralized language-learning platform that connects native speakers and learners in real time. Is that the documentation you're writing about?\n",
"Human: Haha nope, although a lot of people confuse it for that\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Oh, I see. Is there another language learning platform you're referring to?\""
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# We can see here that the buffer is updated\n",
"conversation_with_summary.predict(input=\"Haha nope, although a lot of people confuse it for that\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8c09a239",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -21,16 +21,17 @@
"id": "5d56ce86",
"metadata": {},
"source": [
"## Create a custom prompt template\n",
"## Creating a Custom Prompt Template\n",
"\n",
"The only two requirements for all prompt templates are:\n",
"There are essentially two distinct prompt templates available - string prompt templates and chat prompt templates. String prompt templates provide a simple prompt in string format, while chat prompt templates produce a more structured prompt to be used with a chat API.\n",
"\n",
"1. They have a input_variables attribute that exposes what input variables this prompt template expects.\n",
"2. They expose a format method which takes in keyword arguments corresponding to the expected input_variables and returns the formatted prompt.\n",
"In this guide, we will create a custom prompt using a string prompt template. \n",
"\n",
"Let's create a custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function.\n",
"To create a custom string prompt template, there are two requirements:\n",
"1. It has an input_variables attribute that exposes what input variables the prompt template expects.\n",
"2. It exposes a format method that takes in keyword arguments corresponding to the expected input_variables and returns the formatted prompt.\n",
"\n",
"First, let's create a function that will return the source code of a function given its name."
"We will create a custom prompt template that takes in the function name as input and formats the prompt to provide the source code of the function. To achieve this, let's first create a function that will return the source code of a function given its name."
]
},
{
@@ -62,11 +63,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import BasePromptTemplate\n",
"from langchain.prompts import StringPromptTemplate\n",
"from pydantic import BaseModel, validator\n",
"\n",
"\n",
"class FunctionExplainerPromptTemplate(BasePromptTemplate, BaseModel):\n",
"class FunctionExplainerPromptTemplate(StringPromptTemplate, BaseModel):\n",
" \"\"\" A custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function. \"\"\"\n",
"\n",
" @validator(\"input_variables\")\n",
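For orientation, since the hunk above cuts off mid-class, a self-contained sketch of the custom string prompt template it is building (the validator and `format` bodies are assumptions consistent with the surrounding text, not lines taken from the diff):

```python
import inspect

from langchain.prompts import StringPromptTemplate
from pydantic import BaseModel, validator


def get_source_code(function_name):
    # Return the source code of the given function object.
    return inspect.getsource(function_name)


class FunctionExplainerPromptTemplate(StringPromptTemplate, BaseModel):
    """A custom prompt template that takes in the function name as input, and
    formats the prompt template to provide the source code of the function."""

    @validator("input_variables")
    def validate_input_variables(cls, v):
        # This template knows how to format exactly one variable.
        if len(v) != 1 or "function_name" not in v:
            raise ValueError("function_name must be the only input_variable.")
        return v

    def format(self, **kwargs) -> str:
        # Interpolate the function's source code into the prompt.
        source_code = get_source_code(kwargs["function_name"])
        return (
            "Given the function name and source code, explain the function.\n"
            f"Function Name: {kwargs['function_name'].__name__}\n"
            f"Source Code:\n{source_code}\nExplanation:"
        )
```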
@@ -14,9 +14,459 @@
"- `get_format_instructions() -> str`: A method which returns a string containing instructions for how the output of a language model should be formatted.\n",
"- `parse(str) -> Any`: A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n",
"\n",
"And then one optional one:\n",
"\n",
"- `parse_with_prompt(str) -> Any`: A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.\n",
"\n",
"\n",
"Below we go over some examples of output parsers."
]
},
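Before the concrete parsers, a minimal sketch of what implementing the two required methods looks like (a hypothetical one-item-per-line parser, not a class shipped with LangChain; the `langchain.schema.BaseOutputParser` import path is an assumption):

```python
from typing import List

from langchain.schema import BaseOutputParser  # assumed import path


class LineListOutputParser(BaseOutputParser):
    """Hypothetical parser that expects one item per line of the completion."""

    def get_format_instructions(self) -> str:
        return "Return one item per line, with no numbering or bullets."

    def parse(self, text: str) -> List[str]:
        # Keep non-empty lines, stripped of surrounding whitespace.
        return [line.strip() for line in text.splitlines() if line.strip()]
```

`LineListOutputParser().parse("vanilla\nchocolate\n")` would return `['vanilla', 'chocolate']`.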
{
"cell_type": "code",
"execution_count": 1,
"id": "5f0c8a33",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate\n",
"from langchain.llms import OpenAI\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "markdown",
"id": "a1ae632a",
"metadata": {},
"source": [
"## PydanticOutputParser\n",
"This output parser allows users to specify an arbitrary JSON schema and query LLMs for JSON outputs that conform to that schema.\n",
"\n",
"Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON. In the OpenAI family, DaVinci can do this reliably, but Curie's ability already drops off dramatically. \n",
"\n",
"Use Pydantic to declare your data model. Pydantic's BaseModel is like a Python dataclass, but with actual type checking + coercion."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cba6d8e3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers import PydanticOutputParser\n",
"from pydantic import BaseModel, Field, validator\n",
"from typing import List"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0a203100",
"metadata": {},
"outputs": [],
"source": [
"model_name = 'text-davinci-003'\n",
"temperature = 0.0\n",
"model = OpenAI(model_name=model_name, temperature=temperature)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b3f16168",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Joke(setup='Why did the chicken cross the road?', punchline='To get to the other side!')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Define your desired data structure.\n",
"class Joke(BaseModel):\n",
" setup: str = Field(description=\"question to set up a joke\")\n",
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
" \n",
" # You can add custom validation logic easily with Pydantic.\n",
" @validator('setup')\n",
" def question_ends_with_question_mark(cls, field):\n",
" if field[-1] != '?':\n",
" raise ValueError(\"Badly formed question!\")\n",
" return field\n",
"\n",
"# And a query intended to prompt a language model to populate the data structure.\n",
"joke_query = \"Tell me a joke.\"\n",
"\n",
"# Set up a parser + inject instructions into the prompt template.\n",
"parser = PydanticOutputParser(pydantic_object=Joke)\n",
"\n",
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()}\n",
")\n",
"\n",
"_input = prompt.format_prompt(query=joke_query)\n",
"\n",
"output = model(_input.to_string())\n",
"\n",
"parser.parse(output)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "03049f88",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Actor(name='Tom Hanks', film_names=['Forrest Gump', 'Saving Private Ryan', 'The Green Mile', 'Cast Away', 'Toy Story'])"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Here's another example, but with a compound typed field.\n",
"class Actor(BaseModel):\n",
" name: str = Field(description=\"name of an actor\")\n",
" film_names: List[str] = Field(description=\"list of names of films they starred in\")\n",
" \n",
"actor_query = \"Generate the filmography for a random actor.\"\n",
"\n",
"parser = PydanticOutputParser(pydantic_object=Actor)\n",
"\n",
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()}\n",
")\n",
"\n",
"_input = prompt.format_prompt(query=actor_query)\n",
"\n",
"output = model(_input.to_string())\n",
"\n",
"parser.parse(output)"
]
},
{
"cell_type": "markdown",
"id": "4d6c0c86",
"metadata": {},
"source": [
"## Fixing Output Parsing Mistakes\n",
"\n",
"The above guardrail simply tries to parse the LLM response. If it does not parse correctly, then it errors.\n",
"\n",
"But we can do other things besides throw errors. Specifically, we can pass the misformatted output, along with the format instructions, to the model and ask it to fix it.\n",
"\n",
"For this example, we'll use the above OutputParser. Here's what happens if we pass it a result that does not comply with the schema:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "73beb20d",
"metadata": {},
"outputs": [],
"source": [
"misformatted = \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f0e5ba80",
"metadata": {},
"outputs": [
{
"ename": "OutputParserException",
"evalue": "Failed to parse Actor from completion {'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}. Got: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)",
"File \u001b[0;32m~/workplace/langchain/langchain/output_parsers/pydantic.py:23\u001b[0m, in \u001b[0;36mPydanticOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 22\u001b[0m json_str \u001b[38;5;241m=\u001b[39m match\u001b[38;5;241m.\u001b[39mgroup()\n\u001b[0;32m---> 23\u001b[0m json_object \u001b[38;5;241m=\u001b[39m \u001b[43mjson\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloads\u001b[49m\u001b[43m(\u001b[49m\u001b[43mjson_str\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 24\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpydantic_object\u001b[38;5;241m.\u001b[39mparse_obj(json_object)\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/lib/python3.9/json/__init__.py:346\u001b[0m, in \u001b[0;36mloads\u001b[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 344\u001b[0m parse_int \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m parse_float \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 345\u001b[0m parse_constant \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_pairs_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m kw):\n\u001b[0;32m--> 346\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_default_decoder\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecode\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 347\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/lib/python3.9/json/decoder.py:337\u001b[0m, in \u001b[0;36mJSONDecoder.decode\u001b[0;34m(self, s, _w)\u001b[0m\n\u001b[1;32m 333\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Return the Python representation of ``s`` (a ``str`` instance\u001b[39;00m\n\u001b[1;32m 334\u001b[0m \u001b[38;5;124;03mcontaining a JSON document).\u001b[39;00m\n\u001b[1;32m 335\u001b[0m \n\u001b[1;32m 336\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m--> 337\u001b[0m obj, end \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraw_decode\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43midx\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_w\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mend\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 338\u001b[0m end \u001b[38;5;241m=\u001b[39m _w(s, end)\u001b[38;5;241m.\u001b[39mend()\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/lib/python3.9/json/decoder.py:353\u001b[0m, in \u001b[0;36mJSONDecoder.raw_decode\u001b[0;34m(self, s, idx)\u001b[0m\n\u001b[1;32m 352\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 353\u001b[0m obj, end \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscan_once\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43midx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m err:\n",
"\u001b[0;31mJSONDecodeError\u001b[0m: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mOutputParserException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mparser\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmisformatted\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/workplace/langchain/langchain/output_parsers/pydantic.py:29\u001b[0m, in \u001b[0;36mPydanticOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 27\u001b[0m name \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpydantic_object\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\n\u001b[1;32m 28\u001b[0m msg \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to parse \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m from completion \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m. Got: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m---> 29\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m OutputParserException(msg)\n",
"\u001b[0;31mOutputParserException\u001b[0m: Failed to parse Actor from completion {'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}. Got: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)"
]
}
],
"source": [
"parser.parse(misformatted)"
]
},
{
"cell_type": "markdown",
"id": "6c7c82b6",
"metadata": {},
"source": [
"Now we can construct and use an `OutputFixingParser`. This output parser takes another output parser as an argument, but also an LLM with which to try to correct any formatting mistakes."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "39b1a5ce",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers import OutputFixingParser\n",
"\n",
"new_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "0fd96d68",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Actor(name='Tom Hanks', film_names=['Forrest Gump'])"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"new_parser.parse(misformatted)"
]
},
{
"cell_type": "markdown",
"id": "ea34eeaa",
"metadata": {},
"source": [
"## Fixing Output Parsing Mistakes with the original prompt\n",
"\n",
"While in some cases it is possible to fix a parsing mistake by looking only at the output, in other cases it isn't. An example of this is when the output is not just in the incorrect format, but is also incomplete. Consider the below example."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "67c5e1ac",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Based on the user question, provide an Action and Action Input for what step should be taken.\n",
"{format_instructions}\n",
"Question: {query}\n",
"Response:\"\"\"\n",
"class Action(BaseModel):\n",
" action: str = Field(description=\"action to take\")\n",
" action_input: str = Field(description=\"input to the action\")\n",
" \n",
"parser = PydanticOutputParser(pydantic_object=Action)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "007aa87f",
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "10d207ff",
"metadata": {},
"outputs": [],
"source": [
"prompt_value = prompt.format_prompt(query=\"who is leo di caprios gf?\")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "68622837",
"metadata": {},
"outputs": [],
"source": [
"bad_response = '{\"action\": \"search\"}'"
]
},
{
"cell_type": "markdown",
"id": "25631465",
"metadata": {},
"source": [
"If we try to parse this response as is, we will get an error."
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "894967c1",
"metadata": {},
"outputs": [
{
"ename": "OutputParserException",
"evalue": "Failed to parse Action from completion {\"action\": \"search\"}. Got: 1 validation error for Action\naction_input\n field required (type=value_error.missing)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"File \u001b[0;32m~/workplace/langchain/langchain/output_parsers/pydantic.py:24\u001b[0m, in \u001b[0;36mPydanticOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 23\u001b[0m json_object \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(json_str)\n\u001b[0;32m---> 24\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpydantic_object\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mjson_object\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 26\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (json\u001b[38;5;241m.\u001b[39mJSONDecodeError, ValidationError) \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pydantic/main.py:527\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pydantic/main.py:342\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for Action\naction_input\n field required (type=value_error.missing)",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mOutputParserException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[15], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mparser\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbad_response\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/workplace/langchain/langchain/output_parsers/pydantic.py:29\u001b[0m, in \u001b[0;36mPydanticOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 27\u001b[0m name \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpydantic_object\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\n\u001b[1;32m 28\u001b[0m msg \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to parse \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m from completion \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m. Got: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m---> 29\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m OutputParserException(msg)\n",
"\u001b[0;31mOutputParserException\u001b[0m: Failed to parse Action from completion {\"action\": \"search\"}. Got: 1 validation error for Action\naction_input\n field required (type=value_error.missing)"
]
}
],
"source": [
"parser.parse(bad_response)"
]
},
{
"cell_type": "markdown",
"id": "f6b64696",
"metadata": {},
"source": [
"If we try to use the `OutputFixingParser` to fix this error, it will be confused - namely, it doesn't know what to actually put for action input."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "78b2b40d",
"metadata": {},
"outputs": [],
"source": [
"fix_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "4fe1301d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Action(action='search', action_input='keyword')"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"fix_parser.parse(bad_response)"
]
},
{
"cell_type": "markdown",
"id": "9bd9ea7d",
"metadata": {},
"source": [
"Instead, we can use the `RetryWithErrorOutputParser`, which passes the prompt (as well as the original output) back in to try again and get a better response."
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "7e8a8a28",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers import RetryWithErrorOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "5c86e141",
"metadata": {},
"outputs": [],
"source": [
"retry_parser = RetryWithErrorOutputParser.from_llm(parser=parser, llm=ChatOpenAI())"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "9c04f731",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Action(action='search', action_input='leo di caprios girlfriend')"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"retry_parser.parse_with_prompt(bad_response, prompt_value)"
]
},
{
"cell_type": "markdown",
"id": "61f67890",
"metadata": {},
"source": [
"<br>\n",
"<br>\n",
"<br>\n",
"<br>\n",
"<br>\n",
"<br>\n",
"<br>\n",
"<br>\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "64bf525a",
"metadata": {},
"source": [
"# Older, less powerful parsers"
]
},
{
"cell_type": "markdown",
"id": "91871002",
@@ -24,12 +474,12 @@
"source": [
"## Structured Output Parser\n",
"\n",
"This output parser can be used when you want to return multiple fields."
"While the Pydantic/JSON parser is more powerful, we initially experimented with data structures having text fields only."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 16,
"id": "b492997a",
"metadata": {},
"outputs": [],
@@ -37,18 +487,6 @@
"from langchain.output_parsers import StructuredOutputParser, ResponseSchema"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ffb7fc57",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate\n",
"from langchain.llms import OpenAI\n",
"from langchain.chat_models import ChatOpenAI"
]
},
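The diff elides the cells that actually build this parser, so here is a sketch of the `ResponseSchema` flow they walk through (the schema names are inferred from the `{'answer': ..., 'source': ...}` outputs visible below; the exact prompt wording is an assumption):

```python
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI

# One ResponseSchema per field you want back.
response_schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(name="source", description="source used to answer the question"),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)

prompt = PromptTemplate(
    template="answer the users question as best as possible.\n{format_instructions}\n{question}",
    input_variables=["question"],
    partial_variables={"format_instructions": output_parser.get_format_instructions()},
)

model = OpenAI(temperature=0)
output = model(prompt.format_prompt(question="what's the capital of france").to_string())
output_parser.parse(output)  # -> {'answer': 'Paris', 'source': '...'}
```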
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09473dce",
|
||||
@@ -59,7 +497,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 17,
|
||||
"id": "432ac44a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -81,7 +519,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 18,
|
||||
"id": "593cfc25",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -104,7 +542,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 19,
|
||||
"id": "106f1ba6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -114,7 +552,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 20,
|
||||
"id": "86d9d24f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -125,7 +563,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 21,
|
||||
"id": "956bdc99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -135,7 +573,7 @@
|
||||
"{'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -154,7 +592,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 22,
|
||||
"id": "8f483d7d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -164,7 +602,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 23,
|
||||
"id": "f761cbf1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -180,7 +618,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 24,
|
||||
"id": "edd73ae3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -191,7 +629,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 25,
|
||||
"id": "a3c8b91e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -201,7 +639,7 @@
|
||||
"{'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -217,12 +655,12 @@
|
||||
"source": [
|
||||
"## CommaSeparatedListOutputParser\n",
|
||||
"\n",
|
||||
"This output parser can be used to get a list of items as output."
|
||||
"Here's another parser strictly less powerful than Pydantic/JSON parsing."
|
||||
]
|
||||
},
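The hunks below are likewise collapsed; here is a sketch of the typical usage, with the subject inferred from the ice-cream-flavor output further down:

```python
# A sketch of typical usage; the subject is inferred from the output below.
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI

output_parser = CommaSeparatedListOutputParser()

prompt = PromptTemplate(
    template="List five {subject}.\n{format_instructions}",
    input_variables=["subject"],
    partial_variables={"format_instructions": output_parser.get_format_instructions()},
)

model = OpenAI(temperature=0)
output = model(prompt.format(subject="ice cream flavors"))
output_parser.parse(output)
# -> ['Vanilla', 'Chocolate', 'Strawberry', 'Mint Chocolate Chip', 'Cookies and Cream']
```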
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 26,
|
||||
"id": "872246d7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -232,7 +670,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 27,
|
||||
"id": "c3f9aee6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -242,7 +680,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 28,
|
||||
"id": "e77871b7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -257,7 +695,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 29,
|
||||
"id": "a71cb5d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -267,7 +705,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 30,
|
||||
"id": "783d7d98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -278,7 +716,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 31,
|
||||
"id": "fcb81344",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -292,7 +730,7 @@
|
||||
" 'Cookies and Cream']"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -300,14 +738,6 @@
|
||||
"source": [
|
||||
"output_parser.parse(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cba6d8e3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -120,6 +120,25 @@
|
||||
"!cat simple_prompt.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "de75e959",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = load_prompt(\"simple_prompt.json\")\n",
|
||||
"print(prompt.format(adjective=\"funny\", content=\"chickens\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1d788f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Tell me a funny joke about chickens."
|
||||
]
|
||||
},
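For reference, one way `simple_prompt.json` could have been produced; this is a sketch inferred from the formatted output above, not the file's actual provenance:

```python
# A hypothetical reconstruction of how simple_prompt.json might be created.
from langchain.prompts import PromptTemplate, load_prompt

prompt = PromptTemplate(
    input_variables=["adjective", "content"],
    template="Tell me a {adjective} joke about {content}.",
)
prompt.save("simple_prompt.json")  # serialize the prompt to JSON on disk

reloaded = load_prompt("simple_prompt.json")
print(reloaded.format(adjective="funny", content="chickens"))
# Tell me a funny joke about chickens.
```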
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d788a83c",
|
||||
|
||||
@@ -121,7 +121,8 @@
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Intermediate Answer\",\n",
|
||||
" func=search.run\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to ask with search\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
326
docs/modules/utils/examples/zapier.ipynb
Normal file
@@ -0,0 +1,326 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "16763ed3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Zapier Natural Language Actions API\n",
|
||||
"\\\n",
|
||||
"Full docs here: https://nla.zapier.com/api/v1/docs\n",
|
||||
"\n",
|
||||
"**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface.\n",
|
||||
"\n",
|
||||
"NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps\n",
|
||||
"\n",
|
||||
"Zapier NLA handles ALL the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs. The key idea is you, or your users, expose a set of actions via an oauth-like setup window, which you can then query and execute via a REST API.\n",
|
||||
"\n",
|
||||
"NLA offers both API Key and OAuth for signing NLA API requests.\n",
|
||||
"\n",
|
||||
"1. Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com)\n",
|
||||
"\n",
|
||||
"2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com\n",
|
||||
"\n",
|
||||
"This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/api/v1/docs) or reach out to nla@zapier.com for user-facing oauth developer support.\n",
|
||||
"\n",
|
||||
"This example goes over how to use the Zapier integration with a `SimpleSequentialChain`, then an `Agent`.\n",
|
||||
"In code, below:"
|
||||
]
|
||||
},
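As a quick preview of the API surface this notebook relies on — a sketch that assumes `ZAPIER_NLA_API_KEY` is already set in the environment (configured below):

```python
# Preview sketch: list the NLA actions exposed for your account.
from langchain.utilities.zapier import ZapierNLAWrapper

zapier = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment
actions = zapier.list()      # the actions you exposed at nla.zapier.com
for action in actions:
    print(action["id"], action["description"])
```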
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a363309c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5cf33377",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# get from https://platform.openai.com/\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\", \"\")\n",
|
||||
"\n",
|
||||
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in): \n",
|
||||
"os.environ[\"ZAPIER_NLA_API_KEY\"] = os.environ.get(\"ZAPIER_NLA_API_KEY\", \"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4881b484-1b97-478f-b206-aec407ceff66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example with Agent\n",
|
||||
"Zapier tools can be used with an agent. See the example below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b2044b17-c941-4ffb-8a03-027a35e2df81",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents.agent_toolkits import ZapierToolkit\n",
|
||||
"from langchain.utilities.zapier import ZapierNLAWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7b505eeb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 0. expose gmail 'find email' and slack 'send channel message' actions\n",
|
||||
"\n",
|
||||
"# first go here, log in, expose (enable) the two actions: https://nla.zapier.com/demo/start -- for this example, can leave all fields \"Have AI guess\"\n",
|
||||
"# in an oauth scenario, you'd get your own <provider> id (instead of 'demo') which you route your users through first"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "cab18227-c232-4214-9256-bb8dd352266c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"zapier = ZapierNLAWrapper()\n",
|
||||
"toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n",
|
||||
"agent = initialize_agent(toolkit.get_tools(), llm, agent=\"zero-shot-react-description\", verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "f94713de-b64d-465f-a087-00288b5f80ec",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find the email and summarize it.\n",
|
||||
"Action: Gmail: Find Email\n",
|
||||
"Action Input: Find the latest email from Silicon Valley Bank\u001b[0m\n",
|
||||
"Observation: \u001b[31;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I need to summarize the email and send it to the #test-zapier channel in Slack.\n",
|
||||
"Action: Slack: Send Channel Message\n",
|
||||
"Action Input: Send a slack message to the #test-zapier channel with the text \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m{\"message__text\": \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\", \"message__permalink\": \"https://langchain.slack.com/archives/C04TSGU0RA7/p1678859932375259\", \"channel\": \"C04TSGU0RA7\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:58:52Z\", \"message__bot_profile__icons__image_36\": \"https://avatars.slack-edge.com/2022-08-02/3888649620612_f864dc1bb794cf7d82b0_36.png\", \"message__blocks[]block_id\": \"kdZZ\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: I have sent a summary of the last email from Silicon Valley Bank to the #test-zapier channel in Slack.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I have sent a summary of the last email from Silicon Valley Bank to the #test-zapier channel in Slack.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bcdea831",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Example with SimpleSequentialChain\n",
|
||||
"If you need more explicit control, use a chain, like below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "10a46e7e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import LLMChain, TransformChain, SimpleSequentialChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.tools.zapier.tool import ZapierNLARunAction\n",
|
||||
"from langchain.utilities.zapier import ZapierNLAWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "b9358048",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 0. expose gmail 'find email' and slack 'send direct message' actions\n",
|
||||
"\n",
|
||||
"# first go here, log in, expose (enable) the two actions: https://nla.zapier.com/demo/start -- for this example, can leave all fields \"Have AI guess\"\n",
|
||||
"# in an oauth scenario, you'd get your own <provider> id (instead of 'demo') which you route your users through first\n",
|
||||
"\n",
|
||||
"actions = ZapierNLAWrapper().list()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "4e80f461",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 1. gmail find email\n",
|
||||
"\n",
|
||||
"GMAIL_SEARCH_INSTRUCTIONS = \"Grab the latest email from Silicon Valley Bank\"\n",
|
||||
"\n",
|
||||
"def nla_gmail(inputs):\n",
|
||||
" action = next((a for a in actions if a[\"description\"].startswith(\"Gmail: Find Email\")), None)\n",
|
||||
" return {\"email_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(inputs[\"instructions\"])}\n",
|
||||
"gmail_chain = TransformChain(input_variables=[\"instructions\"], output_variables=[\"email_data\"], transform=nla_gmail)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "46893233",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 2. generate draft reply\n",
|
||||
"\n",
|
||||
"template = \"\"\"You are an assisstant who drafts replies to an incoming email. Output draft reply in plain text (not JSON).\n",
|
||||
"\n",
|
||||
"Incoming email:\n",
|
||||
"{email_data}\n",
|
||||
"\n",
|
||||
"Draft email reply:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt_template = PromptTemplate(input_variables=[\"email_data\"], template=template)\n",
|
||||
"reply_chain = LLMChain(llm=OpenAI(temperature=.7), prompt=prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "cd85c4f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 3. send draft reply via a slack direct message\n",
|
||||
"\n",
|
||||
"SLACK_HANDLE = \"@Ankush Gola\"\n",
|
||||
"\n",
|
||||
"def nla_slack(inputs):\n",
|
||||
" action = next((a for a in actions if a[\"description\"].startswith(\"Slack: Send Direct Message\")), None)\n",
|
||||
" instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs[\"draft_reply\"]}'\n",
|
||||
" return {\"slack_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(instructions)}\n",
|
||||
"slack_chain = TransformChain(input_variables=[\"draft_reply\"], output_variables=[\"slack_data\"], transform=nla_slack)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "4829cab4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new SimpleSequentialChain chain...\u001b[0m\n",
|
||||
"\u001b[36;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3m\n",
|
||||
"Dear Silicon Valley Bridge Bank, \n",
|
||||
"\n",
|
||||
"Thank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \n",
|
||||
"\n",
|
||||
"Best regards, \n",
|
||||
"[Your Name]\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3m{\"message__text\": \"Dear Silicon Valley Bridge Bank, \\n\\nThank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \\n\\nBest regards, \\n[Your Name]\", \"message__permalink\": \"https://langchain.slack.com/archives/D04TKF5BBHU/p1678859968241629\", \"channel\": \"D04TKF5BBHU\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:59:28Z\", \"message__blocks[]block_id\": \"p7i\", \"message__blocks[]elements[]elements[]type\": \"[['text']]\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'{\"message__text\": \"Dear Silicon Valley Bridge Bank, \\\\n\\\\nThank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \\\\n\\\\nBest regards, \\\\n[Your Name]\", \"message__permalink\": \"https://langchain.slack.com/archives/D04TKF5BBHU/p1678859968241629\", \"channel\": \"D04TKF5BBHU\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:59:28Z\", \"message__blocks[]block_id\": \"p7i\", \"message__blocks[]elements[]elements[]type\": \"[[\\'text\\']]\", \"message__blocks[]elements[]type\": \"[\\'rich_text_section\\']\"}'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"## finally, execute\n",
|
||||
"\n",
|
||||
"overall_chain = SimpleSequentialChain(chains=[gmail_chain, reply_chain, slack_chain], verbose=True)\n",
|
||||
"overall_chain.run(GMAIL_SEARCH_INSTRUCTIONS)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "09ff954e-45f2-4595-92ea-91627abde4a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -7,8 +7,8 @@ This page contains instructions for installing and then setting up the environme
|
||||
1. Ensure you have Docker installed (see [Get Docker](https://docs.docker.com/get-docker/)) and that it’s running.
|
||||
2. Install the latest version of `langchain`: `pip install langchain` or `pip install langchain -U` to upgrade your
|
||||
existing version.
|
||||
3. Run `langchain-server`
|
||||
1. This will spin up the server in the terminal.
|
||||
3. Run `langchain-server`. This command was installed automatically when you ran the above command (`pip install langchain`).
|
||||
1. This will spin up the server in the terminal, hosted on port `4173` by default.
|
||||
2. Once you see the terminal
|
||||
output `langchain-langchain-frontend-1 | ➜ Local: [http://localhost:4173/](http://localhost:4173/)`, navigate
|
||||
to [http://localhost:4173/](http://localhost:4173/)
|
||||
|
||||
@@ -76,7 +76,7 @@ Examples of vector database companies include [Pinecone](https://www.pinecone.io
|
||||
|
||||
Although this is perhaps the most common way of document retrieval, people are starting to think about alternative
|
||||
data structures and indexing techniques specifically for working with language models. For a leading example of this,
|
||||
check out [GPT Index](https://github.com/jerryjliu/gpt_index) - a collection of data structures created by and optimized
|
||||
check out [LlamaIndex](https://github.com/jerryjliu/llama_index) - a collection of data structures created by and optimized
|
||||
for language models.
|
||||
|
||||
## Augmenting
|
||||
|
||||
@@ -1,13 +1,89 @@
|
||||
Evaluation
|
||||
==============
|
||||
|
||||
Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
|
||||
This section of documentation covers how we approach and think about evaluation in LangChain.
|
||||
It covers both evaluation of LangChain's internal chains/agents and how we recommend that people building on top of LangChain approach evaluation.
|
||||
|
||||
The examples here all highlight how to use language models to assist in evaluation of themselves.
|
||||
The Problem
|
||||
-----------
|
||||
|
||||
It can be really hard to evaluate LangChain chains and agents.
|
||||
There are two main reasons for this:
|
||||
|
||||
**# 1: Lack of data**
|
||||
|
||||
You generally don't have a ton of data to evaluate your chains/agents over before starting a project.
|
||||
This is usually because Large Language Models (the core of most chains/agents) are terrific few-shot and zero-shot learners,
|
||||
meaning you are almost always able to get started on a particular task (text-to-SQL, question answering, etc) without
|
||||
a large dataset of examples.
|
||||
This is in stark contrast to traditional machine learning where you had to first collect a bunch of datapoints
|
||||
before even getting started using a model.
|
||||
|
||||
**# 2: Lack of metrics**
|
||||
|
||||
Most chains/agents are performing tasks for which there are not very good metrics to evaluate performance.
|
||||
For example, one of the most common use cases is generating text of some form.
|
||||
Evaluating generated text is much more complicated than evaluating a classification prediction, or a numeric prediction.
|
||||
|
||||
The Solution
|
||||
------------
|
||||
|
||||
LangChain attempts to tackle both of those issues.
|
||||
What we have so far are initial passes at solutions - we do not think we have a perfect solution.
|
||||
So we very much welcome feedback, contributions, integrations, and thoughts on this.
|
||||
|
||||
Here is what we have for each problem so far:
|
||||
|
||||
**# 1: Lack of data**
|
||||
|
||||
We have started `LangChainDatasets <https://huggingface.co/LangChainDatasets>`_, a community space on Hugging Face.
|
||||
We intend this to be a collection of open source datasets for evaluating common chains and agents.
|
||||
We have contributed five datasets of our own to start, but we fully intend this to be a community effort.
|
||||
To contribute a dataset, simply join the community; you will then be able to upload datasets.
|
||||
|
||||
We're also aiming to make it as easy as possible for people to create their own datasets.
|
||||
As a first pass at this, we've added a QAGenerationChain, which, given a document, comes up
|
||||
with question-answer pairs that can be used to evaluate question-answering tasks over that document down the line.
|
||||
See `this notebook <./evaluation/qa_generation.html>`_ for an example of how to use this chain.
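A minimal sketch of using it, assuming ``ChatOpenAI`` as the generating model and ``doc_text`` holding the raw document text::

    from langchain.chains import QAGenerationChain
    from langchain.chat_models import ChatOpenAI

    chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
    qa_pairs = chain.run(doc_text)  # -> [{"question": ..., "answer": ...}, ...]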
|
||||
|
||||
**# 2: Lack of metrics**
|
||||
|
||||
We have two solutions to the lack of metrics.
|
||||
|
||||
The first solution is to use no metrics and instead rely on looking at results by eye to get a sense of how the chain/agent is performing.
|
||||
To assist in this, we have developed (and will continue to develop) `tracing <../tracing.html>`_, a UI-based visualizer of your chain and agent runs.
|
||||
|
||||
The second solution we recommend is to use Language Models themselves to evaluate outputs.
|
||||
For this we have a few different chains and prompts aimed at tackling this issue.
|
||||
|
||||
The Examples
|
||||
------------
|
||||
|
||||
We have created a bunch of examples combining the above two solutions to show how we internally evaluate chains and agents when we are developing.
|
||||
In addition to the examples we've curated, we also highly welcome contributions here.
|
||||
To facilitate that, we've included a `template notebook <./evaluation/benchmarking_template.html>`_ for community members to use to build their own examples.
|
||||
|
||||
The existing examples we have are:
|
||||
|
||||
`Question Answering (State of Union) <./evaluation/qa_benchmarking_sota.html>`_: A notebook showing evaluation of a question-answering task over a State-of-the-Union address.
|
||||
|
||||
`Question Answering (Paul Graham Essay) <./evaluation/qa_benchmarking_pg.html>`_: A notebook showing evaluation of a question-answering task over a Paul Graham essay.
|
||||
|
||||
`SQL Question Answering (Chinook) <./evaluation/sql_qa_benchmarking_chinook.html>`_: A notebook showing evaluation of a question-answering task over a SQL database (the Chinook database).
|
||||
|
||||
`Agent Vectorstore <./evaluation/agent_vectordb_sota_pg.html>`_: A notebook showing evaluation of an agent doing question answering while routing between two different vector databases.
|
||||
|
||||
`Agent Search + Calculator <./evaluation/agent_benchmarking.html>`_: A notebook showing evaluation of an agent doing question answering using a Search engine and a Calculator as tools.
|
||||
|
||||
|
||||
Other Examples
|
||||
------------
|
||||
|
||||
In addition, we also have some more generic resources for evaluation.
|
||||
|
||||
`Question Answering <./evaluation/question_answering.html>`_: An overview of using LLMs to evaluate question answering systems in general.
|
||||
|
||||
`Data Augmented Question Answering <./evaluation/data_augmented_question_answering.html>`_: An end-to-end example of evaluating a question answering system focused on a specific document (a VectorDBQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
|
||||
`Data Augmented Question Answering <./evaluation/data_augmented_question_answering.html>`_: An end-to-end example of evaluating a question answering system focused on a specific document (a RetrievalQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
|
||||
|
||||
`Hugging Face Datasets <./evaluation/huggingface_datasets.html>`_: Covers an example of loading and using a dataset from Hugging Face for evaluation.
|
||||
|
||||
|
||||
343
docs/use_cases/evaluation/agent_benchmarking.ipynb
Normal file
@@ -0,0 +1,343 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agent Benchmarking: Search + Calculator\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance of an agent on tasks where it has access to a calculator and a search tool.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "46bf9205",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--agent-search-calculator-8a025c0ce5fb99d2/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "3a275586643f4ccfba1a8d54be28c351",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"agent-search-calculator\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to load an agent capable of answering these questions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain.agents import initialize_agent, Tool, load_tools\n",
|
||||
"\n",
|
||||
"tools = load_tools(['serpapi', 'llm-math'], llm=OpenAI(temperature=0))\n",
|
||||
"agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68504a8f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "cbcafc92",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'38,630,316 people live in Canada as of 2023.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(dataset[0]['question'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrying langchain.llms.openai.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised APIConnectionError: Error communicating with OpenAI: ('Connection aborted.', ConnectionResetError(54, 'Connection reset by peer')).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions = []\n",
|
||||
"predicted_dataset = []\n",
|
||||
"error_dataset = []\n",
|
||||
"for data in dataset:\n",
|
||||
" new_data = {\"input\": data[\"question\"], \"answer\": data[\"answer\"]}\n",
|
||||
" try:\n",
|
||||
" predictions.append(agent(new_data))\n",
|
||||
" predicted_dataset.append(new_data)\n",
|
||||
" except Exception:\n",
|
||||
" error_dataset.append(new_data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'How many people live in canada as of 2023?',\n",
|
||||
" 'answer': 'approximately 38,625,801',\n",
|
||||
" 'output': '38,630,316 people live in Canada as of 2023.',\n",
|
||||
" 'intermediate_steps': [(AgentAction(tool='Search', tool_input='Population of Canada 2023', log=' I need to find population data\\nAction: Search\\nAction Input: Population of Canada 2023'),\n",
|
||||
" '38,630,316')]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"output\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 4, ' INCORRECT': 6})"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': \"who is dua lipa's boyfriend? what is his age raised to the .43 power?\",\n",
|
||||
" 'answer': 'her boyfriend is Romain Gravas. his age raised to the .43 power is approximately 4.9373857399466665',\n",
|
||||
" 'output': \"Isaac Carew, Dua Lipa's boyfriend, is 36 years old and his age raised to the .43 power is 4.6688516567750975.\",\n",
|
||||
" 'intermediate_steps': [(AgentAction(tool='Search', tool_input=\"Dua Lipa's boyfriend\", log=' I need to find out who Dua Lipa\\'s boyfriend is and then calculate his age raised to the .43 power\\nAction: Search\\nAction Input: \"Dua Lipa\\'s boyfriend\"'),\n",
|
||||
" 'Dua and Isaac, a model and a chef, dated on and off from 2013 to 2019. The two first split in early 2017, which is when Dua went on to date LANY ...'),\n",
|
||||
" (AgentAction(tool='Search', tool_input='Isaac Carew age', log=' I need to find out Isaac\\'s age\\nAction: Search\\nAction Input: \"Isaac Carew age\"'),\n",
|
||||
" '36 years'),\n",
|
||||
" (AgentAction(tool='Calculator', tool_input='36^.43', log=' I need to calculate 36 raised to the .43 power\\nAction: Calculator\\nAction Input: 36^.43'),\n",
|
||||
" 'Answer: 4.6688516567750975\\n')],\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
503
docs/use_cases/evaluation/agent_vectordb_sota_pg.ipynb
Normal file
@@ -0,0 +1,503 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agent VectorDB Question Answering Benchmarking\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task using an agent to route between multiple vectordatabases.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "7b57a50f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--agent-vectordb-qa-sota-pg-d3ae24016b514f92/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "4c389519842e4b65afc33006a531dcbc",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"agent-vectordb-qa-sota-pg\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "61375342",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'steps': [{'tool': 'State of Union QA System', 'tool_input': None},\n",
|
||||
" {'tool': None, 'tool_input': 'What is the purpose of the NATO Alliance?'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dataset[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "02500304",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of YC?',\n",
|
||||
" 'answer': 'The purpose of YC is to cause startups to be founded that would not otherwise have existed.',\n",
|
||||
" 'steps': [{'tool': 'Paul Graham QA System', 'tool_input': None},\n",
|
||||
" {'tool': None, 'tool_input': 'What is the purpose of YC?'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dataset[-1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing question answering. Step one in that is creating indexes over the data in question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7f0de2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ef84ff99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore_sota = VectorstoreIndexCreator(vectorstore_kwargs={\"collection_name\":\"sota\"}).from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a question answering chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "573719a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_sota = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0), chain_type=\"stuff\", retriever=vectorstore_sota, input_key=\"question\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e48b03d8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we do the same for the Paul Graham data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "c2dbb014",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../modules/paul_graham_essay.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "98d16f08",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore_pg = VectorstoreIndexCreator(vectorstore_kwargs={\"collection_name\":\"paul_graham\"}).from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "ec0aab02",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_pg = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0), chain_type=\"stuff\", retriever=vectorstore_pg, input_key=\"question\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "76b5f8fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now set up an agent to route between them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "ade1aafa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import initialize_agent, Tool\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"State of Union QA System\",\n",
|
||||
" func=chain_sota.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\"\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name = \"Paul Graham System\",\n",
|
||||
" func=chain_pg.run,\n",
|
||||
" description=\"useful for when you need to answer questions about Paul Graham. Input should be a fully formed question.\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "104853f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\", max_iterations=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7f036641",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "4664e79f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The purpose of the NATO Alliance is to promote peace and security in the North Atlantic region by providing a collective defense against potential threats.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(dataset[0]['question'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "799f6c17",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = []\n",
|
||||
"predicted_dataset = []\n",
|
||||
"error_dataset = []\n",
|
||||
"for data in dataset:\n",
|
||||
" new_data = {\"input\": data[\"question\"], \"answer\": data[\"answer\"]}\n",
|
||||
" try:\n",
|
||||
" predictions.append(agent(new_data))\n",
|
||||
" predicted_dataset.append(new_data)\n",
|
||||
" except Exception:\n",
|
||||
" error_dataset.append(new_data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(predicted_dataset, predictions, question_key=\"input\", prediction_key=\"output\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 19, ' INCORRECT': 14})"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What is the purpose of the Bipartisan Innovation Act mentioned in the text?',\n",
|
||||
" 'answer': 'The Bipartisan Innovation Act will make record investments in emerging technologies and American manufacturing to level the playing field with China and other competitors.',\n",
|
||||
" 'output': 'The purpose of the Bipartisan Innovation Act is to promote innovation and entrepreneurship in the United States by providing tax incentives and other support for startups and small businesses.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
160
docs/use_cases/evaluation/benchmarking_template.ipynb
Normal file
@@ -0,0 +1,160 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a175c650",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Benchmarking Template\n",
|
||||
"\n",
|
||||
"This is an example notebook that can be used to create a benchmarking notebook for a task of your choice. Evaluation is really hard, and so we greatly welcome any contributions that can make it easier for people to experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "9fe4d1b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f66405e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "79402a8f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This notebook should so how to load the dataset from LangChainDatasets on Hugging Face\n",
|
||||
"\n",
|
||||
"# Please upload your dataset to https://huggingface.co/LangChainDatasets\n",
|
||||
"\n",
|
||||
"# The value passed into `load_dataset` should NOT have the `LangChainDatasets/` prefix\n",
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"TODO\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"\n",
|
||||
"This next section should have an example of setting up a chain that can be run on this dataset."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a2661ce0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
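A purely illustrative placeholder, assuming a trivial pass-through prompt; swap in whatever chain fits your task:

```python
# Illustrative only; replace with the chain you actually want to benchmark.
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(input_variables=["input"], template="{input}")
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
```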
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6c0062e7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d28c5e7d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Example of running the chain on a single datapoint (`dataset[0]`) goes here"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Example of running the chain on many predictions goes here\n",
|
||||
"\n",
|
||||
"# Sometimes its as simple as `chain.apply(dataset)`\n",
|
||||
"\n",
|
||||
"# Othertimes you may want to write a for loop to catch errors"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"\n",
|
||||
"Any guide to evaluating performance in a more systematic manner goes here."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
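A sketch of what a filled-in copy of this template can look like, using the `llm-math` dataset and `LLMMathChain` from the notebook below as stand-ins; any chain whose input keys match your dataset would work the same way:

```python
# Filled-in template sketch: load a dataset, build a chain, and run an
# error-tolerant prediction loop as suggested by the template's comments.
from langchain.evaluation.loading import load_dataset
from langchain.llms import OpenAI
from langchain.chains import LLMMathChain

dataset = load_dataset("llm-math")
chain = LLMMathChain(llm=OpenAI(temperature=0))

predictions, error_dataset = [], []
for data in dataset:
    try:
        predictions.append(chain(data))
    except Exception:
        # Track datapoints the chain failed on instead of crashing the run.
        error_dataset.append(data)
```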
@@ -23,7 +23,8 @@
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain import OpenAI, VectorDBQA"
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import RetrievalQA"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,7 +51,7 @@
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docsearch = Chroma.from_documents(texts, embeddings)\n",
|
||||
"qa = VectorDBQA.from_llm(llm=OpenAI(), vectorstore=docsearch)"
|
||||
"qa = RetrievalQA.from_llm(llm=OpenAI(), retriever=docsearch.as_retriever())"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -434,7 +435,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
306 docs/use_cases/evaluation/llm_math.ipynb Normal file
@@ -0,0 +1,306 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a4734146",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM Math\n",
|
||||
"\n",
|
||||
"Evaluating chains that know how to do math."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "fdd7afae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ce05ffea",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d028a511cede4de2b845b9a9954d6bea",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading readme: 0%| | 0.00/21.0 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Downloading and preparing dataset json/LangChainDatasets--llm-math to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--llm-math-509b11d101165afa/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a71c8e5a21dd4da5a20a354b544f7a58",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "ae530ca624154a1a934075c47d1093a6",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data: 0%| | 0.00/631 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "7a4968df05d84bc483aa2c5039aecafe",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Extracting data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Generating train split: 0 examples [00:00, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dataset json downloaded and prepared to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--llm-math-509b11d101165afa/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51. Subsequent calls will reuse this data.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "9a2caed96225410fb1cc0f8f155eb766",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"llm-math\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a998d6f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing math."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "7078f7f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import LLMMathChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "2bd70c46",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "954c3270",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = LLMMathChain(llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "f252027e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = chain.apply(dataset)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "c8af7041",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"numeric_output = [float(p['answer'].strip().strip(\"Answer: \")) for p in predictions]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "cc09ffe4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"correct = [example['answer'] == numeric_output[i] for i, example in enumerate(dataset)]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "585244e4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sum(correct) / len(correct)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "0d14ac78",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"input: 5\n",
|
||||
"expected output : 5.0\n",
|
||||
"prediction: 5.0\n",
|
||||
"input: 5 + 3\n",
|
||||
"expected output : 8.0\n",
|
||||
"prediction: 8.0\n",
|
||||
"input: 2^3.171\n",
|
||||
"expected output : 9.006708689094099\n",
|
||||
"prediction: 9.006708689094099\n",
|
||||
"input: 2 ^3.171 \n",
|
||||
"expected output : 9.006708689094099\n",
|
||||
"prediction: 9.006708689094099\n",
|
||||
"input: two to the power of three point one hundred seventy one\n",
|
||||
"expected output : 9.006708689094099\n",
|
||||
"prediction: 9.006708689094099\n",
|
||||
"input: five + three squared minus 1\n",
|
||||
"expected output : 13.0\n",
|
||||
"prediction: 13.0\n",
|
||||
"input: 2097 times 27.31\n",
|
||||
"expected output : 57269.07\n",
|
||||
"prediction: 57269.07\n",
|
||||
"input: two thousand ninety seven times twenty seven point thirty one\n",
|
||||
"expected output : 57269.07\n",
|
||||
"prediction: 57269.07\n",
|
||||
"input: 209758 / 2714\n",
|
||||
"expected output : 77.28739867354459\n",
|
||||
"prediction: 77.28739867354459\n",
|
||||
"input: 209758.857 divided by 2714.31\n",
|
||||
"expected output : 77.27888745205964\n",
|
||||
"prediction: 77.27888745205964\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for i, example in enumerate(dataset):\n",
|
||||
" print(\"input: \", example[\"question\"])\n",
|
||||
" print(\"expected output :\", example[\"answer\"])\n",
|
||||
" print(\"prediction: \", numeric_output[i])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b9021ffd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
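The exact float equality used above (`example['answer'] == numeric_output[i]`) happens to work on this dataset but is brittle in general; a sketch of a tolerance-based comparison instead:

```python
# Compare predictions against expected answers with a relative tolerance
# rather than exact float equality.
import math

correct = [
    math.isclose(example["answer"], numeric_output[i], rel_tol=1e-9)
    for i, example in enumerate(dataset)
]
print(sum(correct) / len(correct))
```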
374 docs/use_cases/evaluation/qa_benchmarking_pg.ipynb Normal file
@@ -0,0 +1,374 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Question Answering Benchmarking: Paul Graham Essay\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task over a Paul Graham essay.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "3bd13ab7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--question-answering-paul-graham-76e8f711e038d742/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "9264acfe710b4faabf060f0fcf4f7308",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"question-answering-paul-graham\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing question answering. Step one in that is creating an index over the data in question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader(\"../../modules/paul_graham_essay.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7f0de2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ef84ff99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore = VectorstoreIndexCreator().from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a question answering chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "573719a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=vectorstore.as_retriever(), input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "53b5aa23",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "3f81d951",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What were the two main things the author worked on before college?',\n",
|
||||
" 'answer': 'The two main things the author worked on before college were writing and programming.',\n",
|
||||
" 'result': ' Writing and programming.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(dataset[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = chain.apply(dataset)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What were the two main things the author worked on before college?',\n",
|
||||
" 'answer': 'The two main things the author worked on before college were writing and programming.',\n",
|
||||
" 'result': ' Writing and programming.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 12, ' INCORRECT': 10})"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What did the author write their dissertation on?',\n",
|
||||
" 'answer': 'The author wrote their dissertation on applications of continuations.',\n",
|
||||
" 'result': ' The author does not mention what their dissertation was on, so it is not known.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
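Note that the grader's labels carry a leading space (`' CORRECT'` / `' INCORRECT'`), which the filter above has to match exactly. A small sketch of normalizing the grades first, so downstream comparisons can use plain labels:

```python
# Strip whitespace from the grade labels before counting or filtering.
for pred, graded in zip(predictions, graded_outputs):
    pred["grade"] = graded["text"].strip()

incorrect = [pred for pred in predictions if pred["grade"] == "INCORRECT"]
```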
374 docs/use_cases/evaluation/qa_benchmarking_sota.ipynb Normal file
@@ -0,0 +1,374 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Question Answering Benchmarking: State of the Union Address\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task over a state of the union address.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f127fb04",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--question-answering-state-of-the-union-a7e5a3b2db4f440d/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"question-answering-state-of-the-union\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing question answering. Step one in that is creating an index over the data in question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "7f0de2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ef84ff99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore = VectorstoreIndexCreator().from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a question answering chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "573719a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=vectorstore.as_retriever(), input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "37d669e9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "3089e409",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'result': ' The NATO Alliance was created to secure peace and stability in Europe after World War 2.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(dataset[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = chain.apply(dataset)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'result': ' The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 7, ' INCORRECT': 4})"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the U.S. Department of Justice doing to combat the crimes of Russian oligarchs?',\n",
|
||||
" 'answer': 'The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.',\n",
|
||||
" 'result': ' The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and is naming a chief prosecutor for pandemic fraud.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
117 docs/use_cases/evaluation/qa_generation.ipynb Normal file
@@ -0,0 +1,117 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ee2a3a21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# QA Generation\n",
|
||||
"This notebook shows how to use the `QAGenerationChain` to come up with question-answer pairs over a specific document.\n",
|
||||
"This is important because often times you may not have data to evaluate your question-answer system over, so this is a cheap and lightweight way to generate it!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "33d3f0b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2029a29c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "87edb84c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc = loader.load()[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "04125b6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import QAGenerationChain\n",
|
||||
"chain = QAGenerationChain.from_llm(ChatOpenAI(temperature = 0))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "4f1593e4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa = chain.run(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "ee831f92",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the U.S. Department of Justice doing to combat the crimes of Russian oligarchs?',\n",
|
||||
" 'answer': 'The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7028754e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
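A sketch of closing the loop: the generated pairs can serve as an evaluation dataset for the benchmarking workflow shown above. Here `qa_chain` is an assumed question-answering chain (for example, a `RetrievalQA` chain built as in the earlier notebooks) that answers `{"question": ...}` inputs with a `result` key:

```python
# Grade a QA chain against the generated question-answer pairs.
from langchain.evaluation.qa import QAEvalChain
from langchain.llms import OpenAI

predictions = qa_chain.apply(qa)  # qa: list of {"question", "answer"} dicts
eval_chain = QAEvalChain.from_llm(OpenAI(temperature=0))
graded_outputs = eval_chain.evaluate(
    qa, predictions, question_key="question", prediction_key="result"
)
```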
@@ -191,7 +191,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "782ae8c8",
|
||||
"metadata": {},
|
||||
@@ -316,7 +315,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -330,7 +329,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7 (default, Sep 16 2021, 08:50:36) \n[Clang 10.0.0 ]"
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
423 docs/use_cases/evaluation/sql_qa_benchmarking_chinook.ipynb Normal file
@@ -0,0 +1,423 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SQL Question Answering Benchmarking: Chinook\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task over a SQL database.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "44874486",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f66405e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "0df1393f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "b220d07ee5d14909bc842b4545cdc0de",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading readme: 0%| | 0.00/21.0 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Downloading and preparing dataset json/LangChainDatasets--sql-qa-chinook to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--sql-qa-chinook-7528565d2d992b47/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "e89e3c8ef76f49889c4b39c624828c71",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a8421df6c26045e8978c7086cb418222",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data: 0%| | 0.00/1.44k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d1fb6becc3324a85bf039a53caf30924",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Extracting data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Generating train split: 0 examples [00:00, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dataset json downloaded and prepared to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--sql-qa-chinook-7528565d2d992b47/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51. Subsequent calls will reuse this data.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "9d68ad1b3e4a4bd79f92597aac4d3cc9",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"sql-qa-chinook\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ab44d504",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How many employees are there?', 'answer': '8'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dataset[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"This uses the example Chinook database.\n",
|
||||
"To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the `.db` file in a notebooks folder at the root of this repository.\n",
|
||||
"\n",
|
||||
"Note that here we load a simple chain. If you want to experiment with more complex chains, or an agent, just create the `chain` object in a different way."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI, SQLDatabase, SQLDatabaseChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "33cdcbfc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///../../../notebooks/Chinook.db\")\n",
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a SQL database chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = SQLDatabaseChain(llm=llm, database=db, input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6c0062e7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "d28c5e7d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How many employees are there?',\n",
|
||||
" 'answer': '8',\n",
|
||||
" 'result': ' There are 8 employees.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(dataset[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions. Note that we add a try-except because this chain can sometimes error (if SQL is written incorrectly, etc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = []\n",
|
||||
"predicted_dataset = []\n",
|
||||
"error_dataset = []\n",
|
||||
"for data in dataset:\n",
|
||||
" try:\n",
|
||||
" predictions.append(chain(data))\n",
|
||||
" predicted_dataset.append(data)\n",
|
||||
" except:\n",
|
||||
" error_dataset.append(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. We can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(predicted_dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 3, ' INCORRECT': 4})"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How many employees are also customers?',\n",
|
||||
" 'answer': 'None',\n",
|
||||
" 'result': ' 59 employees are also customers.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
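Since the prediction loop above silently drops datapoints that errored, a sketch of folding those failures back into the headline number so they count against the chain:

```python
# Accuracy including errored datapoints (an error counts as incorrect).
n_correct = sum(pred["grade"].strip() == "CORRECT" for pred in predictions)
n_total = len(predictions) + len(error_dataset)
print(f"Accuracy including errors: {n_correct / n_total:.2%}")
```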
20 docs/use_cases/extraction.md Normal file
@@ -0,0 +1,20 @@
# Extraction

Most APIs and databases still deal with structured information.
Therefore, to work with them effectively, it is often useful to extract structured information from text.
Examples of this include:

- Extracting a structured row to insert into a database from a sentence
- Extracting multiple rows to insert into a database from a long document
- Extracting the correct API parameters from a user query

This work is closely related to [output parsing](../modules/prompts/examples/output_parsers.ipynb).
Output parsers are responsible for instructing the LLM to respond in a specific format.
In this case, the output parsers specify the format of the data you would like to extract from the document.
Then, in addition to the output format instructions, the prompt should also contain the data you would like to extract information from (see the sketch below).

While normal output parsers are good enough for basic structuring of response data,
when doing extraction you often want more complicated or nested structures.
For a deep dive on extraction, we recommend checking out [`kor`](https://eyurtsev.github.io/kor/),
a library that uses the existing LangChain chain and OutputParser abstractions
but focuses on enabling extraction of more complicated and nested schemas.
|
||||
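A minimal sketch of the output-parser approach described above, assuming the `StructuredOutputParser`/`ResponseSchema` helpers from the linked output parsers guide; the field names and example text are made up:

```python
# Use an output parser to specify, and then parse, a structured extraction.
from langchain.llms import OpenAI
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import PromptTemplate

schemas = [
    ResponseSchema(name="name", description="the person's name"),
    ResponseSchema(name="age", description="the person's age"),
]
parser = StructuredOutputParser.from_response_schemas(schemas)

prompt = PromptTemplate(
    template="Extract the requested fields from the text.\n{format_instructions}\nText: {text}",
    input_variables=["text", "format_instructions"],
)

llm = OpenAI(temperature=0)
output = llm(
    prompt.format(
        text="Alice is 31 and lives in Paris.",
        format_instructions=parser.get_format_instructions(),
    )
)
print(parser.parse(output))  # e.g. {'name': 'Alice', 'age': '31'}
```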
31 docs/use_cases/tabular.md Normal file
@@ -0,0 +1,31 @@
# Querying Tabular Data

Lots of data and information is stored in tabular form, whether in CSVs, Excel sheets, or SQL tables.
This page covers all resources available in LangChain for working with data in this format.

## Document Loading
If you have text data stored in a tabular format, you may want to load the data into a Document and then index it as you would
other text/unstructured data. For this, you should use a document loader like the [CSVLoader](../modules/document_loaders/examples/csv.ipynb),
then [create an index](../modules/indexes.rst) over that data, and [query it that way](../modules/indexes/chain_examples/vector_db_qa.ipynb).

## Querying
If you have more numeric tabular data, or have a large amount of data and don't want to index it, you should get started
by looking at the various chains and agents we have for dealing with this data.

### Chains

If you are just getting started and have relatively small/simple tabular data, you should start with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
better understand what is happening.

- [SQL Database Chain](../modules/chains/examples/sqlite.ipynb)

### Agents

Agents are more complex and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger databases and more complex schemas (a short sketch follows the list below).

- [SQL Agent](../modules/agents/agent_toolkits/sql_database.ipynb)
- [Pandas Agent](../modules/agents/agent_toolkits/pandas.ipynb)
- [CSV Agent](../modules/agents/agent_toolkits/csv.ipynb)
|
||||
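As a quick illustration of the agent route, a sketch using the CSV agent linked above (the file path is a placeholder):

```python
# Spin up an agent over a CSV file and query it in natural language.
from langchain.agents import create_csv_agent
from langchain.llms import OpenAI

agent = create_csv_agent(OpenAI(temperature=0), "titanic.csv", verbose=True)
agent.run("How many rows are there?")
```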
@@ -33,6 +33,7 @@ from langchain.llms import (
|
||||
Modal,
|
||||
OpenAI,
|
||||
Petals,
|
||||
SagemakerEndpoint,
|
||||
StochasticAI,
|
||||
Writer,
|
||||
)
|
||||
@@ -90,6 +91,7 @@ __all__ = [
|
||||
"ReActChain",
|
||||
"Wikipedia",
|
||||
"HuggingFaceHub",
|
||||
"SagemakerEndpoint",
|
||||
"HuggingFacePipeline",
|
||||
"SQLDatabase",
|
||||
"SQLDatabaseChain",
|
||||
|
||||
@@ -453,9 +453,15 @@ class AgentExecutor(Chain, BaseModel):
|
||||
# If the tool chosen is the finishing tool, then we end and return.
|
||||
if isinstance(output, AgentFinish):
|
||||
return output
|
||||
self.callback_manager.on_agent_action(
|
||||
output, verbose=self.verbose, color="green"
|
||||
)
|
||||
if self.callback_manager.is_async:
|
||||
await self.callback_manager.on_agent_action(
|
||||
output, verbose=self.verbose, color="green"
|
||||
)
|
||||
else:
|
||||
self.callback_manager.on_agent_action(
|
||||
output, verbose=self.verbose, color="green"
|
||||
)
|
||||
|
||||
# Otherwise we lookup the tool
|
||||
if output.tool in name_to_tool_map:
|
||||
tool = name_to_tool_map[output.tool]
|
||||
|
||||
@@ -18,6 +18,7 @@ from langchain.agents.agent_toolkits.vectorstore.toolkit import (
|
||||
VectorStoreRouterToolkit,
|
||||
VectorStoreToolkit,
|
||||
)
|
||||
from langchain.agents.agent_toolkits.zapier.toolkit import ZapierToolkit
|
||||
|
||||
__all__ = [
|
||||
"create_json_agent",
|
||||
@@ -34,4 +35,5 @@ __all__ = [
|
||||
"VectorStoreRouterToolkit",
|
||||
"create_pandas_dataframe_agent",
|
||||
"create_csv_agent",
|
||||
"ZapierToolkit",
|
||||
]
|
||||
|
||||
1 langchain/agents/agent_toolkits/zapier/__init__.py Normal file
@@ -0,0 +1 @@
|
||||
"""Zapier Toolkit."""
|
||||
34 langchain/agents/agent_toolkits/zapier/toolkit.py Normal file
@@ -0,0 +1,34 @@
"""Zapier Toolkit."""
from typing import List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper


class ZapierToolkit(BaseToolkit):
    """Zapier Toolkit."""

    tools: List[BaseTool] = []

    @classmethod
    def from_zapier_nla_wrapper(
        cls, zapier_nla_wrapper: ZapierNLAWrapper
    ) -> "ZapierToolkit":
        """Create a toolkit from a ZapierNLAWrapper."""
        actions = zapier_nla_wrapper.list()
        tools = [
            ZapierNLARunAction(
                action_id=action["id"],
                zapier_description=action["description"],
                params_schema=action["params"],
                api_wrapper=zapier_nla_wrapper,
            )
            for action in actions
        ]
        return cls(tools=tools)

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        return self.tools
|
||||
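A sketch of wiring the toolkit above into an agent; it assumes a `ZAPIER_NLA_API_KEY` environment variable, which is how `ZapierNLAWrapper` is typically configured:

```python
# Build Zapier NLA tools from the wrapper and hand them to an agent.
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.llms import OpenAI
from langchain.utilities.zapier import ZapierNLAWrapper

zapier = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(
    toolkit.get_tools(),
    OpenAI(temperature=0),
    agent="zero-shot-react-description",
    verbose=True,
)
agent.run("Summarize the last email I received and send the summary to Slack.")
```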
@@ -13,7 +13,6 @@ from langchain.agents.conversational_chat.prompt import (
|
||||
)
|
||||
from langchain.callbacks.base import BaseCallbackManager
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.output_parsers.base import BaseOutputParser
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.prompts.chat import (
|
||||
ChatPromptTemplate,
|
||||
@@ -26,6 +25,7 @@ from langchain.schema import (
|
||||
AIMessage,
|
||||
BaseLanguageModel,
|
||||
BaseMessage,
|
||||
BaseOutputParser,
|
||||
HumanMessage,
|
||||
)
|
||||
from langchain.tools.base import BaseTool
|
||||
@@ -39,6 +39,8 @@ class AgentOutputParser(BaseOutputParser):
|
||||
cleaned_output = text.strip()
|
||||
if "```json" in cleaned_output:
|
||||
_, cleaned_output = cleaned_output.split("```json")
|
||||
if "```" in cleaned_output:
|
||||
cleaned_output, _ = cleaned_output.split("```")
|
||||
if cleaned_output.startswith("```json"):
|
||||
cleaned_output = cleaned_output[len("```json") :]
|
||||
if cleaned_output.startswith("```"):
|
||||
@@ -55,6 +57,10 @@ class ConversationalChatAgent(Agent):
|
||||
|
||||
output_parser: BaseOutputParser
|
||||
|
||||
@property
|
||||
def _agent_type(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def observation_prefix(self) -> str:
|
||||
"""Prefix to append the observation with."""
|
||||
|
||||
@@ -27,7 +27,9 @@ def initialize_agent(
|
||||
`react-docstore`
|
||||
`self-ask-with-search`
|
||||
`conversational-react-description`
|
||||
If None and agent_path is also None, will default to
|
||||
`chat-zero-shot-react-description`,
|
||||
`chat-conversational-react-description`,
|
||||
If None and agent_path is also None, will default to
|
||||
`zero-shot-react-description`.
|
||||
callback_manager: CallbackManager to use. Global callback manager is used if
|
||||
not provided. Defaults to None.
|
||||
|
||||
@@ -4,7 +4,7 @@ from typing import Any, List, Optional

from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.api import news_docs, open_meteo_docs, tmdb_docs
from langchain.chains.api import news_docs, open_meteo_docs, tmdb_docs, podcast_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.pal.base import PALChain
@@ -13,6 +13,7 @@ from langchain.requests import RequestsWrapper
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.requests.tool import RequestsGetTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
@@ -118,6 +119,20 @@ def _get_tmdb_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
    )


def _get_podcast_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
    listen_api_key = kwargs["listen_api_key"]
    chain = APIChain.from_llm_and_api_docs(
        llm,
        podcast_docs.PODCAST_DOCS,
        headers={"X-ListenAPI-Key": listen_api_key},
    )
    return Tool(
        name="Podcast API",
        description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
        func=chain.run,
    )


def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
    return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))

@@ -163,9 +178,14 @@ def _get_bing_search(**kwargs: Any) -> BaseTool:
    return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))


def _get_human_tool(**kwargs: Any) -> BaseTool:
    return HumanInputRun(**kwargs)


_EXTRA_LLM_TOOLS = {
    "news-api": (_get_news_api, ["news_api_key"]),
    "tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
    "podcast-api": (_get_podcast_api, ["listen_api_key"]),
}

_EXTRA_OPTIONAL_TOOLS = {
@@ -180,6 +200,7 @@ _EXTRA_OPTIONAL_TOOLS = {
    "serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
    "searx-search": (_get_searx_search, ["searx_host"]),
    "wikipedia": (_get_wikipedia, ["top_k_results"]),
    "human": (_get_human_tool, ["prompt_func", "input_func"]),
}
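A minimal sketch of loading the new tool through `load_tools` (the key value is a placeholder for a real Listen Notes API key):

```python
from langchain.agents import load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["podcast-api"], llm=llm, listen_api_key="<LISTEN_API_KEY>")
print(tools[0].name)  # "Podcast API"
```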
@@ -3,11 +3,16 @@ import os
from contextlib import contextmanager
from typing import Generator, Optional

from langchain.callbacks.base import BaseCallbackHandler, BaseCallbackManager
from langchain.callbacks.base import (
    BaseCallbackHandler,
    BaseCallbackManager,
    CallbackManager,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.shared import SharedCallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers import SharedLangChainTracer
from langchain.callbacks.wandb_callback import WandbCallbackHandler


def get_callback_manager() -> BaseCallbackManager:
@@ -58,3 +63,17 @@ def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
    manager.add_handler(handler)
    yield handler
    manager.remove_handler(handler)


__all__ = [
    "CallbackManager",
    "OpenAICallbackHandler",
    "SharedCallbackManager",
    "StdOutCallbackHandler",
    "WandbCallbackHandler",
    "get_openai_callback",
    "set_tracing_callback_manager",
    "set_default_callback_manager",
    "set_handler",
    "get_callback_manager",
]
langchain/callbacks/wandb_callback.py (new file, 819 lines)
@@ -0,0 +1,819 @@
import hashlib
import json
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
def import_wandb() -> Any:
    try:
        import wandb  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the wandb callback manager you need to have the `wandb` python "
            "package installed. Please install it with `pip install wandb`"
        )
    return wandb


def import_spacy() -> Any:
    try:
        import spacy  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the wandb callback manager you need to have the `spacy` python "
            "package installed. Please install it with `pip install spacy`"
        )
    return spacy


def import_pandas() -> Any:
    try:
        import pandas  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the wandb callback manager you need to have the `pandas` python "
            "package installed. Please install it with `pip install pandas`"
        )
    return pandas


def import_textstat() -> Any:
    try:
        import textstat  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the wandb callback manager you need to have the `textstat` python "
            "package installed. Please install it with `pip install textstat`"
        )
    return textstat
def _flatten_dict(
    nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_"
) -> Iterable[Tuple[str, Any]]:
    """
    Generator that yields flattened items from a nested dictionary for a flat dict.

    Parameters:
        nested_dict (dict): The nested dictionary to flatten.
        parent_key (str): The prefix to prepend to the keys of the flattened dict.
        sep (str): The separator to use between the parent key and the key of the
            flattened dictionary.

    Yields:
        (str, any): A key-value pair from the flattened dictionary.
    """
    for key, value in nested_dict.items():
        new_key = parent_key + sep + key if parent_key else key
        if isinstance(value, dict):
            yield from _flatten_dict(value, new_key, sep)
        else:
            yield new_key, value


def flatten_dict(
    nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_"
) -> Dict[str, Any]:
    """Flattens a nested dictionary into a flat dictionary.

    Parameters:
        nested_dict (dict): The nested dictionary to flatten.
        parent_key (str): The prefix to prepend to the keys of the flattened dict.
        sep (str): The separator to use between the parent key and the key of the
            flattened dictionary.

    Returns:
        (dict): A flat dictionary.
    """
    flat_dict = {k: v for k, v in _flatten_dict(nested_dict, parent_key, sep)}
    return flat_dict
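For example, flattening a nested serialized config yields flat, loggable keys:

```python
>>> flatten_dict({"llm": {"model_name": "text-davinci-003", "temperature": 0.7}})
{'llm_model_name': 'text-davinci-003', 'llm_temperature': 0.7}
```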
def hash_string(s: str) -> str:
    """Hash a string using sha1.

    Parameters:
        s (str): The string to hash.

    Returns:
        (str): The hashed string.
    """
    return hashlib.sha1(s.encode("utf-8")).hexdigest()


def load_json_to_dict(json_path: Union[str, Path]) -> dict:
    """Load json file to a dictionary.

    Parameters:
        json_path (str): The path to the json file.

    Returns:
        (dict): The dictionary representation of the json file.
    """
    with open(json_path, "r") as f:
        data = json.load(f)
    return data
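`hash_string` exists only to derive stable file names for the displaCy HTML outputs; for example:

```python
>>> hash_string("hello")
'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
```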
def analyze_text(
    text: str,
    complexity_metrics: bool = True,
    visualize: bool = True,
    nlp: Any = None,
    output_dir: Optional[Union[str, Path]] = None,
) -> dict:
    """Analyze text using textstat and spacy.

    Parameters:
        text (str): The text to analyze.
        complexity_metrics (bool): Whether to compute complexity metrics.
        visualize (bool): Whether to visualize the text.
        nlp (spacy.lang): The spacy language model to use for visualization.
        output_dir (str): The directory to save the visualization files to.

    Returns:
        (dict): A dictionary containing the complexity metrics and visualization
            files serialized in a wandb.Html element.
    """
    resp = {}
    textstat = import_textstat()
    wandb = import_wandb()
    spacy = import_spacy()
    if complexity_metrics:
        text_complexity_metrics = {
            "flesch_reading_ease": textstat.flesch_reading_ease(text),
            "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
            "smog_index": textstat.smog_index(text),
            "coleman_liau_index": textstat.coleman_liau_index(text),
            "automated_readability_index": textstat.automated_readability_index(text),
            "dale_chall_readability_score": textstat.dale_chall_readability_score(text),
            "difficult_words": textstat.difficult_words(text),
            "linsear_write_formula": textstat.linsear_write_formula(text),
            "gunning_fog": textstat.gunning_fog(text),
            "text_standard": textstat.text_standard(text),
            "fernandez_huerta": textstat.fernandez_huerta(text),
            "szigriszt_pazos": textstat.szigriszt_pazos(text),
            "gutierrez_polini": textstat.gutierrez_polini(text),
            "crawford": textstat.crawford(text),
            "gulpease_index": textstat.gulpease_index(text),
            "osman": textstat.osman(text),
        }
        resp.update(text_complexity_metrics)

    if visualize and nlp and output_dir is not None:
        doc = nlp(text)

        dep_out = spacy.displacy.render(  # type: ignore
            doc, style="dep", jupyter=False, page=True
        )
        dep_output_path = Path(output_dir, hash_string(f"dep-{text}") + ".html")
        dep_output_path.open("w", encoding="utf-8").write(dep_out)

        ent_out = spacy.displacy.render(  # type: ignore
            doc, style="ent", jupyter=False, page=True
        )
        ent_output_path = Path(output_dir, hash_string(f"ent-{text}") + ".html")
        ent_output_path.open("w", encoding="utf-8").write(ent_out)

        text_visualizations = {
            "dependency_tree": wandb.Html(str(dep_output_path)),
            "entities": wandb.Html(str(ent_output_path)),
        }
        resp.update(text_visualizations)

    return resp
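Note that `analyze_text` imports `textstat`, `wandb`, and `spacy` unconditionally, so all three must be installed even for the metrics-only path. A sketch:

```python
metrics = analyze_text(
    "The quick brown fox jumps over the lazy dog.", visualize=False
)
print(metrics["flesch_reading_ease"])
```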
def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
    """Construct an html element from a prompt and a generation.

    Parameters:
        prompt (str): The prompt.
        generation (str): The generation.

    Returns:
        (wandb.Html): The html element."""
    wandb = import_wandb()
    formatted_prompt = prompt.replace("\n", "<br>")
    formatted_generation = generation.replace("\n", "<br>")

    return wandb.Html(
        f"""
    <p style="color:black;">{formatted_prompt}:</p>
    <blockquote>
      <p style="color:green;">
        {formatted_generation}
      </p>
    </blockquote>
    """,
        inject=False,
    )
class BaseMetadataCallbackHandler:
    """This class handles the metadata and associated function states for callbacks.

    Attributes:
        step (int): The current step.
        starts (int): The number of times the start method has been called.
        ends (int): The number of times the end method has been called.
        errors (int): The number of times the error method has been called.
        text_ctr (int): The number of times the text method has been called.
        ignore_llm_ (bool): Whether to ignore llm callbacks.
        ignore_chain_ (bool): Whether to ignore chain callbacks.
        ignore_agent_ (bool): Whether to ignore agent callbacks.
        always_verbose_ (bool): Whether to always be verbose.
        chain_starts (int): The number of times the chain start method has been called.
        chain_ends (int): The number of times the chain end method has been called.
        llm_starts (int): The number of times the llm start method has been called.
        llm_ends (int): The number of times the llm end method has been called.
        llm_streams (int): The number of times the llm new-token method has been called.
        tool_starts (int): The number of times the tool start method has been called.
        tool_ends (int): The number of times the tool end method has been called.
        agent_ends (int): The number of times the agent end method has been called.
        on_llm_start_records (list): A list of records of the on_llm_start method.
        on_llm_token_records (list): A list of records of the on_llm_new_token method.
        on_llm_end_records (list): A list of records of the on_llm_end method.
        on_chain_start_records (list): A list of records of the on_chain_start method.
        on_chain_end_records (list): A list of records of the on_chain_end method.
        on_tool_start_records (list): A list of records of the on_tool_start method.
        on_tool_end_records (list): A list of records of the on_tool_end method.
        on_agent_finish_records (list): A list of records of the on_agent_finish method.
    """

    def __init__(self) -> None:
        self.step = 0

        self.starts = 0
        self.ends = 0
        self.errors = 0
        self.text_ctr = 0

        self.ignore_llm_ = False
        self.ignore_chain_ = False
        self.ignore_agent_ = False
        self.always_verbose_ = False

        self.chain_starts = 0
        self.chain_ends = 0

        self.llm_starts = 0
        self.llm_ends = 0
        self.llm_streams = 0

        self.tool_starts = 0
        self.tool_ends = 0

        self.agent_ends = 0

        self.on_llm_start_records: list = []
        self.on_llm_token_records: list = []
        self.on_llm_end_records: list = []

        self.on_chain_start_records: list = []
        self.on_chain_end_records: list = []

        self.on_tool_start_records: list = []
        self.on_tool_end_records: list = []

        self.on_text_records: list = []
        self.on_agent_finish_records: list = []
        self.on_agent_action_records: list = []

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return self.always_verbose_

    @property
    def ignore_llm(self) -> bool:
        """Whether to ignore LLM callbacks."""
        return self.ignore_llm_

    @property
    def ignore_chain(self) -> bool:
        """Whether to ignore chain callbacks."""
        return self.ignore_chain_

    @property
    def ignore_agent(self) -> bool:
        """Whether to ignore agent callbacks."""
        return self.ignore_agent_

    def get_custom_callback_meta(self) -> Dict[str, Any]:
        return {
            "step": self.step,
            "starts": self.starts,
            "ends": self.ends,
            "errors": self.errors,
            "text_ctr": self.text_ctr,
            "chain_starts": self.chain_starts,
            "chain_ends": self.chain_ends,
            "llm_starts": self.llm_starts,
            "llm_ends": self.llm_ends,
            "llm_streams": self.llm_streams,
            "tool_starts": self.tool_starts,
            "tool_ends": self.tool_ends,
            "agent_ends": self.agent_ends,
        }

    def reset_callback_meta(self) -> None:
        """Reset the callback metadata."""
        self.step = 0

        self.starts = 0
        self.ends = 0
        self.errors = 0
        self.text_ctr = 0

        self.ignore_llm_ = False
        self.ignore_chain_ = False
        self.ignore_agent_ = False
        self.always_verbose_ = False

        self.chain_starts = 0
        self.chain_ends = 0

        self.llm_starts = 0
        self.llm_ends = 0
        self.llm_streams = 0

        self.tool_starts = 0
        self.tool_ends = 0

        self.agent_ends = 0

        self.on_llm_start_records = []
        self.on_llm_token_records = []
        self.on_llm_end_records = []

        self.on_chain_start_records = []
        self.on_chain_end_records = []

        self.on_tool_start_records = []
        self.on_tool_end_records = []

        self.on_text_records = []
        self.on_agent_finish_records = []
        self.on_agent_action_records = []
        return None
class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
    """Callback Handler that logs to Weights and Biases.

    Parameters:
        job_type (str): The type of job.
        project (str): The project to log to.
        entity (str): The entity to log to.
        tags (list): The tags to log.
        group (str): The group to log to.
        name (str): The name of the run.
        notes (str): The notes to log.
        visualize (bool): Whether to visualize the run.
        complexity_metrics (bool): Whether to log complexity metrics.
        stream_logs (bool): Whether to stream callback actions to W&B.

    For each callback method invoked, this handler formats the method's input
    together with metadata about the state of the LLM run, appends the result
    to the corresponding {method}_records list and the shared action records,
    and logs it to Weights and Biases via run.log().
    """

    def __init__(
        self,
        job_type: Optional[str] = None,
        project: Optional[str] = "langchain_callback_demo",
        entity: Optional[str] = None,
        tags: Optional[Sequence] = None,
        group: Optional[str] = None,
        name: Optional[str] = None,
        notes: Optional[str] = None,
        visualize: bool = False,
        complexity_metrics: bool = False,
        stream_logs: bool = False,
    ) -> None:
        """Initialize callback handler."""

        wandb = import_wandb()
        import_pandas()
        import_textstat()
        spacy = import_spacy()
        super().__init__()

        self.job_type = job_type
        self.project = project
        self.entity = entity
        self.tags = tags
        self.group = group
        self.name = name
        self.notes = notes
        self.visualize = visualize
        self.complexity_metrics = complexity_metrics
        self.stream_logs = stream_logs

        self.temp_dir = tempfile.TemporaryDirectory()
        self.run: wandb.sdk.wandb_run.Run = wandb.init(  # type: ignore
            job_type=self.job_type,
            project=self.project,
            entity=self.entity,
            tags=self.tags,
            group=self.group,
            name=self.name,
            notes=self.notes,
        )
        warning = (
            "The wandb callback is currently in beta and is subject to change "
            "based on updates to `langchain`. Please report any issues to "
            "https://github.com/wandb/wandb/issues with the tag `langchain`."
        )
        wandb.termwarn(
            warning,
            repeat=False,
        )
        self.callback_columns: list = []
        self.action_records: list = []
        self.complexity_metrics = complexity_metrics
        self.visualize = visualize
        self.nlp = spacy.load("en_core_web_sm")
    def _init_resp(self) -> Dict:
        return {k: None for k in self.callback_columns}

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        self.step += 1
        self.llm_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        for prompt in prompts:
            prompt_resp = deepcopy(resp)
            prompt_resp["prompts"] = prompt
            self.on_llm_start_records.append(prompt_resp)
            self.action_records.append(prompt_resp)
            if self.stream_logs:
                self.run.log(prompt_resp)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.step += 1
        self.llm_streams += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.get_custom_callback_meta())

        self.on_llm_token_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        self.step += 1
        self.llm_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_end"})
        resp.update(flatten_dict(response.llm_output or {}))
        resp.update(self.get_custom_callback_meta())

        for generations in response.generations:
            for generation in generations:
                generation_resp = deepcopy(resp)
                generation_resp.update(flatten_dict(generation.dict()))
                generation_resp.update(
                    analyze_text(
                        generation.text,
                        complexity_metrics=self.complexity_metrics,
                        visualize=self.visualize,
                        nlp=self.nlp,
                        output_dir=self.temp_dir.name,
                    )
                )
                self.on_llm_end_records.append(generation_resp)
                self.action_records.append(generation_resp)
                if self.stream_logs:
                    self.run.log(generation_resp)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.step += 1
        self.chain_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        chain_input = inputs["input"]

        if isinstance(chain_input, str):
            input_resp = deepcopy(resp)
            input_resp["input"] = chain_input
            self.on_chain_start_records.append(input_resp)
            self.action_records.append(input_resp)
            if self.stream_logs:
                self.run.log(input_resp)
        elif isinstance(chain_input, list):
            for inp in chain_input:
                input_resp = deepcopy(resp)
                input_resp.update(inp)
                self.on_chain_start_records.append(input_resp)
                self.action_records.append(input_resp)
                if self.stream_logs:
                    self.run.log(input_resp)
        else:
            raise ValueError("Unexpected data format provided!")
    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        self.step += 1
        self.chain_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_end", "outputs": outputs["output"]})
        resp.update(self.get_custom_callback_meta())

        self.on_chain_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        self.step += 1
        self.errors += 1

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_tool_start", "input_str": input_str})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        self.on_tool_start_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        self.step += 1
        self.tool_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_tool_end", "output": output})
        resp.update(self.get_custom_callback_meta())

        self.on_tool_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        self.step += 1
        self.errors += 1

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Run on arbitrary text."""
        self.step += 1
        self.text_ctr += 1

        resp = self._init_resp()
        resp.update({"action": "on_text", "text": text})
        resp.update(self.get_custom_callback_meta())

        self.on_text_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run when agent ends running."""
        self.step += 1
        self.agent_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_finish",
                "output": finish.return_values["output"],
                "log": finish.log,
            }
        )
        resp.update(self.get_custom_callback_meta())

        self.on_agent_finish_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_action",
                "tool": action.tool,
                "tool_input": action.tool_input,
                "log": action.log,
            }
        )
        resp.update(self.get_custom_callback_meta())
        self.on_agent_action_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)
    def _create_session_analysis_df(self) -> Any:
        """Create a dataframe with all the information from the session."""
        pd = import_pandas()
        on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
        on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)

        llm_input_prompts_df = (
            on_llm_start_records_df[["step", "prompts", "name"]]
            .dropna(axis=1)
            .rename({"step": "prompt_step"}, axis=1)
        )
        complexity_metrics_columns = []
        visualizations_columns = []

        if self.complexity_metrics:
            complexity_metrics_columns = [
                "flesch_reading_ease",
                "flesch_kincaid_grade",
                "smog_index",
                "coleman_liau_index",
                "automated_readability_index",
                "dale_chall_readability_score",
                "difficult_words",
                "linsear_write_formula",
                "gunning_fog",
                "text_standard",
                "fernandez_huerta",
                "szigriszt_pazos",
                "gutierrez_polini",
                "crawford",
                "gulpease_index",
                "osman",
            ]

        if self.visualize:
            visualizations_columns = ["dependency_tree", "entities"]

        llm_outputs_df = (
            on_llm_end_records_df[
                [
                    "step",
                    "text",
                    "token_usage_total_tokens",
                    "token_usage_prompt_tokens",
                    "token_usage_completion_tokens",
                ]
                + complexity_metrics_columns
                + visualizations_columns
            ]
            .dropna(axis=1)
            .rename({"step": "output_step", "text": "output"}, axis=1)
        )
        session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
        session_analysis_df["chat_html"] = session_analysis_df[
            ["prompts", "output"]
        ].apply(
            lambda row: construct_html_from_prompt_and_generation(
                row["prompts"], row["output"]
            ),
            axis=1,
        )
        return session_analysis_df
    def flush_tracker(
        self,
        langchain_asset: Any = None,
        reset: bool = True,
        finish: bool = False,
        job_type: Optional[str] = None,
        project: Optional[str] = None,
        entity: Optional[str] = None,
        tags: Optional[Sequence] = None,
        group: Optional[str] = None,
        name: Optional[str] = None,
        notes: Optional[str] = None,
        visualize: Optional[bool] = None,
        complexity_metrics: Optional[bool] = None,
    ) -> None:
        """Flush the tracker and reset the session.

        Args:
            langchain_asset: The langchain asset to save.
            reset: Whether to reset the session.
            finish: Whether to finish the run.
            job_type: The job type.
            project: The project.
            entity: The entity.
            tags: The tags.
            group: The group.
            name: The name.
            notes: The notes.
            visualize: Whether to visualize.
            complexity_metrics: Whether to compute complexity metrics.

        Returns:
            None
        """
        pd = import_pandas()
        wandb = import_wandb()
        action_records_table = wandb.Table(dataframe=pd.DataFrame(self.action_records))
        session_analysis_table = wandb.Table(
            dataframe=self._create_session_analysis_df()
        )
        self.run.log(
            {
                "action_records": action_records_table,
                "session_analysis": session_analysis_table,
            }
        )

        if langchain_asset:
            langchain_asset_path = Path(self.temp_dir.name, "model.json")
            model_artifact = wandb.Artifact(name="model", type="model")
            model_artifact.add(action_records_table, name="action_records")
            model_artifact.add(session_analysis_table, name="session_analysis")
            try:
                langchain_asset.save(langchain_asset_path)
                model_artifact.add_file(str(langchain_asset_path))
                model_artifact.metadata = load_json_to_dict(langchain_asset_path)
            except ValueError:
                langchain_asset.save_agent(langchain_asset_path)
                model_artifact.add_file(str(langchain_asset_path))
                model_artifact.metadata = load_json_to_dict(langchain_asset_path)
            except NotImplementedError as e:
                print("Could not save model.")
                print(repr(e))
                pass
            self.run.log_artifact(model_artifact)

        if finish or reset:
            self.run.finish()
            self.temp_dir.cleanup()
            self.reset_callback_meta()
        if reset:
            self.__init__(  # type: ignore
                job_type=job_type if job_type else self.job_type,
                project=project if project else self.project,
                entity=entity if entity else self.entity,
                tags=tags if tags else self.tags,
                group=group if group else self.group,
                name=name if name else self.name,
                notes=notes if notes else self.notes,
                visualize=visualize if visualize else self.visualize,
                complexity_metrics=complexity_metrics
                if complexity_metrics
                else self.complexity_metrics,
            )
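A usage sketch for the handler as a whole, mirroring how the other callback handlers are attached (assumes an OpenAI key plus `wandb`, `pandas`, `textstat`, and `spacy` with the `en_core_web_sm` model installed):

```python
from langchain.callbacks import CallbackManager, StdOutCallbackHandler, WandbCallbackHandler
from langchain.llms import OpenAI

wandb_callback = WandbCallbackHandler(
    job_type="inference", project="langchain_callback_demo", stream_logs=True
)
manager = CallbackManager([StdOutCallbackHandler(), wandb_callback])
llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)

llm("Tell me a joke about version control.")
# Log the aggregated tables, save the serialized LLM as an artifact, and close the run.
wandb_callback.flush_tracker(llm, reset=False, finish=True)
```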
@@ -1,9 +1,12 @@
"""Chains are easily reusable components which can be linked together."""
from langchain.chains.api.base import APIChain
from langchain.chains.chat_vector_db.base import ChatVectorDBChain
from langchain.chains.combine_documents.base import AnalyzeDocumentChain
from langchain.chains.constitutional_ai.base import ConstitutionalChain
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.conversational_retrieval.base import (
    ChatVectorDBChain,
    ConversationalRetrievalChain,
)
from langchain.chains.graph_qa.base import GraphQAChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
@@ -16,15 +19,17 @@ from langchain.chains.loading import load_chain
from langchain.chains.mapreduce import MapReduceChain
from langchain.chains.moderation import OpenAIModerationChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_generation.base import QAGenerationChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
from langchain.chains.sql_database.base import (
    SQLDatabaseChain,
    SQLDatabaseSequentialChain,
)
from langchain.chains.transform import TransformChain
from langchain.chains.vector_db_qa.base import VectorDBQA

__all__ = [
    "ConversationChain",
@@ -52,4 +57,8 @@ __all__ = [
    "ChatVectorDBChain",
    "GraphQAChain",
    "ConstitutionalChain",
    "QAGenerationChain",
    "RetrievalQA",
    "RetrievalQAWithSourcesChain",
    "ConversationalRetrievalChain",
]
@@ -8,9 +8,9 @@ from pydantic import BaseModel, Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts import BasePromptTemplate
from langchain.requests import RequestsWrapper
from langchain.schema import BaseLanguageModel


class APIChain(Chain, BaseModel):
@@ -84,7 +84,7 @@ class APIChain(Chain, BaseModel):
    @classmethod
    def from_llm_and_api_docs(
        cls,
        llm: BaseLLM,
        llm: BaseLanguageModel,
        api_docs: str,
        headers: Optional[dict] = None,
        api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
langchain/chains/api/podcast_docs.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# flake8: noqa
PODCAST_DOCS = """API documentation:
Endpoint: https://listen-api.listennotes.com/api/v2
GET /search

This API is for searching podcasts or episodes.

Query parameters table:
q | string | Search term, e.g., person, place, topic... You can use double quotes to do verbatim match, e.g., "game of thrones". Otherwise, it's fuzzy search. | required
type | string | What type of contents do you want to search for? Available values: episode, podcast, curated. default: episode | optional
page_size | integer | The maximum number of search results per page. A valid value should be an integer between 1 and 10 (inclusive). default: 3 | optional
language | string | Limit search results to a specific language, e.g., English, Chinese ... If not specified, it'll be any language. It works only when type is episode or podcast. | optional
region | string | Limit search results to a specific region (e.g., us, gb, in...). If not specified, it'll be any region. It works only when type is episode or podcast. | optional
len_min | integer | Minimum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional
len_max | integer | Maximum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional

Response schema (JSON object):
next_offset | integer | optional
total | integer | optional
results | array[object] (Episode / Podcast List Result Object)

Each object in the "results" key has the following schema:
listennotes_url | string | optional
id | integer | optional
title_highlighted | string | optional

Use page_size: 3
"""
@@ -1 +0,0 @@
"""Chain for chatting with a vector database."""
@@ -1,12 +1,13 @@
"""Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any, Dict, List
from typing import Any, Dict, List, Optional

from langchain.chains.base import Chain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.constitutional_ai.principles import PRINCIPLES
from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class ConstitutionalChain(Chain):
@@ -42,10 +43,19 @@ class ConstitutionalChain(Chain):
    critique_chain: LLMChain
    revision_chain: LLMChain

    @classmethod
    def get_principles(
        cls, names: Optional[List[str]] = None
    ) -> List[ConstitutionalPrinciple]:
        if names is None:
            return list(PRINCIPLES.values())
        else:
            return [PRINCIPLES[name] for name in names]

    @classmethod
    def from_llm(
        cls,
        llm: BaseLLM,
        llm: BaseLanguageModel,
        chain: LLMChain,
        critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT,
        revision_prompt: BasePromptTemplate = REVISION_PROMPT,
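The new `get_principles` classmethod is a lookup into the `PRINCIPLES` registry introduced below; at this commit the registry is still empty, so the call only becomes useful once principles are registered. A sketch of the intended use:

```python
from langchain.chains.constitutional_ai.base import ConstitutionalChain

# No names: return every registered principle (an empty list at this commit).
all_principles = ConstitutionalChain.get_principles()

# With names: look up specific entries (raises KeyError until the registry is
# populated; "illustrative-principle" is a placeholder name).
# selected = ConstitutionalChain.get_principles(names=["illustrative-principle"])
```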
langchain/chains/constitutional_ai/principles.py (new file, 5 lines)
@@ -0,0 +1,5 @@
# flake8: noqa
from typing import Dict
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple

PRINCIPLES: Dict[str, ConstitutionalPrinciple] = {}
langchain/chains/conversational_retrieval/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Chain for chatting with a vector database."""
@@ -1,18 +1,20 @@
"""Chain for chatting with a vector database."""
from __future__ import annotations

import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import BaseModel
from pydantic import BaseModel, Extra, Field, root_validator

from langchain.chains.base import Chain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.schema import BaseLanguageModel, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


@@ -25,21 +27,22 @@ def _get_chat_history(chat_history: List[Tuple[str, str]]) -> str:
    return buffer


class ChatVectorDBChain(Chain, BaseModel):
    """Chain for chatting with a vector database."""
class BaseConversationalRetrievalChain(Chain, BaseModel):
    """Chain for chatting with an index."""

    vectorstore: VectorStore
    combine_docs_chain: BaseCombineDocumentsChain
    question_generator: LLMChain
    output_key: str = "answer"
    return_source_documents: bool = False
    top_k_docs_for_context: int = 4
    get_chat_history: Optional[Callable[[Tuple[str, str]], str]] = None
    """Return the source documents."""

    @property
    def _chain_type(self) -> str:
        return "chat-vector-db"
    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
@@ -57,44 +60,22 @@ class ChatVectorDBChain(Chain, BaseModel):
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        qa_prompt: Optional[BasePromptTemplate] = None,
        chain_type: str = "stuff",
        **kwargs: Any,
    ) -> ChatVectorDBChain:
        """Load chain from LLM."""
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            prompt=qa_prompt,
        )
        condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
        return cls(
            vectorstore=vectorstore,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            **kwargs,
        )
    @abstractmethod
    def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
        """Get docs."""

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        vectordbkwargs = inputs.get("vectordbkwargs", {})

        if chat_history_str:
            new_question = self.question_generator.run(
                question=question, chat_history=chat_history_str
            )
        else:
            new_question = question
        docs = self.vectorstore.similarity_search(
            new_question, k=self.top_k_docs_for_context, **vectordbkwargs
        )
        docs = self._get_docs(new_question, inputs)
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
@@ -108,7 +89,6 @@ class ChatVectorDBChain(Chain, BaseModel):
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        vectordbkwargs = inputs.get("vectordbkwargs", {})
        if chat_history_str:
            new_question = await self.question_generator.arun(
                question=question, chat_history=chat_history_str
@@ -116,9 +96,7 @@ class ChatVectorDBChain(Chain, BaseModel):
        else:
            new_question = question
        # TODO: This blocks the event loop, but it's not clear how to avoid it.
        docs = self.vectorstore.similarity_search(
            new_question, k=self.top_k_docs_for_context, **vectordbkwargs
        )
        docs = self._get_docs(new_question, inputs)
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
@@ -132,3 +110,87 @@ class ChatVectorDBChain(Chain, BaseModel):
        if self.get_chat_history:
            raise ValueError("Chain not savable when `get_chat_history` is not None.")
        super().save(file_path)
class ConversationalRetrievalChain(BaseConversationalRetrievalChain, BaseModel):
    """Chain for chatting with an index."""

    retriever: BaseRetriever

    def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
        return self.retriever.get_relevant_documents(question)

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        retriever: BaseRetriever,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        qa_prompt: Optional[BasePromptTemplate] = None,
        chain_type: str = "stuff",
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            prompt=qa_prompt,
        )
        condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
        return cls(
            retriever=retriever,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            **kwargs,
        )


class ChatVectorDBChain(BaseConversationalRetrievalChain, BaseModel):
    """Chain for chatting with a vector database."""

    vectorstore: VectorStore = Field(alias="vectorstore")
    top_k_docs_for_context: int = 4
    search_kwargs: dict = Field(default_factory=dict)

    @property
    def _chain_type(self) -> str:
        return "chat-vector-db"

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`ChatVectorDBChain` is deprecated - "
            "please use `from langchain.chains import ConversationalRetrievalChain`"
        )
        return values

    def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
        vectordbkwargs = inputs.get("vectordbkwargs", {})
        full_kwargs = {**self.search_kwargs, **vectordbkwargs}
        return self.vectorstore.similarity_search(
            question, k=self.top_k_docs_for_context, **full_kwargs
        )

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        qa_prompt: Optional[BasePromptTemplate] = None,
        chain_type: str = "stuff",
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            prompt=qa_prompt,
        )
        condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
        return cls(
            vectorstore=vectorstore,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            **kwargs,
        )
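A minimal sketch of the new retriever-based entry point (assumes an OpenAI key; `as_retriever()` is assumed to be available on the vector store to expose the `BaseRetriever` interface):

```python
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(
    ["LangChain is a framework for developing LLM applications."],
    OpenAIEmbeddings(),
)
qa = ConversationalRetrievalChain.from_llm(
    OpenAI(temperature=0), retriever=vectorstore.as_retriever()
)
result = qa({"question": "What is LangChain?", "chat_history": []})
print(result["answer"])
```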
langchain/chains/conversational_retrieval/prompts.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate

_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
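`CONDENSE_QUESTION_PROMPT` is a plain template, so the inputs the question generator receives can be inspected directly:

```python
print(
    CONDENSE_QUESTION_PROMPT.format(
        chat_history="Human: Who created LangChain?\nAssistant: Harrison Chase.",
        question="When was it first released?",
    )
)
```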
@@ -6,8 +6,8 @@ from pydantic import BaseModel, Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.prompt import PROMPT
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.utilities.bash import BashProcess


@@ -21,7 +21,7 @@ class LLMBashChain(Chain, BaseModel):
            llm_bash = LLMBashChain(llm=OpenAI())
    """

    llm: BaseLLM
    llm: BaseLanguageModel
    """LLM wrapper to use."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:
@@ -1,26 +1,17 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate

_PROMPT_TEMPLATE = """You are GPT-3, and you can't do math.
_PROMPT_TEMPLATE = """Translate a math problem into Python code that can be executed in Python 3 REPL. Use the output of running this code to answer the question.

You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.

So we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we’ll take care of the rest:

Question: ${{Question with hard calculation.}}
Question: ${{Question with math problem.}}
```python
${{Code that prints what you need to know}}
${{Code that solves the problem and prints the solution}}
```
```output
${{Output of your code}}
${{Output of running the code}}
```
Answer: ${{Answer}}

Otherwise, use this simpler format:

Question: ${{Question without hard calculation}}
Answer: ${{Answer}}

Begin.

Question: What is 37593 * 67?
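The rewritten prompt still backs `LLMMathChain`, whose usage is unchanged; a sketch assuming an OpenAI key:

```python
from langchain.chains import LLMMathChain
from langchain.llms import OpenAI

llm_math = LLMMathChain(llm=OpenAI(temperature=0), verbose=True)
llm_math.run("What is 37593 * 67?")
```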
@@ -20,8 +20,8 @@ from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import VectorDBQA
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
@@ -12,15 +12,15 @@ from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.python import PythonREPL
from langchain.schema import BaseLanguageModel


class PALChain(Chain, BaseModel):
    """Implements Program-Aided Language Models."""

    llm: BaseLLM
    llm: BaseLanguageModel
    prompt: BasePromptTemplate
    stop: str = "\n\n"
    get_answer_expr: str = "print(solution())"
@@ -68,7 +68,7 @@ class PALChain(Chain, BaseModel):
        return output

    @classmethod
    def from_math_prompt(cls, llm: BaseLLM, **kwargs: Any) -> PALChain:
    def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
        """Load PAL from math prompt."""
        return cls(
            llm=llm,
@@ -79,7 +79,9 @@ class PALChain(Chain, BaseModel):
        )

    @classmethod
    def from_colored_object_prompt(cls, llm: BaseLLM, **kwargs: Any) -> PALChain:
    def from_colored_object_prompt(
        cls, llm: BaseLanguageModel, **kwargs: Any
    ) -> PALChain:
        """Load PAL from colored object prompt."""
        return cls(
            llm=llm,
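The `from_math_prompt` entry point works the same, now accepting any `BaseLanguageModel`; a sketch assuming an OpenAI key:

```python
from langchain.chains import PALChain
from langchain.llms import OpenAI

pal_chain = PALChain.from_math_prompt(OpenAI(temperature=0), verbose=True)
pal_chain.run(
    "Olivia has $23. She bought five bagels for $3 each. "
    "How much money does she have left?"
)
```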
Some files were not shown because too many files have changed in this diff.