Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-05 00:30:18 +00:00)

Compare commits: harrison/k ... harrison/d
88 Commits
Commits (SHA1):

159cc166c2 075de91675 d5d50c39e6 1f18698b2a ef4945af6b 7de2ada3ea 262d4cb9a8 951c158106
85e4dd7fc3 b1b4a4065a 08f23c95d9 3cf493b089 e635c86145 779790167e 3161ced4bc 3d6fcb85dc
3701b2901e 280cb4160d 80d8db5f60 1a8790d808 34840f3aee 8685d53adc 2f6833d433 dd90fd02d5
07766a69f3 aa854988bf 96ebe98dc2 45f05fc939 cf9c3f54f7 fbc0c85b90 276940fd9b cdff6c8181
cd45adbea2 aff44d0a98 8a95fdaee1 5d8dc83ede b157e0c1c3 40e9488055 55efbb8a7e d6bbf395af
606605925d f93c011456 3c24684522 b84d190fd0 aad4bff098 3ea6d9c4d2 ced412e1c1 1279c8de39
c7779c800a 6f4f771897 4a327dd1d6 d4edd3c312 e72074f78a 0b29e68c17 4d7fdb8957 656efe6ef3
362586fe8b 63aa28e2a6 c3dfbdf0da a2280f321f 4e13cef05a e5c1659864 2d098e8869 8965a2f0af
e222ea4ee8 e326939759 7cf46b3fee 84cd825a0e 0a1b1806e9 9ee2713272 b3234bf3b0 562d9891ea
56aff797c0 d53ff270e0 df6c33d4b3 039d05c808 aed9f9febe 72b461e257 cb646082ba bd4a2a670b
6e98ab01e1 c0ad5d13b8 acd86d33bc 9707eda83c 7e550df6d4 c9b5a30b37 cb04ba0136 5903a93f3d
.gitignore (vendored): 3 lines added

@@ -135,3 +135,6 @@ dmypy.json
 # macOS display setting files
 .DS_Store
+
+# asdf tool versions
+.tool-versions
docs/conf.py

@@ -23,7 +23,7 @@ with open("../pyproject.toml") as f:
 # -- Project information -----------------------------------------------------

 project = "🦜🔗 LangChain"
-copyright = "2022, Harrison Chase"
+copyright = "2023, Harrison Chase"
 author = "Harrison Chase"

 version = data["tool"]["poetry"]["version"]

@@ -46,6 +46,7 @@ extensions = [
     "sphinx.ext.viewcode",
     "sphinxcontrib.autodoc_pydantic",
     "myst_nb",
     "sphinx_copybutton",
     "sphinx_panels",
+    "IPython.sphinxext.ipython_console_highlighting",
 ]
@@ -34,7 +34,8 @@ search = GoogleSerperAPIWrapper()
 tools = [
     Tool(
         name="Intermediate Answer",
-        func=search.run
+        func=search.run,
         description="useful for when you need to ask with search"
     )
 ]
docs/ecosystem/pgvector.md (new file, 29 lines)

# PGVector

This page covers how to use the Postgres [PGVector](https://github.com/pgvector/pgvector) ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific PGVector wrappers.

## Installation
- Install the Python package with `pip install pgvector`

## Setup
1. The first step is to create a database with the `pgvector` extension installed.

   Follow the steps at [PGVector Installation Steps](https://github.com/pgvector/pgvector#installation) to install the database and the extension. The Docker image is the easiest way to get started.

## Wrappers

### VectorStore

There exists a wrapper around Postgres vector databases, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores.pgvector import PGVector
```

### Usage

For a more detailed walkthrough of the PGVector wrapper, see [this notebook](../modules/indexes/vectorstore_examples/pgvector.ipynb).
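The reference above stops at the import, so here is a minimal, hedged usage sketch. The connection string, collection name, and sample texts are placeholders, and the `PGVector.from_texts` keywords are assumed from this era of the codebase:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.pgvector import PGVector

# Assumed connection details for a local Postgres with the pgvector extension;
# adjust user, password, host, and database name for your setup.
CONNECTION_STRING = "postgresql+psycopg2://postgres:postgres@localhost:5432/vectordb"

embeddings = OpenAIEmbeddings()
db = PGVector.from_texts(
    texts=["LangChain integrates with pgvector.", "Postgres can store embeddings."],
    embedding=embeddings,
    collection_name="demo_collection",  # hypothetical name
    connection_string=CONNECTION_STRING,
)

# Semantic search against the stored texts.
docs = db.similarity_search("How do I store embeddings in Postgres?")
print(docs[0].page_content)
```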
docs/ecosystem/promptlayer.md

@@ -25,9 +25,25 @@ from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(pl_tags=["langchain-requests", "chatbot"])
```

To get the PromptLayer request id, use the argument `return_pl_id` when instantiating the LLM:
```python
from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(return_pl_id=True)
```
This will add the PromptLayer request ID in the `generation_info` field of the `Generation` returned when using `.generate` or `.agenerate`.

For example:
```python
llm_results = llm.generate(["hello world"])
for res in llm_results.generations:
    print("pl request id: ", res[0].generation_info["pl_request_id"])
```
You can use the PromptLayer request ID to add a prompt, score, or other metadata to your request. [Read more about it here](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).

This LLM is identical to the [OpenAI LLM](./openai), except that
- all your requests will be logged to your PromptLayer account
- you can add `pl_tags` when instantiating to tag your requests on PromptLayer
- you can add `return_pl_id` when instantiating to return a PromptLayer request id to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9)

-PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](../modules/chat/examples/promptlayer_chat_openai.ipynb)
+PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](../modules/chat/examples/promptlayer_chat_openai.ipynb) and `PromptLayerOpenAIChat`
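The page links out for the tracking API itself; the following is a hedged sketch of what using the request id looks like, assuming the `promptlayer` package exposes `track.score` and `track.metadata` as in PromptLayer's docs and that `PROMPTLAYER_API_KEY` is set:

```python
import promptlayer  # assumes PROMPTLAYER_API_KEY is configured
from langchain.llms import PromptLayerOpenAI

llm = PromptLayerOpenAI(return_pl_id=True)
llm_results = llm.generate(["hello world"])

for res in llm_results.generations:
    pl_request_id = res[0].generation_info["pl_request_id"]
    # Attach a score and free-form metadata to the logged request
    # (hypothetical values; the calls are PromptLayer's tracking API).
    promptlayer.track.score(request_id=pl_request_id, score=90)
    promptlayer.track.metadata(request_id=pl_request_id, metadata={"user_id": "demo-user"})
```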
docs/ecosystem/unstructured.md

@@ -17,9 +17,12 @@ This page is broken into two parts: installation and setup, and then references
 - `poppler-utils`
 - `tesseract-ocr`
 - `libreoffice`
-- If you are parsing PDFs, run the following to install the `detectron2` model, which
+- If you are parsing PDFs using the `"hi_res"` strategy, run the following to install the `detectron2` model, which
   `unstructured` uses for layout detection:
     - `pip install "detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.6#egg=detectron2"`
+- If `detectron2` is not installed, `unstructured` will fall back to processing PDFs
+  using the `"fast"` strategy, which uses `pdfminer` directly and doesn't require
+  `detectron2`.

A sketch of choosing between the two strategies appears below.
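This is a minimal sketch, assuming `UnstructuredPDFLoader` forwards extra keyword arguments such as `strategy` to `unstructured`'s `partition_pdf`; the file path is a placeholder:

```python
from langchain.document_loaders import UnstructuredPDFLoader

# "fast" parses with pdfminer and needs no detectron2;
# "hi_res" runs detectron2-based layout detection when it is installed.
loader = UnstructuredPDFLoader("example_data/report.pdf", strategy="fast")  # placeholder path
docs = loader.load()
print(docs[0].page_content[:200])
```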
## Wrappers
docs/gallery.rst (adds the Mynd entry)

@@ -322,5 +322,14 @@ Proprietary

By Zahid Khawaja, this demo utilizes question answering to answer questions about a given website. A followup added this for `YouTube videos <https://twitter.com/chillzaza_/status/1593739682013220865?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ>`_, and then another followup added it for `Wikipedia <https://twitter.com/chillzaza_/status/1594847151238037505?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ>`_.

---

.. link-button:: https://mynd.so
    :type: url
    :text: Mynd
    :classes: stretched-link btn-lg

+++

A journaling app for self-care that uses AI to uncover insights and patterns over time.
docs/index.rst (adds the Querying Tabular Data use case)

@@ -97,6 +97,8 @@ The above modules can be used in a variety of ways. LangChain also provides guid

 - `Summarization <./use_cases/summarization.html>`_: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation.

+- `Querying Tabular Data <./use_cases/tabular.html>`_: If you want to understand how to use LLMs to query data that is stored in a tabular format (csvs, SQL, dataframes, etc) you should read this page.
+
 - `Evaluation <./use_cases/evaluation.html>`_: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.

 - `Generate similar examples <./use_cases/generate_examples.html>`_: Generating similar examples to a given input. This is a common use case for many applications, and LangChain provides some prompts/chains for assisting in this.

@@ -117,6 +119,7 @@ The above modules can be used in a variety of ways. LangChain also provides guid

    ./use_cases/combine_docs.md
    ./use_cases/question_answering.md
    ./use_cases/summarization.md
+   ./use_cases/tabular.rst
    ./use_cases/evaluation.rst
    ./use_cases/model_laboratory.ipynb
Typo fix in a notebook markdown cell:

@@ -92,7 +92,7 @@
     "id": "f4814175-964d-42f1-aa9d-22801ce1e912",
     "metadata": {},
     "source": [
-        "## Initalize Toolkit and Agent\n",
+        "## Initialize Toolkit and Agent\n",
         "\n",
         "First, we'll create an agent with a single vectorstore."
     ]
docs/modules/agents/examples/chat_conversation_agent.ipynb (new file, 309 lines)

# Conversation Agent (for Chat Models)

This notebook walks through using an agent optimized for conversation, using ChatModels. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.

This is accomplished with a specific type of agent (`chat-conversational-react-description`) which expects to be used with a memory component.

```python
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"

from langchain.agents import Tool, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.utilities import SerpAPIWrapper

search = SerpAPIWrapper()
tools = [
    Tool(
        name="Current Search",
        func=search.run,
        description="useful for when you need to answer questions about current events or the current state of the world. the input to this should be a single search term."
    ),
]

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm = ChatOpenAI(temperature=0)
agent_chain = initialize_agent(tools, llm, agent="chat-conversational-react-description", verbose=True, memory=memory)
```

```python
agent_chain.run(input="hi, i am bob")
```
The agent answers directly, with no tool call:
> {"action": "Final Answer", "action_input": "Hello Bob! How can I assist you today?"}

```python
agent_chain.run(input="what's my name?")
```
> {"action": "Final Answer", "action_input": "Your name is Bob."}

```python
agent_chain.run("what are some good dinners to make this week, if i like thai food?")
```
The agent calls the search tool ({"action": "Current Search", "action_input": "Thai food dinner recipes"}), observes "59 easy Thai recipes for any night of the week · Marion Grasby's Thai spicy chilli and basil fried rice · Thai curry noodle soup · Marion Grasby's ...", and answers:
> "Here are some Thai food dinner recipes you can make this week: Thai spicy chilli and basil fried rice, Thai curry noodle soup, and many more. You can find 59 easy Thai recipes for any night of the week on Marion Grasby's website."

```python
agent_chain.run(input="tell me the last letter in my name, and also tell me who won the world cup in 1978?")
```
The agent searches "who won the world cup in 1978", observes a snippet about the Argentina national football team (the reigning world champions, having won the most recent World Cup in 2022), and answers:
> "The last letter in your name is 'b'. The Argentina national football team won the World Cup in 1978."

```python
agent_chain.run(input="whats the weather like in pomfret?")
```
The agent searches "weather in pomfret" and answers:
> "The weather in Pomfret is mostly cloudy with gusty winds developing during the afternoon. A few flurries or snow showers are possible. High near 40F. Winds NNW at 20 to 30 mph."
@@ -61,7 +61,8 @@
     "tools = [\n",
     "    Tool(\n",
     "        name=\"Intermediate Answer\",\n",
-    "        func=search.run\n",
+    "        func=search.run,\n",
     "        description=\"useful for when you need to ask with search\"\n",
     "    )\n",
     "]\n",
     "\n",
docs/modules/agents/examples/sharedmemory_for_tools.ipynb (new file, 552 lines)

# Adding SharedMemory to an Agent and its Tools

This notebook goes over adding memory to **both** an Agent and its tools. Before going through this notebook, please walk through the following notebooks, as this will build on top of both of them:

- [Adding memory to an LLM Chain](../../memory/examples/adding_memory.ipynb)
- [Custom Agents](custom_agent.ipynb)

We are going to create a custom Agent. The agent has access to a conversation memory, a search tool, and a summarization tool, and the summarization tool also needs access to the conversation memory.

```python
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.utilities import GoogleSearchAPIWrapper

template = """This is a conversation between a human and a bot:

{chat_history}

Write a summary of the conversation for {input}:
"""

prompt = PromptTemplate(
    input_variables=["input", "chat_history"],
    template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
summry_chain = LLMChain(
    llm=OpenAI(),
    prompt=prompt,
    verbose=True,
    memory=readonlymemory,  # use the read-only memory to prevent the tool from modifying the memory
)

search = GoogleSearchAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events"
    ),
    Tool(
        name="Summary",
        func=summry_chain.run,
        description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary."
    )
]

prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"

{chat_history}
Question: {input}
{agent_scratchpad}"""

prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["input", "chat_history", "agent_scratchpad"]
)
```

We can now construct the LLMChain, with the Memory object, and then create the agent.

```python
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
```

```python
agent_chain.run(input="What is ChatGPT?")
```
The agent decides to search (Action: Search, Action Input: "ChatGPT"), observes a page of search snippets about ChatGPT, and answers:
> "ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting."

To test the memory of this agent, we can ask a followup question that relies on information in the previous exchange to be answered correctly.

```python
agent_chain.run(input="Who developed it?")
```
> "ChatGPT was developed by OpenAI."

```python
agent_chain.run(input="Thanks. Summarize the conversation, for my daughter 5 years old.")
```
The agent invokes the Summary tool with input "My daughter 5 years old"; the tool's prompt is formatted with the full conversation history, and the chain returns:
> "ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting."

Confirm that the memory was correctly updated.

```python
print(agent_chain.memory.buffer)
```
Output:

    Human: What is ChatGPT?
    AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.
    Human: Who developed it?
    AI: ChatGPT was developed by OpenAI.
    Human: Thanks. Summarize the conversation, for my daughter 5 years old.
    AI: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.

For comparison, below is a bad example that uses the same memory for both the Agent and the tool.

```python
## This is a bad practice for using the memory.
## Use the ReadOnlySharedMemory class, as shown above.

template = """This is a conversation between a human and a bot:

{chat_history}

Write a summary of the conversation for {input}:
"""

prompt = PromptTemplate(
    input_variables=["input", "chat_history"],
    template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
summry_chain = LLMChain(
    llm=OpenAI(),
    prompt=prompt,
    verbose=True,
    memory=memory,  # <--- this is the only change
)

search = GoogleSearchAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events"
    ),
    Tool(
        name="Summary",
        func=summry_chain.run,
        description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary."
    )
]

prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"

{chat_history}
Question: {input}
{agent_scratchpad}"""

prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["input", "chat_history", "agent_scratchpad"]
)

llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
```

The first two questions ("What is ChatGPT?" and "Who developed it?") run exactly as before, returning the same answers. The summarization request also returns a reasonable answer:

```python
agent_chain.run(input="Thanks. Summarize the conversation, for my daughter 5 years old.")
```
> "ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images."

The final answer is not wrong, but we see that the third Human input in the memory actually came from the agent, because the memory was modified by the summary tool.

```python
print(agent_chain.memory.buffer)
```
Output:

    Human: What is ChatGPT?
    AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.
    Human: Who developed it?
    AI: ChatGPT was developed by OpenAI.
    Human: My daughter 5 years old
    AI:
    The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.
    Human: Thanks. Summarize the conversation, for my daughter 5 years old.
    AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.
@@ -24,11 +24,13 @@
     "tools = [\n",
     "    Tool(\n",
     "        name=\"Search\",\n",
-    "        func=docstore.search\n",
+    "        func=docstore.search,\n",
     "        description=\"useful for when you need to ask with search\"\n",
     "    ),\n",
     "    Tool(\n",
     "        name=\"Lookup\",\n",
-    "        func=docstore.lookup\n",
+    "        func=docstore.lookup,\n",
     "        description=\"useful for when you need to ask with lookup\"\n",
     "    )\n",
     "]\n",
     "\n",
@@ -52,7 +52,8 @@
     "tools = [\n",
     "    Tool(\n",
     "        name=\"Intermediate Answer\",\n",
-    "        func=search.run\n",
+    "        func=search.run,\n",
     "        description=\"useful for when you need to ask with search\"\n",
     "    )\n",
     "]\n",
     "\n",
@@ -13,3 +13,4 @@ For more detailed information on tools, and different types of tools in LangChai
 Toolkits are groups of tools that are best used together.
 They allow you to logically group and initialize a set of tools that share a particular resource (such as a database connection or json object).
 They can be used to construct an agent for a specific use-case.
+For more detailed information on toolkits and their use cases, see [this documentation](how_to_guides.rst#agent-toolkits) (the "Agent Toolkits" section).
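As a concrete illustration of a toolkit sharing one resource, here is a minimal sketch built on the SQL toolkit; the SQLite path is a placeholder, and the import locations are assumed from the agent-toolkits layout of this era of the codebase:

```python
from langchain.agents.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

# Every tool in the toolkit shares this single database connection.
db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # placeholder path
toolkit = SQLDatabaseToolkit(db=db)

agent_executor = create_sql_agent(
    llm=OpenAI(temperature=0),
    toolkit=toolkit,
    verbose=True,
)
agent_executor.run("How many tracks are in the Track table?")
```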
docs/modules/chains/examples/dbpedia.ipynb (new file, 106 lines)

# DBPedia

This example shows how you can use LLMs to interact in natural language with a SPARQL database.

```python
from langchain.chat_models import ChatOpenAI
from langchain.chains.dbpedia.base import DBPediaChain

model = ChatOpenAI(model_name="gpt-4")
chain = DBPediaChain.from_llm(model, verbose=True)
chain.run("what is the capital of wakanda?")
```
The chain writes a SPARQL query:

    SELECT ?capital WHERE {
        ?country rdfs:label "Wakanda"@en .
        ?country dbo:capital ?capital .
        ?capital rdfs:label ?capitalLabel .
        FILTER (LANG(?capitalLabel) = 'en')
    }

receives an empty result set ({'head': {'link': [], 'vars': ['capital']}, 'results': {'distinct': False, 'ordered': True, 'bindings': []}}), and answers:
> 'There is no capital information available for Wakanda in the provided SPARQL query response. This is because Wakanda is a fictional country in the Marvel Cinematic Universe and does not have a real-world capital.'
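For a sense of what such a chain does under the hood, here is a hedged sketch of sending generated SPARQL to DBpedia's public endpoint directly, using the third-party SPARQLWrapper package (the query and country are illustrative):

```python
from SPARQLWrapper import SPARQLWrapper, JSON

# DBpedia's public Virtuoso endpoint predefines common prefixes like rdfs: and dbo:.
sparql = SPARQLWrapper("https://dbpedia.org/sparql")
sparql.setQuery("""
SELECT ?capitalLabel WHERE {
  ?country rdfs:label "France"@en .
  ?country dbo:capital ?capital .
  ?capital rdfs:label ?capitalLabel .
  FILTER (LANG(?capitalLabel) = 'en')
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

# Print each binding of the selected variable.
for binding in results["results"]["bindings"]:
    print(binding["capitalLabel"]["value"])
```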
SQLDatabaseChain notebook (the sample-rows comment blocks in the table info were missing their closing */)

@@ -377,18 +377,19 @@
 	FOREIGN KEY("GenreId") REFERENCES "Genre" ("GenreId"),
 	FOREIGN KEY("AlbumId") REFERENCES "Album" ("AlbumId")
 )

 SELECT * FROM 'Track' LIMIT 2;
 /*
 2 rows from Track table:
 TrackId	Name	AlbumId	MediaTypeId	GenreId	Composer	Milliseconds	Bytes	UnitPrice
 1	For Those About To Rock (We Salute You)	1	1	1	Angus Young, Malcolm Young, Brian Johnson	343719	11170334	0.99
 2	Balls to the Wall	2	2	1	None	342562	5510424	0.99
+*/

stderr:
 /home/jon/projects/langchain/langchain/sql_database.py:121: SAWarning: Dialect sqlite+pysqlite does *not* support Decimal objects natively, and SQLAlchemy must convert from floating point - rounding errors and other issues may occur. Please consider storing Decimal numbers as strings or integers on this platform for lossless storage.
 /home/jon/projects/langchain/langchain/sql_database.py:135: SAWarning: Dialect sqlite+pysqlite does *not* support Decimal objects natively, and SQLAlchemy must convert from floating point - rounding errors and other issues may occur. Please consider storing Decimal numbers as strings or integers on this platform for lossless storage.
   sample_rows = connection.execute(command)

@@ -467,12 +468,13 @@
 	"Composer" NVARCHAR(220),
 	PRIMARY KEY ("TrackId")
 )

 SELECT * FROM 'Track' LIMIT 3;
 /*
 3 rows from Track table:
 TrackId	Name	Composer
 1	For Those About To Rock (We Salute You)	Angus Young, Malcolm Young, Brian Johnson
 2	Balls to the Wall	None
-3	My favorite song ever	The coolest composer of all time"""
+3	My favorite song ever	The coolest composer of all time
+*/"""
 }

@@ -492,11 +494,12 @@
 	"Name" NVARCHAR(120),
 	PRIMARY KEY ("PlaylistId")
 )

 SELECT * FROM 'Playlist' LIMIT 2;
 /*
 2 rows from Playlist table:
 PlaylistId	Name
 1	Music
 2	Movies
+*/

 CREATE TABLE Track (
 	"TrackId" INTEGER NOT NULL,

@@ -504,12 +507,13 @@
 	"Composer" NVARCHAR(220),
 	PRIMARY KEY ("TrackId")
 )

 SELECT * FROM 'Track' LIMIT 3;
 /*
 3 rows from Track table:
 TrackId	Name	Composer
 1	For Those About To Rock (We Salute You)	Angus Young, Malcolm Young, Brian Johnson
 2	Balls to the Wall	None
 3	My favorite song ever	The coolest composer of all time
+*/

@@ -675,7 +679,7 @@ (notebook metadata)
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.9"
+"version": "3.9.1"

 "nbformat": 4,
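The hunks above patch hand-written `custom_table_info` examples. Here is a hedged sketch of how such custom table info is supplied; the path and table choice are placeholders, and the keyword names are assumed from the `SQLDatabase` API this notebook documents:

```python
from langchain.sql_database import SQLDatabase

# Hand-written schema plus sample rows, matching the corrected /* ... */ block above.
custom_table_info = {
    "Track": """CREATE TABLE Track (
	"TrackId" INTEGER NOT NULL,
	"Composer" NVARCHAR(220),
	PRIMARY KEY ("TrackId")
)

SELECT * FROM 'Track' LIMIT 3;
/*
3 rows from Track table:
TrackId	Name	Composer
1	For Those About To Rock (We Salute You)	Angus Young, Malcolm Young, Brian Johnson
2	Balls to the Wall	None
3	My favorite song ever	The coolest composer of all time
*/"""
}

db = SQLDatabase.from_uri(
    "sqlite:///Chinook.db",  # placeholder path
    include_tables=["Track", "Playlist"],
    sample_rows_in_table_info=2,
    custom_table_info=custom_table_info,
)
print(db.table_info)  # Track uses the custom text; Playlist is reflected automatically
```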
docs/modules/chat/examples/promptlayer_chat_openai.ipynb (adds a "Using PromptLayer Track" section)

@@ -123,6 +123,40 @@
New markdown and code cells:

## Using PromptLayer Track
If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantiating the PromptLayer LLM to get the request id.

```python
chat = PromptLayerChatOpenAI(return_pl_id=True)
chat_results = chat.generate([[HumanMessage(content="I am a cat and I want")]])

for res in chat_results.generations:
    pl_request_id = res[0].generation_info["pl_request_id"]
    promptlayer.track.score(request_id=pl_request_id, score=100)
```

Using this allows you to track the performance of your model in the PromptLayer dashboard. If you are using a prompt template, you can attach a template to a request as well.
Overall, this gives you the opportunity to track the performance of different templates and models in the PromptLayer dashboard.

@@ -141,11 +175,11 @@ (notebook metadata)
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.8.8"
+"version": "3.8.8 (default, Apr 13 2021, 12:59:45) \n[Clang 10.0.0 ]"
-"hash": "c4fe2cd85a8d9e8baaec5340ce66faff1c77581a9f43e6c45e85e09b6fced008"
+"hash": "8a5edab282632443219e051e4ade2d1d5bbc671c781051bf1437897cbdfea0f1"
qa_with_sources notebook (removes a duplicated example cell)

@@ -165,28 +165,6 @@
     "source": [
         "chain({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
     ]
 },
-{
-    "cell_type": "code",
-    "execution_count": 11,
-    "id": "c91fdc8a",
-    "metadata": {},
-    "outputs": [
-        {
-            "data": {
-                "text/plain": [
-                    "{'answer': ' The president honored Justice Stephen Breyer for his service.\\n',\n",
-                    " 'sources': '30-pl'}"
-                ]
-            },
-            "execution_count": 11,
-            "metadata": {},
-            "output_type": "execute_result"
-        }
-    ],
-    "source": [
-        "qa({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
-    ]
-}
 ],
 "metadata": {
@@ -12,7 +12,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "522686de",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -36,7 +36,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -56,7 +56,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"id": "76a6e7b0-e927-4bfb-a414-1332a4149106",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -68,7 +68,7 @@
|
||||
"AIMessage(content=\"J'aime programmer.\", additional_kwargs={})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -87,7 +87,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 4,
|
||||
"id": "ce16ad78-8e6f-48cd-954e-98be75eb5836",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -99,7 +99,7 @@
|
||||
"AIMessage(content=\"J'aime programmer.\", additional_kwargs={})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -122,7 +122,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"id": "2b21fc52-74b6-4950-ab78-45d12c68fb4d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -131,10 +131,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output=None)"
|
||||
"LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -150,7 +150,39 @@
|
||||
" HumanMessage(content=\"Translate this sentence from English to French. I love artificial intelligence.\")\n",
|
||||
" ],\n",
|
||||
"]\n",
|
||||
"chat.generate(batch_messages)"
|
||||
"result = chat.generate(batch_messages)\n",
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2960f50f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can recover things like token usage from this LLMResult"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a6186bee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'token_usage': {'prompt_tokens': 71,\n",
|
||||
" 'completion_tokens': 18,\n",
|
||||
" 'total_tokens': 89}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result.llm_output"
|
||||
]
|
||||
},
|
||||
{
38
docs/modules/document_loaders/examples/blackboard.ipynb
Normal file
@@ -0,0 +1,38 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Blackboard\n",
"\n",
"This covers how to load data from a Blackboard Learn instance."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import BlackboardLoader\n",
"\n",
"loader = BlackboardLoader(\n",
" blackboard_course_url=\"https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1\",\n",
" bbrouter=\"expires:12345...\",\n",
" load_all_recursively=True,\n",
")\n",
"documents = loader.load()"
]
}
],
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -158,7 +158,72 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7874d01d",
|
||||
"id": "672733fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define a Partitioning Strategy\n",
|
||||
"\n",
|
||||
"Unstructured document loader allow users to pass in a `strategy` parameter that lets `unstructured` know how to partitioning the document. Currently supported strategies are `\"hi_res\"` (the default) and `\"fast\"`. Hi res partitioning strategies are more accurate, but take longer to process. Fast strategies partition the document more quickly, but trade-off accuracy. Not all document types have separate hi res and fast partitioning strategies. For those document types, the `strategy` kwarg is ignored. In some cases, the high res strategy will fallback to fast if there is a dependency missing (i.e. a model for document partitioning). You can see how to apply a strategy to an `UnstructuredFileLoader` below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "767238a4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredFileLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "9518b425",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredFileLoader(\"layout-parser-paper-fast.pdf\", strategy=\"fast\", mode=\"elements\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "645f29e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "60685353",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='1', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
|
||||
" Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
|
||||
" Document(page_content='0', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
|
||||
" Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
|
||||
" Document(page_content='n', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'Title'}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[:5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8de9ef16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## PDF Example\n",
|
||||
@@ -166,7 +231,6 @@
|
||||
"Processing PDF documents works exactly the same way. Unstructured detects the file type and extracts the same types of `elements`. "
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -225,7 +289,7 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8ca8a648",
|
||||
"id": "f52b04cb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -247,7 +311,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -55,12 +55,12 @@ There are a lot of different document loaders that LangChain supports. Below are
|
||||
|
||||
`Airbyte Json <./examples/airbyte_json.html>`_: A walkthrough of how to load data from a local Airbyte JSON file.
|
||||
|
||||
`Online PDF <./examples/online_pdf.html>`_: A walkthrough of how to load data from an online PDF.
|
||||
|
||||
`CoNLL-U <./examples/CoNLL-U.html>`_: A walkthrough of how to load data from a ConLL-U file.
|
||||
|
||||
`iFixit <./examples/ifixit.html>`_: A walkthrough of how to search and load data like guides, technical Q&A's, and device wikis from iFixit.com
|
||||
|
||||
`Blackboard <./examples/blackboard.html>`_: A walkthrough of how to load data from a Blackboard course.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:glob:
|
||||
|
||||
@@ -178,16 +178,16 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new GraphQAChain chain...\u001B[0m\n",
|
||||
"\u001b[1m> Entering new GraphQAChain chain...\u001b[0m\n",
|
||||
"Entities Extracted:\n",
|
||||
"\u001B[32;1m\u001B[1;3m Intel\u001B[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m Intel\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001B[32;1m\u001B[1;3mIntel is going to build $20 billion semiconductor \"mega site\"\n",
|
||||
"\u001b[32;1m\u001b[1;3mIntel is going to build $20 billion semiconductor \"mega site\"\n",
|
||||
"Intel is building state-of-the-art factories\n",
|
||||
"Intel is creating 10,000 new good-paying jobs\n",
|
||||
"Intel is helping build Silicon Valley\u001B[0m\n",
|
||||
"Intel is helping build Silicon Valley\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -205,10 +205,76 @@
|
||||
"chain.run(\"what is Intel going to build?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "410aafa0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Save the graph\n",
|
||||
"We can also save and load the graph."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bc72cca0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph.write_to_gml(\"graph.gml\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "652760ad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes.graph import NetworkxEntityGraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "eae591fe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loaded_graph = NetworkxEntityGraph.from_gml(\"graph.gml\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9439d419",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[('Intel', '$20 billion semiconductor \"mega site\"', 'is going to build'),\n",
|
||||
" ('Intel', 'state-of-the-art factories', 'is building'),\n",
|
||||
" ('Intel', '10,000 new good-paying jobs', 'is creating'),\n",
|
||||
" ('Intel', 'Silicon Valley', 'is helping build'),\n",
|
||||
" ('Field of dreams',\n",
|
||||
" \"America's future will be built\",\n",
|
||||
" 'is the ground on which')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loaded_graph.get_triples()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f70b9ada",
|
||||
"id": "045796cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
|
||||
@@ -635,7 +635,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.base import RegexParser\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"\n",
|
||||
"output_parser = RegexParser(\n",
|
||||
" regex=r\"(.*?)\\nScore: (.*)\",\n",
|
||||
@@ -732,4 +732,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -635,7 +635,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts.base import RegexParser\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"\n",
|
||||
"output_parser = RegexParser(\n",
|
||||
" regex=r\"(.*?)\\nScore: (.*)\",\n",
@@ -19,20 +19,20 @@ to pass to the language model. This is implemented in LangChain as the `StuffDoc

**Cons:** Most LLMs have a context length, and for large documents (or many documents) this will not work as it will result in a prompt larger than the context length.

-The main downside of this method is that it only works one smaller pieces of data. Once you are working
+The main downside of this method is that it only works on smaller pieces of data. Once you are working
with many pieces of data, this approach is no longer feasible. The next two approaches are designed to help deal with that.

## Map Reduce
-This method involves an initial prompt on each chunk of data (for summarization tasks, this
+This method involves running an initial prompt on each chunk of data (for summarization tasks, this
could be a summary of that chunk; for question-answering tasks, it could be an answer based solely on that chunk).
Then a different prompt is run to combine all the initial outputs. This is implemented in LangChain as the `MapReduceDocumentsChain`.

**Pros:** Can scale to larger documents (and more documents) than `StuffDocumentsChain`. The calls to the LLM on individual documents are independent and can therefore be parallelized.

-**Cons:** Requires many more calls to the LLM than `StuffDocumentsChain`. Loses some information during the final combining call.
+**Cons:** Requires many more calls to the LLM than `StuffDocumentsChain`. Loses some information during the final combined call.

## Refine
-This method involves an initial prompt on the first chunk of data, generating some output.
+This method involves running an initial prompt on the first chunk of data, generating some output.
For the remaining documents, that output is passed in, along with the next document,
asking the LLM to refine the output based on the new document.

@@ -46,6 +46,6 @@ This method involves running an initial prompt on each chunk of data, that not o
task but also gives a score for how certain it is in its answer. The responses are then
ranked according to this score, and the highest score is returned.

-**Pros:** Similar pros as `MapReduceDocumentsChain`. Compared to `MapReduceDocumentsChain`, it requires fewer calls.
+**Pros:** Similar pros as `MapReduceDocumentsChain`. Requires fewer calls, compared to `MapReduceDocumentsChain`.

**Cons:** Cannot combine information between documents. This means it is most useful when you expect there to be a single simple answer in a single document.
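
In practice, the strategy is selected with a `chain_type` argument. As a rough illustration (a minimal Python sketch, not part of the diff above; it assumes the `load_qa_chain` helper used elsewhere in these docs and an existing `docs` list and `query` string):

from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain

llm = OpenAI(temperature=0)
# chain_type selects the combine-documents strategy described above:
# "stuff", "map_reduce", "refine", or "map_rerank".
chain = load_qa_chain(llm, chain_type="map_reduce")
# chain({"input_documents": docs, "question": query}, return_only_outputs=True)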
@@ -76,6 +76,131 @@
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "bb61bbeb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's load the OpenAI Embedding class with first generation models (e.g. text-search-ada-doc-001/text-search-ada-query-001). Note: These are not recommended models - see [here](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c0b072cc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a56b70f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings(model_name=\"ada\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "14aefb64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3c39ed33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e3221db6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "c3852491",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## AzureOpenAI\n",
|
||||
"\n",
|
||||
"Let's load the OpenAI Embedding class with environment variables set to indicate to use Azure endpoints."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1b40f827",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# set the environment variables needed for openai package to know to reach out to azure\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n",
|
||||
"os.environ[\"OPENAI_API_BASE\"] = \"https://<your-endpoint.openai.azure.com/\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your AzureOpenAI key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bb36d16c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings(model=\"your-embeddings-deployment-name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "228abcbb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "60dd7fad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "83bc1a72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "42f76e43",
|
||||
@@ -86,6 +211,13 @@
|
||||
"Let's load the Cohere Embedding class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "ca9e2b3a",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -103,7 +235,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = CohereEmbeddings(cohere_api_key= cohere_api_key)"
|
||||
"embeddings = CohereEmbeddings(cohere_api_key=cohere_api_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -290,7 +422,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeddings = HuggingFaceInstructEmbeddings(query_instruction=\"Represent the query for retrieval: \")"
|
||||
"embeddings = HuggingFaceInstructEmbeddings(\n",
|
||||
" query_instruction=\"Represent the query for retrieval: \"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -332,9 +466,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import (\n",
|
||||
" SelfHostedEmbeddings, \n",
|
||||
" SelfHostedHuggingFaceEmbeddings, \n",
|
||||
" SelfHostedHuggingFaceInstructEmbeddings\n",
|
||||
" SelfHostedEmbeddings,\n",
|
||||
" SelfHostedHuggingFaceEmbeddings,\n",
|
||||
" SelfHostedHuggingFaceInstructEmbeddings,\n",
|
||||
")\n",
|
||||
"import runhouse as rh"
|
||||
]
|
||||
@@ -353,7 +487,7 @@
|
||||
"# gpu = rh.cluster(name='rh-a10x', instance_type='g5.2xlarge', provider='aws')\n",
|
||||
"\n",
|
||||
"# For an existing cluster\n",
|
||||
"# gpu = rh.cluster(ips=['<ip of the cluster>'], \n",
|
||||
"# gpu = rh.cluster(ips=['<ip of the cluster>'],\n",
|
||||
"# ssh_creds={'ssh_user': '...', 'ssh_private_key':'<path_to_key>'},\n",
|
||||
"# name='my-cluster')"
|
||||
]
|
||||
@@ -424,16 +558,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_pipeline():\n",
|
||||
" from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline # Must be inside the function in notebooks\n",
|
||||
" from transformers import (\n",
|
||||
" AutoModelForCausalLM,\n",
|
||||
" AutoTokenizer,\n",
|
||||
" pipeline,\n",
|
||||
" ) # Must be inside the function in notebooks\n",
|
||||
"\n",
|
||||
" model_id = \"facebook/bart-base\"\n",
|
||||
" tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
" model = AutoModelForCausalLM.from_pretrained(model_id)\n",
|
||||
" return pipeline(\"feature-extraction\", model=model, tokenizer=tokenizer)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def inference_fn(pipeline, prompt):\n",
|
||||
" # Return last hidden state of the model\n",
|
||||
" if isinstance(prompt, list):\n",
|
||||
" return [emb[0][-1] for emb in pipeline(prompt)] \n",
|
||||
" return [emb[0][-1] for emb in pipeline(prompt)]\n",
|
||||
" return pipeline(prompt)[0][-1]"
|
||||
]
|
||||
},
|
||||
@@ -445,10 +585,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = SelfHostedEmbeddings(\n",
|
||||
" model_load_fn=get_pipeline, \n",
|
||||
" model_load_fn=get_pipeline,\n",
|
||||
" hardware=gpu,\n",
|
||||
" model_reqs=[\"./\", \"torch\", \"transformers\"],\n",
|
||||
" inference_fn=inference_fn\n",
|
||||
" inference_fn=inference_fn,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -176,6 +176,77 @@
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a2f572e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Latex Text Splitter\n",
|
||||
"\n",
|
||||
"LatexTextSplitter splits text along Latex headings, headlines, enumerations and more. It's implemented as a simple subclass of RecursiveCharacterSplitter with Latex-specific separators. See the source code to see the Latex syntax expected by default.\n",
|
||||
"\n",
|
||||
"1. How the text is split: by list of latex specific tags\n",
|
||||
"2. How the chunk size is measured: by length function passed in (defaults to number of characters)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c2503917",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import LatexTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e46b753b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"latex_text = \"\"\"\n",
|
||||
"\\documentclass{article}\n",
|
||||
"\n",
|
||||
"\\begin{document}\n",
|
||||
"\n",
|
||||
"\\maketitle\n",
|
||||
"\n",
|
||||
"\\section{Introduction}\n",
|
||||
"Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.\n",
|
||||
"\n",
|
||||
"\\subsection{History of LLMs}\n",
|
||||
"The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.\n",
|
||||
"\n",
|
||||
"\\subsection{Applications of LLMs}\n",
|
||||
"LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. They can also be used in academia for research in linguistics, psychology, and computational linguistics.\n",
|
||||
"\n",
|
||||
"\\end{document}\n",
|
||||
"\"\"\"\n",
|
||||
"latex_splitter = LatexTextSplitter(chunk_size=400, chunk_overlap=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "73b5bd33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = latex_splitter.create_documents([latex_text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e1c7fbd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c350765d",
|
||||
|
||||
@@ -37,7 +37,7 @@
|
||||
"id": "07c1e3b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next in the generic setup, let's specify the document loader we want to use."
|
||||
"Next in the generic setup, let's specify the document loader we want to use. You can download the `state_of_the_union.txt` file [here](https://github.com/hwchase17/langchain/blob/master/docs/modules/state_of_the_union.txt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -366,7 +366,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -52,6 +52,8 @@ In the below guides, we cover different types of vectorstores and how to use the
|
||||
|
||||
`Weaviate <./vectorstore_examples/weaviate.html>`_: A walkthrough of how to use the Weaviate vectorstore wrapper.
|
||||
|
||||
`PGVector <./vectorstore_examples/pgvector.html>`_: A walkthrough of how to use the PGVector (Postgres Vector DB) vectorstore wrapper.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
194
docs/modules/indexes/vectorstore_examples/pgvector.ipynb
Normal file
@@ -0,0 +1,194 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PGVector\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the Postgres vector database (PGVector)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## Loading Environment Variables\n",
|
||||
"from typing import List, Tuple\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores.pgvector import PGVector\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.docstore.document import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## PGVector needs the connection string to the database.\n",
|
||||
"## We will load it from the environment variables.\n",
|
||||
"import os\n",
|
||||
"CONNECTION_STRING = PGVector.connection_string_from_db_params(\n",
|
||||
" driver=os.environ.get(\"PGVECTOR_DRIVER\", \"psycopg2\"),\n",
|
||||
" host=os.environ.get(\"PGVECTOR_HOST\", \"localhost\"),\n",
|
||||
" port=int(os.environ.get(\"PGVECTOR_PORT\", \"5432\")),\n",
|
||||
" database=os.environ.get(\"PGVECTOR_DATABASE\", \"postgres\"),\n",
|
||||
" user=os.environ.get(\"PGVECTOR_USER\", \"postgres\"),\n",
|
||||
" password=os.environ.get(\"PGVECTOR_PASSWORD\", \"postgres\"),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Example\n",
|
||||
"# postgresql+psycopg2://username:password@localhost:5432/database_name"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity search with score"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search with Euclidean Distance (Default)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The PGVector Module will try to create a table with the name of the collection. So, make sure that the collection name is unique and the user has the \n",
|
||||
"# permission to create a table.\n",
|
||||
"\n",
|
||||
"db = PGVector.from_documents(\n",
|
||||
" embedding=embeddings,\n",
|
||||
" documents=docs,\n",
|
||||
" collection_name=\"state_of_the_union\",\n",
|
||||
" connection_string=CONNECTION_STRING,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs_with_score: List[Tuple[Document, float]] = db.similarity_search_with_score(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076628081132506\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076628081132506\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076804780049968\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.6076804780049968\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for doc, score in docs_with_score:\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" print(\"Score: \", score)\n",
|
||||
" print(doc.page_content)\n",
|
||||
" print(\"-\" * 80)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
204
docs/modules/indexes/vectorstore_examples/redis.ipynb
Normal file
@@ -0,0 +1,204 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Redis\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the Redis database."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores.redis import Redis"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rds = Redis.from_documents(docs, embeddings,redis_url=\"redis://localhost:6379\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'b564189668a343648996bd5a1d353d4e'"
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rds.index_name"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"results = rds.similarity_search(query)\n",
|
||||
"print(results[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"['doc:333eadf75bd74be393acafa8bca48669']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(rds.add_texts([\"Ankush went to Princeton\"]))"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Ankush went to Princeton\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"Princeton\"\n",
|
||||
"results = rds.similarity_search(query)\n",
|
||||
"print(results[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -119,10 +119,39 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "05e9e2fe",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
"source": [
|
||||
"## Using PromptLayer Track\n",
|
||||
"If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantializing the PromptLayer LLM to get the request id. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1a7315b9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = PromptLayerOpenAI(return_pl_id=True)\n",
|
||||
"llm_results = llm.generate([\"Tell me a joke\"])\n",
|
||||
"\n",
|
||||
"for res in llm_results.generations:\n",
|
||||
" pl_request_id = res[0].generation_info[\"pl_request_id\"]\n",
|
||||
" promptlayer.track.score(request_id=pl_request_id, score=100)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "7eb19139",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Using this allows you to track the performance of your model in the PromptLayer dashboard. If you are using a prompt template, you can attach a template to a request as well.\n",
|
||||
"Overall, this gives you the opportunity to track the performance of different templates and models in the PromptLayer dashboard."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -145,7 +174,7 @@
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "c4fe2cd85a8d9e8baaec5340ce66faff1c77581a9f43e6c45e85e09b6fced008"
|
||||
"hash": "8a5edab282632443219e051e4ade2d1d5bbc671c781051bf1437897cbdfea0f1"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
131
docs/modules/llms/integrations/sagemaker.ipynb
Normal file
@@ -0,0 +1,131 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SageMakerEndpoint\n",
|
||||
"\n",
|
||||
"This notebooks goes over how to use an LLM hosted on a SageMaker endpoint."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip3 install langchain boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.docstore.document import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"example_doc_1 = \"\"\"\n",
|
||||
"Peter and Elizabeth took a taxi to attend the night party in the city. While in the party, Elizabeth collapsed and was rushed to the hospital.\n",
|
||||
"Since she was diagnosed with a brain injury, the doctor told Peter to stay besides her until she gets well.\n",
|
||||
"Therefore, Peter stayed with her at the hospital for 3 days without leaving.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=example_doc_1,\n",
|
||||
" )\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Dict\n",
|
||||
"\n",
|
||||
"from langchain import PromptTemplate, SagemakerEndpoint\n",
|
||||
"from langchain.llms.sagemaker_endpoint import ContentHandlerBase\n",
|
||||
"from langchain.chains.question_answering import load_qa_chain\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"query = \"\"\"How long was Elizabeth hospitalized?\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt_template = \"\"\"Use the following pieces of context to answer the question at the end.\n",
|
||||
"\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"PROMPT = PromptTemplate(\n",
|
||||
" template=prompt_template, input_variables=[\"context\", \"question\"]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"class ContentHandler(ContentHandlerBase):\n",
|
||||
" content_type = \"application/json\"\n",
|
||||
" accepts = \"application/json\"\n",
|
||||
"\n",
|
||||
" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:\n",
|
||||
" input_str = json.dumps({prompt: prompt, **model_kwargs})\n",
|
||||
" return input_str.encode('utf-8')\n",
|
||||
" \n",
|
||||
" def transform_output(self, output: bytes) -> str:\n",
|
||||
" response_json = json.loads(output.read().decode(\"utf-8\"))\n",
|
||||
" return response_json[0][\"generated_text\"]\n",
|
||||
"\n",
|
||||
"content_handler = ContentHandler()\n",
|
||||
"\n",
|
||||
"chain = load_qa_chain(\n",
|
||||
" llm=SagemakerEndpoint(\n",
|
||||
" endpoint_name=\"endpoint-name\", \n",
|
||||
" credentials_profile_name=\"credentials-profile-name\", \n",
|
||||
" region_name=\"us-west-2\", \n",
|
||||
" model_kwargs={\"temperature\":1e-10},\n",
|
||||
" content_handler=content_handler\n",
|
||||
" ),\n",
|
||||
" prompt=PROMPT\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -30,36 +30,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ChatMessageHistory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4404d509",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"history = ChatMessageHistory()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "78c1a67b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"history.add_user_message(\"hi!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "525ce606",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ChatMessageHistory\n",
|
||||
"\n",
|
||||
"history = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
"history.add_user_message(\"hi!\")\n",
|
||||
"\n",
|
||||
"history.add_ai_message(\"whats up?\")"
|
||||
]
|
||||
},
|
||||
@@ -331,6 +307,99 @@
|
||||
"conversation.predict(input=\"Tell me about yourself.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fb68bb9e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Saving Message History\n",
|
||||
"\n",
|
||||
"You may often to save messages, and then load them to use again. This can be done easily by first converting the messages to normal python dictionaries, saving those (as json or something) and then loading those. Here is an example of doing that."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b5acbc4b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from langchain.memory import ChatMessageHistory\n",
|
||||
"from langchain.schema import messages_from_dict, messages_to_dict\n",
|
||||
"\n",
|
||||
"history = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
"history.add_user_message(\"hi!\")\n",
|
||||
"\n",
|
||||
"history.add_ai_message(\"whats up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "7812ee21",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dicts = messages_to_dict(history.messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "3ed6e6a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'type': 'human', 'data': {'content': 'hi!', 'additional_kwargs': {}}},\n",
|
||||
" {'type': 'ai', 'data': {'content': 'whats up?', 'additional_kwargs': {}}}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dicts"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "cdf4ebd2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"new_messages = messages_from_dict(dicts)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9724e24b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi!', additional_kwargs={}),\n",
|
||||
" AIMessage(content='whats up?', additional_kwargs={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"new_messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7826c210",
@@ -9,7 +9,7 @@ both at a short term but also at a long term level. The concept of "Memory" exis
One of the simpler forms of memory occurs in chatbots, where they remember previous conversations.
There are a few different ways to accomplish this:
- Buffer: This is just passing in the past `N` interactions in as context. `N` can be chosen based on a fixed number, the length of the interactions, or other!
-- Summary: This involves summarizing previous conversations and passing that summary in, instead of the raw dialouge itself. Compared to `Buffer`, this compresses information: meaning it is more lossy, but also less likely to run into context length limits.
+- Summary: This involves summarizing previous conversations and passing that summary in, instead of the raw dialogue itself. Compared to `Buffer`, this compresses information: meaning it is more lossy, but also less likely to run into context length limits.
- Combination: A combination of the above two approaches, where you compute a summary but also pass in some previous interactions directly!
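
For reference, a minimal Python sketch of the first two options (an illustration, not part of the diff; it assumes the `ConversationBufferMemory` and `ConversationSummaryMemory` classes from `langchain.memory`):

from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory, ConversationSummaryMemory

# Buffer: keep the last interactions verbatim.
buffer_memory = ConversationBufferMemory()
buffer_memory.save_context({"input": "hi"}, {"output": "whats up"})

# Summary: have an LLM compress prior turns into a running summary instead.
summary_memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))
summary_memory.save_context({"input": "hi"}, {"output": "whats up"})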
## Entity Memory
288
docs/modules/memory/types/token_buffer.ipynb
Normal file
@@ -0,0 +1,288 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff4be5f3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationTokenBufferMemory\n",
|
||||
"\n",
|
||||
"`ConversationTokenBufferMemory` keeps a buffer of recent interactions in memory, and uses token length rather than number of interactions to determine when to flush interactions.\n",
|
||||
"\n",
|
||||
"Let's first walk through how to use the utilities"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "da3384db",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationTokenBufferMemory\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"llm = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e00d4938",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "2fe28a28",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'Human: not much you\\nAI: not much'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf57b97a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3422a3a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10, return_messages=True)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a6d2569f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using in a chain\n",
|
||||
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "ebd68c10",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! I'm doing great, just enjoying the day. How about you?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"conversation_with_summary = ConversationChain(\n",
|
||||
" llm=llm, \n",
|
||||
" # We set a very low max_token_limit for the purposes of testing.\n",
|
||||
" memory=ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60),\n",
|
||||
" verbose=True\n",
|
||||
")\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "86207a61",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI: Hi there! I'm doing great, just enjoying the day. How about you?\n",
|
||||
"Human: Just working on writing some documentation!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Sounds like a productive day! What kind of documentation are you writing?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Just working on writing some documentation!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "76a0ab39",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI: Hi there! I'm doing great, just enjoying the day. How about you?\n",
|
||||
"Human: Just working on writing some documentation!\n",
|
||||
"AI: Sounds like a productive day! What kind of documentation are you writing?\n",
|
||||
"Human: For LangChain! Have you heard of it?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Yes, I have heard of LangChain! It is a decentralized language-learning platform that connects native speakers and learners in real time. Is that the documentation you're writing about?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"For LangChain! Have you heard of it?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "8c669db1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: For LangChain! Have you heard of it?\n",
|
||||
"AI: Yes, I have heard of LangChain! It is a decentralized language-learning platform that connects native speakers and learners in real time. Is that the documentation you're writing about?\n",
|
||||
"Human: Haha nope, although a lot of people confuse it for that\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Oh, I see. Is there another language learning platform you're referring to?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see here that the buffer is updated\n",
|
||||
"conversation_with_summary.predict(input=\"Haha nope, although a lot of people confuse it for that\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8c09a239",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -21,16 +21,17 @@
|
||||
"id": "5d56ce86",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a custom prompt template\n",
|
||||
"## Creating a Custom Prompt Template\n",
|
||||
"\n",
|
||||
"The only two requirements for all prompt templates are:\n",
|
||||
"There are essentially two distinct prompt templates available - string prompt templates and chat prompt templates. String prompt templates provides a simple prompt in string format, while chat prompt templates produces a more structured prompt to be used with a chat API.\n",
|
||||
"\n",
|
||||
"1. They have a input_variables attribute that exposes what input variables this prompt template expects.\n",
|
||||
"2. They expose a format method which takes in keyword arguments corresponding to the expected input_variables and returns the formatted prompt.\n",
|
||||
"In this guide, we will create a custom prompt using a string prompt template. \n",
|
||||
"\n",
|
||||
"Let's create a custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function.\n",
|
||||
"To create a custom string prompt template, there are two requirements:\n",
|
||||
"1. It has an input_variables attribute that exposes what input variables the prompt template expects.\n",
|
||||
"2. It exposes a format method that takes in keyword arguments corresponding to the expected input_variables and returns the formatted prompt.\n",
|
||||
"\n",
|
||||
"First, let's create a function that will return the source code of a function given its name."
|
||||
"We will create a custom prompt template that takes in the function name as input and formats the prompt to provide the source code of the function. To achieve this, let's first create a function that will return the source code of a function given its name."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -62,11 +63,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import BasePromptTemplate\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from pydantic import BaseModel, validator\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class FunctionExplainerPromptTemplate(BasePromptTemplate, BaseModel):\n",
|
||||
"class FunctionExplainerPromptTemplate(StringPromptTemplate, BaseModel):\n",
|
||||
" \"\"\" A custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function. \"\"\"\n",
|
||||
"\n",
|
||||
" @validator(\"input_variables\")\n",
|
||||
|
||||
465
docs/modules/prompts/examples/output_parsers.ipynb
Normal file
@@ -0,0 +1,465 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "084ee2f0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Output Parsers\n",
|
||||
"\n",
|
||||
"Language models output text. But many times you may want to get more structured information than just text back. This is where output parsers come in.\n",
|
||||
"\n",
|
||||
"Output parsers are classes that help structure language model responses. There are two main methods an output parser must implement:\n",
|
||||
"\n",
|
||||
"- `get_format_instructions() -> str`: A method which returns a string containing instructions for how the output of a language model should be formatted.\n",
|
||||
"- `parse(str) -> Any`: A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n",
|
||||
"\n",
|
||||
"Below we go over some examples of output parsers."
|
||||
]
|
||||
},
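To make that contract concrete, here is a minimal, hypothetical parser implementing both methods — the built-in parsers covered below follow the same pattern (the `BaseOutputParser` import reflects where the base class lived at the time):

```python
from langchain.schema import BaseOutputParser


class BooleanOutputParser(BaseOutputParser):
    """Hypothetical parser that turns a model response into True/False."""

    def get_format_instructions(self) -> str:
        # Injected into the prompt so the model knows how to answer.
        return "Answer with exactly one word: YES or NO."

    def parse(self, text: str) -> bool:
        # Convert the raw model string into a structured value.
        cleaned = text.strip().upper()
        if cleaned not in ("YES", "NO"):
            raise ValueError(f"Could not parse boolean from: {text!r}")
        return cleaned == "YES"
```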
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5f0c8a33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chat_models import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a1ae632a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## PydanticOutputParser\n",
|
||||
"This output parser allows users to specify an arbitrary JSON schema and query LLMs for JSON outputs that conform to that schema.\n",
|
||||
"\n",
|
||||
"Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON. In the OpenAI family, DaVinci can do reliably but Curie's ability already drops off dramatically. \n",
|
||||
"\n",
|
||||
"Use Pydantic to declare your data model. Pydantic's BaseModel like a Python dataclass, but with actual type checking + coercion."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cba6d8e3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"from pydantic import BaseModel, Field, validator\n",
|
||||
"from typing import List"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "0a203100",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = 'text-davinci-003'\n",
|
||||
"temperature = 0.0\n",
|
||||
"model = OpenAI(model_name=model_name, temperature=temperature)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "b3f16168",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup='Why did the chicken cross the playground?', punchline='To get to the other slide!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Define your desired data structure.\n",
|
||||
"class Joke(BaseModel):\n",
|
||||
" setup: str = Field(description=\"question to set up a joke\")\n",
|
||||
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
|
||||
" \n",
|
||||
" # You can add custom validation logic easily with Pydantic.\n",
|
||||
" @validator('setup')\n",
|
||||
" def question_ends_with_question_mark(cls, field):\n",
|
||||
" if field[-1] != '?':\n",
|
||||
" raise ValueError(\"Badly formed question!\")\n",
|
||||
" return field\n",
|
||||
"\n",
|
||||
"# And a query intented to prompt a language model to populate the data structure.\n",
|
||||
"joke_query = \"Tell me a joke.\"\n",
|
||||
"\n",
|
||||
"# Set up a parser + inject instructions into the prompt template.\n",
|
||||
"parser = PydanticOutputParser(pydantic_object=Joke)\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
|
||||
" input_variables=[\"query\"],\n",
|
||||
" partial_variables={\"format_instructions\": parser.get_format_instructions()}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"_input = prompt.format_prompt(query=joke_query)\n",
|
||||
"\n",
|
||||
"output = model(_input.to_string())\n",
|
||||
"\n",
|
||||
"parser.parse(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "03049f88",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Actor(name='Tom Hanks', film_names=['Forrest Gump', 'Saving Private Ryan', 'The Green Mile', 'Cast Away', 'Toy Story', 'A League of Their Own'])"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Here's another example, but with a compound typed field.\n",
|
||||
"class Actor(BaseModel):\n",
|
||||
" name: str = Field(description=\"name of an actor\")\n",
|
||||
" film_names: List[str] = Field(description=\"list of names of films they starred in\")\n",
|
||||
" \n",
|
||||
"actor_query = \"Generate the filmography for a random actor.\"\n",
|
||||
"\n",
|
||||
"parser = PydanticOutputParser(pydantic_object=Actor)\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
|
||||
" input_variables=[\"query\"],\n",
|
||||
" partial_variables={\"format_instructions\": parser.get_format_instructions()}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"_input = prompt.format_prompt(query=actor_query)\n",
|
||||
"\n",
|
||||
"output = model(_input.to_string())\n",
|
||||
"\n",
|
||||
"parser.parse(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61f67890",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<br>\n",
|
||||
"<br>\n",
|
||||
"<br>\n",
|
||||
"<br>\n",
|
||||
"\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "91871002",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Structured Output Parser\n",
|
||||
"\n",
|
||||
"While the Pydantic/JSON parser is more powerful, we initially experimented data structures having text fields only."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b492997a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.output_parsers import StructuredOutputParser, ResponseSchema"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09473dce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we define the response schema we want to receive."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "432ac44a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response_schemas = [\n",
|
||||
" ResponseSchema(name=\"answer\", description=\"answer to the user's question\"),\n",
|
||||
" ResponseSchema(name=\"source\", description=\"source used to answer the user's question, should be a website.\")\n",
|
||||
"]\n",
|
||||
"output_parser = StructuredOutputParser.from_response_schemas(response_schemas)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7b92ce96",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now get a string that contains instructions for how the response should be formatted, and we then insert that into our prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "593cfc25",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"format_instructions = output_parser.get_format_instructions()\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"answer the users question as best as possible.\\n{format_instructions}\\n{question}\",\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" partial_variables={\"format_instructions\": format_instructions}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0943e783",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now use this to format a prompt to send to the language model, and then parse the returned result."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "106f1ba6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "86d9d24f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"_input = prompt.format_prompt(question=\"what's the capital of france\")\n",
|
||||
"output = model(_input.to_string())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "956bdc99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output_parser.parse(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "da639285",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And here's an example of using this in a chat model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "8f483d7d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_model = ChatOpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "f761cbf1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" messages=[\n",
|
||||
" HumanMessagePromptTemplate.from_template(\"answer the users question as best as possible.\\n{format_instructions}\\n{question}\") \n",
|
||||
" ],\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" partial_variables={\"format_instructions\": format_instructions}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "edd73ae3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"_input = prompt.format_prompt(question=\"what's the capital of france\")\n",
|
||||
"output = chat_model(_input.to_messages())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "a3c8b91e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output_parser.parse(output.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9936fa27",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## CommaSeparatedListOutputParser\n",
|
||||
"\n",
|
||||
"Here's another parser strictly less powerful than Pydantic/JSON parsing."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "872246d7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.output_parsers import CommaSeparatedListOutputParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "c3f9aee6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output_parser = CommaSeparatedListOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "e77871b7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"format_instructions = output_parser.get_format_instructions()\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"List five {subject}.\\n{format_instructions}\",\n",
|
||||
" input_variables=[\"subject\"],\n",
|
||||
" partial_variables={\"format_instructions\": format_instructions}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "a71cb5d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "783d7d98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"_input = prompt.format(subject=\"ice cream flavors\")\n",
|
||||
"output = model(_input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "fcb81344",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Vanilla',\n",
|
||||
" 'Chocolate',\n",
|
||||
" 'Strawberry',\n",
|
||||
" 'Mint Chocolate Chip',\n",
|
||||
" 'Cookies and Cream']"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output_parser.parse(output)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -120,6 +120,25 @@
|
||||
"!cat simple_prompt.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "de75e959",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = load_prompt(\"simple_prompt.json\")\n",
|
||||
"print(prompt.format(adjective=\"funny\", content=\"chickens\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1d788f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Tell me a funny joke about chickens."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d788a83c",
|
||||
|
||||
@@ -32,3 +32,4 @@ The user guide here shows more advanced workflows and how to use the library in
|
||||
./examples/prompt_serialization.ipynb
|
||||
./examples/few_shot_examples_data.ipynb
|
||||
./examples/example_selectors.ipynb
|
||||
./examples/output_parsers.ipynb
|
||||
|
||||
@@ -121,7 +121,8 @@
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Intermediate Answer\",\n",
|
||||
" func=search.run\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to ask with search\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
326
docs/modules/utils/examples/zapier.ipynb
Normal file
@@ -0,0 +1,326 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "16763ed3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Zapier Natural Language Actions API\n",
|
||||
"\\\n",
|
||||
"Full docs here: https://nla.zapier.com/api/v1/dynamic/docs\n",
|
||||
"\n",
|
||||
"**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface.\n",
|
||||
"\n",
|
||||
"NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps\n",
|
||||
"\n",
|
||||
"Zapier NLA handles ALL the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs. The key idea is you, or your users, expose a set of actions via an oauth-like setup window, which you can then query and execute via a REST API.\n",
|
||||
"\n",
|
||||
"NLA offers both API Key and OAuth for signing NLA API requests.\n",
|
||||
"\n",
|
||||
"1. Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com)\n",
|
||||
"\n",
|
||||
"2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com\n",
|
||||
"\n",
|
||||
"This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/api/v1/dynamic/docs) or reach out to nla@zapier.com for user-facing oauth developer support.\n",
|
||||
"\n",
|
||||
"This example goes over how to use the Zapier integration with a `SimpleSequentialChain`, then an `Agent`.\n",
|
||||
"In code, below:"
|
||||
]
|
||||
},
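Before diving into the cells, it may help to see the small wrapper surface everything below rests on. A compressed sketch using only the calls that appear later in this notebook:

```python
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper

# List the actions your Zapier account has exposed via NLA.
actions = ZapierNLAWrapper().list()

# Pick one exposed action by its natural-language description...
action = next(a for a in actions if a["description"].startswith("Gmail: Find Email"))

# ...and run it with plain-English instructions; NLA handles auth,
# the underlying API call, and simplifying the output for the LLM.
result = ZapierNLARunAction(
    action_id=action["id"],
    zapier_description=action["description"],
    params_schema=action["params"],
).run("Grab the latest email from Silicon Valley Bank")
```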
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a363309c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5cf33377",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# get from https://platform.openai.com/\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\", \"\")\n",
|
||||
"\n",
|
||||
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in): \n",
|
||||
"os.environ[\"ZAPIER_NLA_API_KEY\"] = os.environ.get(\"ZAPIER_NLA_API_KEY\", \"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4881b484-1b97-478f-b206-aec407ceff66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example with Agent\n",
|
||||
"Zapier tools can be used with an agent. See the example below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b2044b17-c941-4ffb-8a03-027a35e2df81",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents.agent_toolkits import ZapierToolkit\n",
|
||||
"from langchain.utilities.zapier import ZapierNLAWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7b505eeb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 0. expose gmail 'find email' and slack 'send channel message' actions\n",
|
||||
"\n",
|
||||
"# first go here, log in, expose (enable) the two actions: https://nla.zapier.com/demo/start -- for this example, can leave all fields \"Have AI guess\"\n",
|
||||
"# in an oauth scenario, you'd get your own <provider> id (instead of 'demo') which you route your users through first"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "cab18227-c232-4214-9256-bb8dd352266c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"zapier = ZapierNLAWrapper()\n",
|
||||
"toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n",
|
||||
"agent = initialize_agent(toolkit.get_tools(), llm, agent=\"zero-shot-react-description\", verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "f94713de-b64d-465f-a087-00288b5f80ec",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find the email and summarize it.\n",
|
||||
"Action: Gmail: Find Email\n",
|
||||
"Action Input: Find the latest email from Silicon Valley Bank\u001b[0m\n",
|
||||
"Observation: \u001b[31;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I need to summarize the email and send it to the #test-zapier channel in Slack.\n",
|
||||
"Action: Slack: Send Channel Message\n",
|
||||
"Action Input: Send a slack message to the #test-zapier channel with the text \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m{\"message__text\": \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\", \"message__permalink\": \"https://langchain.slack.com/archives/C04TSGU0RA7/p1678859932375259\", \"channel\": \"C04TSGU0RA7\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:58:52Z\", \"message__bot_profile__icons__image_36\": \"https://avatars.slack-edge.com/2022-08-02/3888649620612_f864dc1bb794cf7d82b0_36.png\", \"message__blocks[]block_id\": \"kdZZ\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: I have sent a summary of the last email from Silicon Valley Bank to the #test-zapier channel in Slack.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I have sent a summary of the last email from Silicon Valley Bank to the #test-zapier channel in Slack.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bcdea831",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Example with SimpleSequentialChain\n",
|
||||
"If you need more explicit control, use a chain, like below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "10a46e7e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import LLMChain, TransformChain, SimpleSequentialChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.tools.zapier.tool import ZapierNLARunAction\n",
|
||||
"from langchain.utilities.zapier import ZapierNLAWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "b9358048",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 0. expose gmail 'find email' and slack 'send direct message' actions\n",
|
||||
"\n",
|
||||
"# first go here, log in, expose (enable) the two actions: https://nla.zapier.com/demo/start -- for this example, can leave all fields \"Have AI guess\"\n",
|
||||
"# in an oauth scenario, you'd get your own <provider> id (instead of 'demo') which you route your users through first\n",
|
||||
"\n",
|
||||
"actions = ZapierNLAWrapper().list()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "4e80f461",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 1. gmail find email\n",
|
||||
"\n",
|
||||
"GMAIL_SEARCH_INSTRUCTIONS = \"Grab the latest email from Silicon Valley Bank\"\n",
|
||||
"\n",
|
||||
"def nla_gmail(inputs):\n",
|
||||
" action = next((a for a in actions if a[\"description\"].startswith(\"Gmail: Find Email\")), None)\n",
|
||||
" return {\"email_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(inputs[\"instructions\"])}\n",
|
||||
"gmail_chain = TransformChain(input_variables=[\"instructions\"], output_variables=[\"email_data\"], transform=nla_gmail)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "46893233",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 2. generate draft reply\n",
|
||||
"\n",
|
||||
"template = \"\"\"You are an assisstant who drafts replies to an incoming email. Output draft reply in plain text (not JSON).\n",
|
||||
"\n",
|
||||
"Incoming email:\n",
|
||||
"{email_data}\n",
|
||||
"\n",
|
||||
"Draft email reply:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt_template = PromptTemplate(input_variables=[\"email_data\"], template=template)\n",
|
||||
"reply_chain = LLMChain(llm=OpenAI(temperature=.7), prompt=prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "cd85c4f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## step 3. send draft reply via a slack direct message\n",
|
||||
"\n",
|
||||
"SLACK_HANDLE = \"@Ankush Gola\"\n",
|
||||
"\n",
|
||||
"def nla_slack(inputs):\n",
|
||||
" action = next((a for a in actions if a[\"description\"].startswith(\"Slack: Send Direct Message\")), None)\n",
|
||||
" instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs[\"draft_reply\"]}'\n",
|
||||
" return {\"slack_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(instructions)}\n",
|
||||
"slack_chain = TransformChain(input_variables=[\"draft_reply\"], output_variables=[\"slack_data\"], transform=nla_slack)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "4829cab4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new SimpleSequentialChain chain...\u001b[0m\n",
|
||||
"\u001b[36;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3m\n",
|
||||
"Dear Silicon Valley Bridge Bank, \n",
|
||||
"\n",
|
||||
"Thank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \n",
|
||||
"\n",
|
||||
"Best regards, \n",
|
||||
"[Your Name]\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3m{\"message__text\": \"Dear Silicon Valley Bridge Bank, \\n\\nThank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \\n\\nBest regards, \\n[Your Name]\", \"message__permalink\": \"https://langchain.slack.com/archives/D04TKF5BBHU/p1678859968241629\", \"channel\": \"D04TKF5BBHU\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:59:28Z\", \"message__blocks[]block_id\": \"p7i\", \"message__blocks[]elements[]elements[]type\": \"[['text']]\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'{\"message__text\": \"Dear Silicon Valley Bridge Bank, \\\\n\\\\nThank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \\\\n\\\\nBest regards, \\\\n[Your Name]\", \"message__permalink\": \"https://langchain.slack.com/archives/D04TKF5BBHU/p1678859968241629\", \"channel\": \"D04TKF5BBHU\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:59:28Z\", \"message__blocks[]block_id\": \"p7i\", \"message__blocks[]elements[]elements[]type\": \"[[\\'text\\']]\", \"message__blocks[]elements[]type\": \"[\\'rich_text_section\\']\"}'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"## finally, execute\n",
|
||||
"\n",
|
||||
"overall_chain = SimpleSequentialChain(chains=[gmail_chain, reply_chain, slack_chain], verbose=True)\n",
|
||||
"overall_chain.run(GMAIL_SEARCH_INSTRUCTIONS)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "09ff954e-45f2-4595-92ea-91627abde4a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -9,3 +9,4 @@ sphinx-typlog-theme==0.8.0
|
||||
sphinx-panels
|
||||
toml
|
||||
myst_nb
|
||||
sphinx_copybutton
|
||||
|
||||
@@ -1,9 +1,85 @@
|
||||
Evaluation
|
||||
==============
|
||||
|
||||
Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
|
||||
This section of documentation covers how we approach and think about evaluation in LangChain.
|
||||
It covers both evaluation of internal chains/agents and how we would recommend that people building on top of LangChain approach evaluation.
|
||||
|
||||
The examples here all highlight how to use language models to assist in evaluation of themselves.
|
||||
The Problem
|
||||
-----------
|
||||
|
||||
It can be really hard to evaluate LangChain chains and agents.
|
||||
There are two main reasons for this:
|
||||
|
||||
**# 1: Lack of data**
|
||||
|
||||
You generally don't have a ton of data to evaluate your chains/agents over before starting a project.
|
||||
This is usually because Large Language Models (the core of most chains/agents) are terrific few-shot and zero-shot learners,
|
||||
meaning you are almost always able to get started on a particular task (text-to-SQL, question answering, etc) without
|
||||
a large dataset of examples.
|
||||
This is in stark contrast to traditional machine learning where you had to first collect a bunch of datapoints
|
||||
before even getting started using a model.
|
||||
|
||||
**# 2: Lack of metrics**
|
||||
|
||||
Most chains/agents are performing tasks for which there are not very good metrics to evaluate performance.
|
||||
For example, one of the most common use cases is generating text of some form.
|
||||
Evaluating generated text is much more complicated than evaluating a classification prediction, or a numeric prediction.
|
||||
|
||||
The Solution
|
||||
------------
|
||||
|
||||
LangChain attempts to tackle both of those issues.
|
||||
What we have so far are initial passes at solutions - we do not think we have a perfect solution.
|
||||
So we very much welcome feedback, contributions, integrations, and thoughts on this.
|
||||
|
||||
Here is what we have for each problem so far:
|
||||
|
||||
**# 1: Lack of data**
|
||||
|
||||
We have started `LangChainDatasets <https://huggingface.co/LangChainDatasets>`_ a Community space on Hugging Face.
|
||||
We intend this to be a collection of open source datasets for evaluating common chains and agents.
|
||||
We have contributed five datasets of our own to start, but we very much intend this to be a community effort.
|
||||
In order to contribute a dataset, you simply need to join the community and then you will be able to upload datasets.
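Loading one of these datasets for an evaluation run is then a one-liner — the snippet below uses the same ``load_dataset`` helper that the example notebooks linked further down rely on:

.. code-block:: python

    from langchain.evaluation.loading import load_dataset

    # Pulls the named dataset from the LangChainDatasets space on Hugging Face.
    dataset = load_dataset("agent-search-calculator")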
|
||||
|
||||
We're also aiming to make it as easy as possible for people to create their own datasets.
|
||||
As a first pass at this, we've added a QAGenerationChain, which given a document comes up
|
||||
with question-answer pairs that can be used to evaluate question-answering tasks over that document down the line.
|
||||
See `this notebook <./evaluation/qa_generation.html>`_ for an example of how to use this chain.
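As a rough sketch of that flow (the linked notebook is the authoritative version; the ``from_llm`` constructor and the list-of-dicts output shape shown here are assumptions based on it):

.. code-block:: python

    from langchain.chains import QAGenerationChain
    from langchain.chat_models import ChatOpenAI
    from langchain.document_loaders import TextLoader

    # Load a document and have the chain invent QA pairs about it.
    doc = TextLoader("state_of_the_union.txt").load()[0]
    chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
    qa_pairs = chain.run(doc.page_content)  # e.g. [{"question": ..., "answer": ...}]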
|
||||
|
||||
**# 2: Lack of metrics**
|
||||
|
||||
We have two solutions to the lack of metrics.
|
||||
|
||||
The first solution is to use no metrics, and rather just rely on looking at results by eye to get a sense for how the chain/agent is performing.
|
||||
To assist in this, we have developed (and will continue to develop) `tracing <../tracing.html>`_, a UI-based visualizer of your chain and agent runs.
|
||||
|
||||
The second solution we recommend is to use Language Models themselves to evaluate outputs.
|
||||
For this we have a few different chains and prompts aimed at tackling this issue.
|
||||
|
||||
The Examples
|
||||
------------
|
||||
|
||||
We have created a bunch of examples combining the above two solutions to show how we internally evaluate chains and agents when we are developing.
|
||||
In addition to the examples we've curated, we also highly welcome contributions here.
|
||||
To facilitate that, we've included a `template notebook <./evaluation/benchmarking_template.html>`_ for community members to use to build their own examples.
|
||||
|
||||
The existing examples we have are:
|
||||
|
||||
`Question Answering (State of Union) <./evaluation/qa_benchmarking_sota.html>`_: A notebook showing evaluation of a question-answering task over a State-of-the-Union address.
|
||||
|
||||
`Question Answering (Paul Graham Essay) <./evaluation/qa_benchmarking_pg.html>`_: A notebook showing evaluation of a question-answering task over a Paul Graham essay.
|
||||
|
||||
`SQL Question Answering (Chinook) <./evaluation/sql_qa_benchmarking_chinook.html>`_: A notebook showing evaluation of a question-answering task over a SQL database (the Chinook database).
|
||||
|
||||
`Agent Vectorstore <./evaluation/agent_vectordb_sota_pg.html>`_: A notebook showing evaluation of an agent doing question answering while routing between two different vector databases.
|
||||
|
||||
`Agent Search + Calculator <./evaluation/agent_benchmarking.html>`_: A notebook showing evaluation of an agent doing question answering using a Search engine and a Calculator as tools.
|
||||
|
||||
|
||||
Other Examples
|
||||
--------------
|
||||
|
||||
In addition, we also have some more generic resources for evaluation.
|
||||
|
||||
`Question Answering <./evaluation/question_answering.html>`_: An overview of LLMs aimed at evaluating question answering systems in general.
|
||||
|
||||
|
||||
343
docs/use_cases/evaluation/agent_benchmarking.ipynb
Normal file
@@ -0,0 +1,343 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agent Benchmarking: Search + Calculator\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance of an agent on tasks where it has access to a calculator and a search tool.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "46bf9205",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--agent-search-calculator-8a025c0ce5fb99d2/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "3a275586643f4ccfba1a8d54be28c351",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"agent-search-calculator\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to load an agent capable of answering these questions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain.agents import initialize_agent, Tool, load_tools\n",
|
||||
"\n",
|
||||
"tools = load_tools(['serpapi', 'llm-math'], llm=OpenAI(temperature=0))\n",
|
||||
"agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68504a8f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "cbcafc92",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'38,630,316 people live in Canada as of 2023.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(dataset[0]['question'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrying langchain.llms.openai.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised APIConnectionError: Error communicating with OpenAI: ('Connection aborted.', ConnectionResetError(54, 'Connection reset by peer')).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions = []\n",
|
||||
"predicted_dataset = []\n",
|
||||
"error_dataset = []\n",
|
||||
"for data in dataset:\n",
|
||||
" new_data = {\"input\": data[\"question\"], \"answer\": data[\"answer\"]}\n",
|
||||
" try:\n",
|
||||
" predictions.append(agent(new_data))\n",
|
||||
" predicted_dataset.append(new_data)\n",
|
||||
" except Exception:\n",
|
||||
" error_dataset.append(new_data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'How many people live in canada as of 2023?',\n",
|
||||
" 'answer': 'approximately 38,625,801',\n",
|
||||
" 'output': '38,630,316 people live in Canada as of 2023.',\n",
|
||||
" 'intermediate_steps': [(AgentAction(tool='Search', tool_input='Population of Canada 2023', log=' I need to find population data\\nAction: Search\\nAction Input: Population of Canada 2023'),\n",
|
||||
" '38,630,316')]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"output\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 4, ' INCORRECT': 6})"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': \"who is dua lipa's boyfriend? what is his age raised to the .43 power?\",\n",
|
||||
" 'answer': 'her boyfriend is Romain Gravas. his age raised to the .43 power is approximately 4.9373857399466665',\n",
|
||||
" 'output': \"Isaac Carew, Dua Lipa's boyfriend, is 36 years old and his age raised to the .43 power is 4.6688516567750975.\",\n",
|
||||
" 'intermediate_steps': [(AgentAction(tool='Search', tool_input=\"Dua Lipa's boyfriend\", log=' I need to find out who Dua Lipa\\'s boyfriend is and then calculate his age raised to the .43 power\\nAction: Search\\nAction Input: \"Dua Lipa\\'s boyfriend\"'),\n",
|
||||
" 'Dua and Isaac, a model and a chef, dated on and off from 2013 to 2019. The two first split in early 2017, which is when Dua went on to date LANY ...'),\n",
|
||||
" (AgentAction(tool='Search', tool_input='Isaac Carew age', log=' I need to find out Isaac\\'s age\\nAction: Search\\nAction Input: \"Isaac Carew age\"'),\n",
|
||||
" '36 years'),\n",
|
||||
" (AgentAction(tool='Calculator', tool_input='36^.43', log=' I need to calculate 36 raised to the .43 power\\nAction: Calculator\\nAction Input: 36^.43'),\n",
|
||||
" 'Answer: 4.6688516567750975\\n')],\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
516
docs/use_cases/evaluation/agent_vectordb_sota_pg.ipynb
Normal file
@@ -0,0 +1,516 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agent VectorDB Question Answering Benchmarking\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task using an agent to route between multiple vectordatabases.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"id": "7b57a50f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--agent-vectordb-qa-sota-pg-d3ae24016b514f92/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a7abbc20615d4c58b75a055a790d7212",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"agent-vectordb-qa-sota-pg\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "61375342",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'steps': [{'tool': 'State of Union QA System', 'tool_input': None},\n",
|
||||
" {'tool': None, 'tool_input': 'What is the purpose of the NATO Alliance?'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dataset[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "02500304",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of YC?',\n",
|
||||
" 'answer': 'The purpose of YC is to cause startups to be founded that would not otherwise have existed.',\n",
|
||||
" 'steps': [{'tool': 'Paul Graham QA System', 'tool_input': None},\n",
|
||||
" {'tool': None, 'tool_input': 'What is the purpose of YC?'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dataset[-1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing question answering. Step one in that is creating indexes over the data in question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "7f0de2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ef84ff99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore_sota = VectorstoreIndexCreator(vectorstore_kwargs={\"collection_name\":\"sota\"}).from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a question answering chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import VectorDBQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "573719a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_sota = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0), chain_type=\"stuff\", vectorstore=vectorstore_sota, input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e48b03d8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we do the same for the Paul Graham data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c2dbb014",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../modules/paul_graham_essay.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "98d16f08",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore_pg = VectorstoreIndexCreator(vectorstore_kwargs={\"collection_name\":\"paul_graham\"}).from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "ec0aab02",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_pg = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0), chain_type=\"stuff\", vectorstore=vectorstore_pg, input_key=\"question\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "76b5f8fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now set up an agent to route between them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "ade1aafa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import initialize_agent, Tool\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"State of Union QA System\",\n",
|
||||
" func=chain_sota.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\"\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name = \"Paul Graham System\",\n",
|
||||
" func=chain_pg.run,\n",
|
||||
" description=\"useful for when you need to answer questions about Paul Graham. Input should be a fully formed question.\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"id": "104853f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\", max_iterations=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7f036641",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "4664e79f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The purpose of the NATO Alliance is to promote peace and security in the North Atlantic region by providing a collective defense against potential threats.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 48,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(dataset[0]['question'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = []\n",
|
||||
"predicted_dataset = []\n",
|
||||
"error_dataset = []\n",
|
||||
"for data in dataset:\n",
|
||||
" new_data = {\"input\": data[\"question\"], \"answer\": data[\"answer\"]}\n",
|
||||
" try:\n",
|
||||
" predictions.append(agent(new_data))\n",
|
||||
" predicted_dataset.append(new_data)\n",
|
||||
" except Exception:\n",
|
||||
" error_dataset.append(new_data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'output': 'The purpose of the NATO Alliance is to promote peace and security in the North Atlantic region by providing a collective defense against potential threats.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 36,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(predicted_dataset, predictions, question_key=\"input\", prediction_key=\"output\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 19, ' INCORRECT': 14})"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What is the purpose of the Bipartisan Innovation Act mentioned in the text?',\n",
|
||||
" 'answer': 'The Bipartisan Innovation Act will make record investments in emerging technologies and American manufacturing to level the playing field with China and other competitors.',\n",
|
||||
" 'output': 'The purpose of the Bipartisan Innovation Act is to promote innovation and entrepreneurship in the United States by providing tax incentives and other support for startups and small businesses.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
160
docs/use_cases/evaluation/benchmarking_template.ipynb
Normal file
@@ -0,0 +1,160 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a175c650",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Benchmarking Template\n",
|
||||
"\n",
|
||||
"This is an example notebook that can be used to create a benchmarking notebook for a task of your choice. Evaluation is really hard, and so we greatly welcome any contributions that can make it easier for people to experiment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "9fe4d1b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f66405e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "79402a8f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This notebook should so how to load the dataset from LangChainDatasets on Hugging Face\n",
|
||||
"\n",
|
||||
"# Please upload your dataset to https://huggingface.co/LangChainDatasets\n",
|
||||
"\n",
|
||||
"# The value passed into `load_dataset` should NOT have the `LangChainDatasets/` prefix\n",
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"TODO\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"\n",
|
||||
"This next section should have an example of setting up a chain that can be run on this dataset."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a2661ce0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
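{
"cell_type": "markdown",
"id": "b81c2a4e",
"metadata": {},
"source": [
"For illustration only, here is a hedged sketch of one way this could look, borrowing the vectorstore-backed QA setup from the other benchmarking notebooks; the loader path and chain choice are placeholders to adapt for your task."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c92d3b5f",
"metadata": {},
"outputs": [],
"source": [
"# A sketch, not a prescription: any chain or agent that accepts the dataset's input key works here.\n",
"from langchain.document_loaders import TextLoader\n",
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain.chains import VectorDBQA\n",
"from langchain.llms import OpenAI\n",
"\n",
"loader = TextLoader(\"path/to/your/data.txt\")  # placeholder path\n",
"vectorstore = VectorstoreIndexCreator().from_loaders([loader]).vectorstore\n",
"chain = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0), chain_type=\"stuff\", vectorstore=vectorstore, input_key=\"question\")"
]
},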
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6c0062e7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d28c5e7d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Example of running the chain on a single datapoint (`dataset[0]`) goes here"
|
||||
]
|
||||
},
|
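{
"cell_type": "markdown",
"id": "d03e4c6a",
"metadata": {},
"source": [
"As a hedged sketch: with a chain built as above, a single prediction is often just a call on the first datapoint."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e14f5d7b",
"metadata": {},
"outputs": [],
"source": [
"# Assumes `chain` and `dataset` from the cells above; returns the chain's output dict.\n",
"chain(dataset[0])"
]
},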
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Example of running the chain on many predictions goes here\n",
|
||||
"\n",
|
||||
"# Sometimes its as simple as `chain.apply(dataset)`\n",
|
||||
"\n",
|
||||
"# Othertimes you may want to write a for loop to catch errors"
|
||||
]
|
||||
},
|
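{
"cell_type": "markdown",
"id": "f2506e8c",
"metadata": {},
"source": [
"A hedged sketch of the error-catching variant, mirroring the loop used in the SQL and agent benchmarking notebooks."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0361709d",
"metadata": {},
"outputs": [],
"source": [
"# Collect successful predictions and keep the failing datapoints for later inspection.\n",
"predictions = []\n",
"predicted_dataset = []\n",
"error_dataset = []\n",
"for data in dataset:\n",
"    try:\n",
"        predictions.append(chain(data))\n",
"        predicted_dataset.append(data)\n",
"    except Exception:\n",
"        error_dataset.append(data)"
]
},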
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"\n",
|
||||
"Any guide to evaluating performance in a more systematic manner goes here."
|
||||
]
|
||||
},
|
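{
"cell_type": "markdown",
"id": "147281ae",
"metadata": {},
"source": [
"One option, sketched here on the assumption that your datapoints and predictions share question/answer keys, is to grade the outputs with `QAEvalChain` as the QA benchmarking notebooks do."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "258392bf",
"metadata": {},
"outputs": [],
"source": [
"# Grade each prediction with an LLM; the key names below are assumptions to adapt.\n",
"from langchain.evaluation.qa import QAEvalChain\n",
"from langchain.llms import OpenAI\n",
"\n",
"eval_chain = QAEvalChain.from_llm(OpenAI(temperature=0))\n",
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
]
},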
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
374
docs/use_cases/evaluation/qa_benchmarking_pg.ipynb
Normal file
@@ -0,0 +1,374 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Question Answering Benchmarking: Paul Graham Essay\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task over a Paul Graham essay.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "3bd13ab7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--question-answering-paul-graham-76e8f711e038d742/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "63f434a42cba4739919333c75324acc9",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"question-answering-paul-graham\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing question answering. Step one in that is creating an index over the data in question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader(\"../../modules/paul_graham_essay.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "7f0de2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "ef84ff99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore = VectorstoreIndexCreator().from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a question answering chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import VectorDBQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "573719a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", vectorstore=vectorstore, input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "53b5aa23",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "3f81d951",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What were the two main things the author worked on before college?',\n",
|
||||
" 'answer': 'The two main things the author worked on before college were writing and programming.',\n",
|
||||
" 'result': ' Writing and programming.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(dataset[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = chain.apply(dataset)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What were the two main things the author worked on before college?',\n",
|
||||
" 'answer': 'The two main things the author worked on before college were writing and programming.',\n",
|
||||
" 'result': ' Writing and programming.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 12, ' INCORRECT': 10})"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What did the author write their dissertation on?',\n",
|
||||
" 'answer': 'The author wrote their dissertation on applications of continuations.',\n",
|
||||
" 'result': ' The author does not mention what their dissertation was on, so it is not known.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
451
docs/use_cases/evaluation/qa_benchmarking_sota.ipynb
Normal file
@@ -0,0 +1,451 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Question Answering Benchmarking: State of the Union Address\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task over a state of the union address.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "f127fb04",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "5d66c27b9b4744989843142f08f5c1b4",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading readme: 0%| | 0.00/21.0 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Downloading and preparing dataset json/LangChainDatasets--question-answering-state-of-the-union to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--question-answering-state-of-the-union-a7e5a3b2db4f440d/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "9e21e2ab96a0491ea5e252720d7dfa26",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "c883830e068c42d39da8406ab38574c4",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data: 0%| | 0.00/2.90k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "3b085715e52e49948d2a59d27e004eba",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Extracting data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Generating train split: 0 examples [00:00, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dataset json downloaded and prepared to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--question-answering-state-of-the-union-a7e5a3b2db4f440d/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51. Subsequent calls will reuse this data.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "ee900d35e27d4843b42b31811b43212b",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"question-answering-state-of-the-union\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ab6a716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"Now we need to create some pipelines for doing question answering. Step one in that is creating an index over the data in question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c18680b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "7f0de2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ef84ff99",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectorstore = VectorstoreIndexCreator().from_loaders([loader]).vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a question answering chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import VectorDBQA\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "573719a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", vectorstore=vectorstore, input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "37d669e9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "3089e409",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'result': ' The NATO Alliance was created to secure peace and stability in Europe after World War 2.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(dataset[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = chain.apply(dataset)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49d969fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. The first thing we can do is look at them by eye."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1d583f03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the purpose of the NATO Alliance?',\n",
|
||||
" 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n",
|
||||
" 'result': ' The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"predictions[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 7, ' INCORRECT': 4})"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the U.S. Department of Justice doing to combat the crimes of Russian oligarchs?',\n",
|
||||
" 'answer': 'The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.',\n",
|
||||
" 'result': ' The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and is naming a chief prosecutor for pandemic fraud.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
117
docs/use_cases/evaluation/qa_generation.ipynb
Normal file
@@ -0,0 +1,117 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ee2a3a21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# QA Generation\n",
|
||||
"This notebook shows how to use the `QAGenerationChain` to come up with question-answer pairs over a specific document.\n",
|
||||
"This is important because often times you may not have data to evaluate your question-answer system over, so this is a cheap and lightweight way to generate it!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "33d3f0b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2029a29c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "87edb84c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc = loader.load()[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "04125b6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import QAGenerationChain\n",
|
||||
"chain = QAGenerationChain.from_llm(ChatOpenAI(temperature = 0))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "4f1593e4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa = chain.run(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "ee831f92",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What is the U.S. Department of Justice doing to combat the crimes of Russian oligarchs?',\n",
|
||||
" 'answer': 'The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa[1]"
|
||||
]
|
||||
},
|
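{
"cell_type": "markdown",
"id": "3694a3c0",
"metadata": {},
"source": [
"As a hedged sketch, the generated pairs can then feed the same evaluation flow used in the benchmarking notebooks; `chain` below is a placeholder for whatever question answering chain you want to evaluate (with `input_key=\"question\"`)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "47a5b4d1",
"metadata": {},
"outputs": [],
"source": [
"# predictions = chain.apply(qa)\n",
"# from langchain.evaluation.qa import QAEvalChain\n",
"# from langchain.llms import OpenAI\n",
"# eval_chain = QAEvalChain.from_llm(OpenAI(temperature=0))\n",
"# graded_outputs = eval_chain.evaluate(qa, predictions, question_key=\"question\", prediction_key=\"result\")"
]
},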
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7028754e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -191,7 +191,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "782ae8c8",
|
||||
"metadata": {},
|
||||
@@ -316,7 +315,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -330,7 +329,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7 (default, Sep 16 2021, 08:50:36) \n[Clang 10.0.0 ]"
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
423
docs/use_cases/evaluation/sql_qa_benchmarking_chinook.ipynb
Normal file
@@ -0,0 +1,423 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "984169ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SQL Question Answering Benchmarking: Chinook\n",
|
||||
"\n",
|
||||
"Here we go over how to benchmark performance on a question answering task over a SQL database.\n",
|
||||
"\n",
|
||||
"It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "44874486",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Comment this out if you are NOT using tracing\n",
|
||||
"import os\n",
|
||||
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f66405e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading the data\n",
|
||||
"\n",
|
||||
"First, let's load the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "0df1393f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "b220d07ee5d14909bc842b4545cdc0de",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading readme: 0%| | 0.00/21.0 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Downloading and preparing dataset json/LangChainDatasets--sql-qa-chinook to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--sql-qa-chinook-7528565d2d992b47/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "e89e3c8ef76f49889c4b39c624828c71",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a8421df6c26045e8978c7086cb418222",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading data: 0%| | 0.00/1.44k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d1fb6becc3324a85bf039a53caf30924",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Extracting data files: 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Generating train split: 0 examples [00:00, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dataset json downloaded and prepared to /Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--sql-qa-chinook-7528565d2d992b47/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51. Subsequent calls will reuse this data.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "9d68ad1b3e4a4bd79f92597aac4d3cc9",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"dataset = load_dataset(\"sql-qa-chinook\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ab44d504",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How many employees are there?', 'answer': '8'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dataset[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a16b75d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up a chain\n",
|
||||
"This uses the example Chinook database.\n",
|
||||
"To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the `.db` file in a notebooks folder at the root of this repository.\n",
|
||||
"\n",
|
||||
"Note that here we load a simple chain. If you want to experiment with more complex chains, or an agent, just create the `chain` object in a different way."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5b2d5e98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI, SQLDatabase, SQLDatabaseChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "33cdcbfc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///../../../notebooks/Chinook.db\")\n",
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b5d8f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a SQL database chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "8843cb0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = SQLDatabaseChain(llm=llm, database=db, input_key=\"question\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6c0062e7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make a prediction\n",
|
||||
"\n",
|
||||
"First, we can make predictions one datapoint at a time. Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "d28c5e7d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How many employees are there?',\n",
|
||||
" 'answer': '8',\n",
|
||||
" 'result': ' There are 8 employees.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(dataset[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c16cd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Make many predictions\n",
|
||||
"Now we can make predictions. Note that we add a try-except because this chain can sometimes error (if SQL is written incorrectly, etc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "24b4c66e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictions = []\n",
|
||||
"predicted_dataset = []\n",
|
||||
"error_dataset = []\n",
|
||||
"for data in dataset:\n",
|
||||
" try:\n",
|
||||
" predictions.append(chain(data))\n",
|
||||
" predicted_dataset.append(data)\n",
|
||||
" except:\n",
|
||||
" error_dataset.append(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4783344b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Evaluate performance\n",
|
||||
"Now we can evaluate the predictions. We can use a language model to score them programatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "d0a9341d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.qa import QAEvalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "1612dec1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"eval_chain = QAEvalChain.from_llm(llm)\n",
|
||||
"graded_outputs = eval_chain.evaluate(predicted_dataset, predictions, question_key=\"question\", prediction_key=\"result\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "79587806",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can add in the graded output to the `predictions` dict and then get a count of the grades."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "2a689df5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i, prediction in enumerate(predictions):\n",
|
||||
" prediction['grade'] = graded_outputs[i]['text']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "27b61215",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Counter({' CORRECT': 3, ' INCORRECT': 4})"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"Counter([pred['grade'] for pred in predictions])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12fe30f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also filter the datapoints to the incorrect examples and look at them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "47c692a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"incorrect = [pred for pred in predictions if pred['grade'] == \" INCORRECT\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "0ef976c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How many employees are also customers?',\n",
|
||||
" 'answer': 'None',\n",
|
||||
" 'result': ' 59 employees are also customers.',\n",
|
||||
" 'grade': ' INCORRECT'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"incorrect[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7710401a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
31  docs/use_cases/tabular.md  Normal file
@@ -0,0 +1,31 @@
# Querying Tabular Data

Lots of data and information is stored in tabular form, whether it be CSVs, Excel sheets, or SQL tables.
This page covers all resources available in LangChain for working with data in this format.

## Document Loading
If you have text data stored in a tabular format, you may want to load the data into a Document and then index it as you would
other text/unstructured data. For this, you should use a document loader like the [CSVLoader](../modules/document_loaders/examples/csv.ipynb)
and then you should [create an index](../modules/indexes.rst) over that data, and [query it that way](../modules/indexes/chain_examples/vector_db_qa.ipynb).

## Querying
If you have more numeric tabular data, or have a large amount of data and don't want to index it, you should get started
by looking at the various chains and agents we have for dealing with this data.

### Chains

If you are just getting started and have relatively small/simple tabular data, you should start with chains.
Chains are a sequence of predetermined steps, so they are a good place to begin: they give you more control and let you
understand what is happening better.

- [SQL Database Chain](../modules/chains/examples/sqlite.ipynb)

### Agents

Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger databases and more complex schemas.

- [SQL Agent](../modules/agents/agent_toolkits/sql_database.ipynb)
- [Pandas Agent](../modules/agents/agent_toolkits/pandas.ipynb)
- [CSV Agent](../modules/agents/agent_toolkits/csv.ipynb)
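For orientation, here is a minimal sketch of the chain route the new page describes. It assumes a local SQLite database file and an `OPENAI_API_KEY` in the environment; the database URI and the question are placeholders, not part of this changeset.

```python
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

# Connect to an existing SQLite database (hypothetical path).
db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = OpenAI(temperature=0)

# The chain writes a SQL query, executes it, and phrases the answer.
db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)
db_chain.run("How many employees are there?")
```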
@@ -33,6 +33,7 @@ from langchain.llms import (
    Modal,
    OpenAI,
    Petals,
    SagemakerEndpoint,
    StochasticAI,
    Writer,
)
@@ -90,6 +91,7 @@ __all__ = [
    "ReActChain",
    "Wikipedia",
    "HuggingFaceHub",
    "SagemakerEndpoint",
    "HuggingFacePipeline",
    "SQLDatabase",
    "SQLDatabaseChain",
@@ -19,7 +19,7 @@ from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import AgentAction, AgentFinish
from langchain.schema import AgentAction, AgentFinish, BaseMessage
from langchain.tools.base import BaseTool

logger = logging.getLogger()
@@ -54,7 +54,7 @@ class Agent(BaseModel):

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
    ) -> Union[str, List[BaseMessage]]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
@@ -435,10 +435,6 @@ class AgentExecutor(Chain, BaseModel):
            llm_prefix="",
            observation_prefix=self.agent.observation_prefix,
        )
        return_direct = False
        if return_direct:
            # Set the log to "" because we do not want to log it.
            return AgentFinish({self.agent.return_values[0]: observation}, "")
        return output, observation

    async def _atake_next_step(
@@ -457,9 +453,15 @@ class AgentExecutor(Chain, BaseModel):
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
        self.callback_manager.on_agent_action(
            output, verbose=self.verbose, color="green"
        )
        if self.callback_manager.is_async:
            await self.callback_manager.on_agent_action(
                output, verbose=self.verbose, color="green"
            )
        else:
            self.callback_manager.on_agent_action(
                output, verbose=self.verbose, color="green"
            )

        # Otherwise we lookup the tool
        if output.tool in name_to_tool_map:
            tool = name_to_tool_map[output.tool]
@@ -483,9 +485,6 @@ class AgentExecutor(Chain, BaseModel):
            observation_prefix=self.agent.observation_prefix,
        )
        return_direct = False
        if return_direct:
            # Set the log to "" because we do not want to log it.
            return AgentFinish({self.agent.return_values[0]: observation}, "")
        return output, observation

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
@@ -510,6 +509,10 @@ class AgentExecutor(Chain, BaseModel):
            return self._return(next_step_output, intermediate_steps)

        intermediate_steps.append(next_step_output)
        # See if tool should return directly
        tool_return = self._get_tool_return(next_step_output)
        if tool_return is not None:
            return self._return(tool_return, intermediate_steps)
        iterations += 1
        output = self.agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
@@ -538,8 +541,28 @@ class AgentExecutor(Chain, BaseModel):
            return await self._areturn(next_step_output, intermediate_steps)

        intermediate_steps.append(next_step_output)
        # See if tool should return directly
        tool_return = self._get_tool_return(next_step_output)
        if tool_return is not None:
            return await self._areturn(tool_return, intermediate_steps)

        iterations += 1
        output = self.agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
        )
        return await self._areturn(output, intermediate_steps)

    def _get_tool_return(
        self, next_step_output: Tuple[AgentAction, str]
    ) -> Optional[AgentFinish]:
        """Check if the tool is a returning tool."""
        agent_action, observation = next_step_output
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # Invalid tools won't be in the map, so we return False.
        if agent_action.tool in name_to_tool_map:
            if name_to_tool_map[agent_action.tool].return_direct:
                return AgentFinish(
                    {self.agent.return_values[0]: observation},
                    "",
                )
        return None
@@ -18,6 +18,7 @@ from langchain.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
)
from langchain.agents.agent_toolkits.zapier.toolkit import ZapierToolkit

__all__ = [
    "create_json_agent",
@@ -34,4 +35,5 @@ __all__ = [
    "VectorStoreRouterToolkit",
    "create_pandas_dataframe_agent",
    "create_csv_agent",
    "ZapierToolkit",
]
1  langchain/agents/agent_toolkits/zapier/__init__.py  Normal file
@@ -0,0 +1 @@
"""Zapier Toolkit."""

34  langchain/agents/agent_toolkits/zapier/toolkit.py  Normal file
@@ -0,0 +1,34 @@
"""Zapier Toolkit."""
from typing import List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper


class ZapierToolkit(BaseToolkit):
    """Zapier Toolkit."""

    tools: List[BaseTool] = []

    @classmethod
    def from_zapier_nla_wrapper(
        cls, zapier_nla_wrapper: ZapierNLAWrapper
    ) -> "ZapierToolkit":
        """Create a toolkit from a ZapierNLAWrapper."""
        actions = zapier_nla_wrapper.list()
        tools = [
            ZapierNLARunAction(
                action_id=action["id"],
                zapier_description=action["description"],
                params_schema=action["params"],
                api_wrapper=zapier_nla_wrapper,
            )
            for action in actions
        ]
        return cls(tools=tools)

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        return self.tools
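A minimal usage sketch for the new toolkit (assuming Zapier NLA credentials are already configured in the environment for `ZapierNLAWrapper`; the resulting tools plug into an agent the same way any other tool list does):

```python
from langchain.agents.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper

# One ZapierNLARunAction tool is built per action exposed by the NLA account.
zapier = ZapierNLAWrapper()
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
tools = toolkit.get_tools()
```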
@@ -32,6 +32,8 @@ class ChatAgent(Agent):
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
        agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
        if not isinstance(agent_scratchpad, str):
            raise ValueError("agent_scratchpad should be of type string.")
        if agent_scratchpad:
            return (
                f"This was your previous work "
@@ -44,10 +46,13 @@ class ChatAgent(Agent):
    def _extract_tool_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        if FINAL_ANSWER_ACTION in text:
            return "Final Answer", text.split(FINAL_ANSWER_ACTION)[-1].strip()
        _, action, _ = text.split("```")
        try:
            _, action, _ = text.split("```")
            response = json.loads(action.strip())
            return response["action"], response["action_input"]

        response = json.loads(action.strip())
        return response["action"], response["action_input"]
        except Exception:
            raise ValueError(f"Could not parse LLM output: {text}")

    @property
    def _stop(self) -> List[str]:
@@ -70,9 +75,9 @@ class ChatAgent(Agent):
            SystemMessagePromptTemplate.from_template(template),
            HumanMessagePromptTemplate.from_template("{input}\n\n{agent_scratchpad}"),
        ]
        return ChatPromptTemplate(
            input_variables=["input", "agent_scratchpad"], messages=messages
        )
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

    @classmethod
    def from_llm_and_tools(
@@ -18,7 +18,7 @@ ALWAYS use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action:
Action:
```
$JSON_BLOB
```
1  langchain/agents/conversational_chat/__init__.py  Normal file
@@ -0,0 +1 @@
"""An agent designed to hold a conversation in addition to using tools."""

157  langchain/agents/conversational_chat/base.py  Normal file
@@ -0,0 +1,157 @@
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations

import json
from typing import Any, List, Optional, Sequence, Tuple

from langchain.agents.agent import Agent
from langchain.agents.conversational_chat.prompt import (
    FORMAT_INSTRUCTIONS,
    PREFIX,
    SUFFIX,
    TEMPLATE_TOOL_RESPONSE,
)
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.output_parsers.base import BaseOutputParser
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain.schema import (
    AgentAction,
    AIMessage,
    BaseLanguageModel,
    BaseMessage,
    HumanMessage,
)
from langchain.tools.base import BaseTool


class AgentOutputParser(BaseOutputParser):
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Any:
        cleaned_output = text.strip()
        if "```json" in cleaned_output:
            _, cleaned_output = cleaned_output.split("```json")
        if "```" in cleaned_output:
            cleaned_output, _ = cleaned_output.split("```")
        if cleaned_output.startswith("```json"):
            cleaned_output = cleaned_output[len("```json") :]
        if cleaned_output.startswith("```"):
            cleaned_output = cleaned_output[len("```") :]
        if cleaned_output.endswith("```"):
            cleaned_output = cleaned_output[: -len("```")]
        cleaned_output = cleaned_output.strip()
        response = json.loads(cleaned_output)
        return {"action": response["action"], "action_input": response["action_input"]}


class ConversationalChatAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    output_parser: BaseOutputParser

    @property
    def _agent_type(self) -> str:
        raise NotImplementedError

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message: str = PREFIX,
        human_message: str = SUFFIX,
        input_variables: Optional[List[str]] = None,
        output_parser: Optional[BaseOutputParser] = None,
    ) -> BasePromptTemplate:
        tool_strings = "\n".join(
            [f"> {tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        _output_parser = output_parser or AgentOutputParser()
        format_instructions = human_message.format(
            format_instructions=_output_parser.get_format_instructions()
        )
        final_prompt = format_instructions.format(
            tool_names=tool_names, tools=tool_strings
        )
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        messages = [
            SystemMessagePromptTemplate.from_template(system_message),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template(final_prompt),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

    def _extract_tool_and_input(self, llm_output: str) -> Optional[Tuple[str, str]]:
        try:
            response = self.output_parser.parse(llm_output)
            return response["action"], response["action_input"]
        except Exception:
            raise ValueError(f"Could not parse LLM output: {llm_output}")

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> List[BaseMessage]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts: List[BaseMessage] = []
        for action, observation in intermediate_steps:
            thoughts.append(AIMessage(content=action.log))
            human_message = HumanMessage(
                content=TEMPLATE_TOOL_RESPONSE.format(observation=observation)
            )
            thoughts.append(human_message)
        return thoughts

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        system_message: str = PREFIX,
        human_message: str = SUFFIX,
        input_variables: Optional[List[str]] = None,
        output_parser: Optional[BaseOutputParser] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        _output_parser = output_parser or AgentOutputParser()
        prompt = cls.create_prompt(
            tools,
            system_message=system_message,
            human_message=human_message,
            input_variables=input_variables,
            output_parser=_output_parser,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
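A minimal wiring sketch for the new agent (the "chat-conversational-react-description" registry key is added to AGENT_TO_CLASS further down in this changeset; `tools` is a placeholder, and note that the prompt above expects a `chat_history` list of messages, so a chat-message-style memory has to supply it at run time):

```python
from langchain.agents import initialize_agent
from langchain.chat_models import ChatOpenAI

# `tools` is any list of BaseTool instances, e.g. from a toolkit's get_tools().
agent_executor = initialize_agent(
    tools,
    ChatOpenAI(temperature=0),
    agent="chat-conversational-react-description",
)
```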
57  langchain/agents/conversational_chat/prompt.py  Normal file
@@ -0,0 +1,57 @@
# flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.

Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.

Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.

Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."""

FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS
----------------------------

When responding to me, please output a response in one of two formats:

**Option 1:**
Use this if you want the human to use a tool.
Markdown code snippet formatted in the following schema:

```json
{{{{
    "action": string \\ The action to take. Must be one of {tool_names}
    "action_input": string \\ The input to the action
}}}}
```

**Option 2:**
Use this if you want to respond directly to the human. Markdown code snippet formatted in the following schema:

```json
{{{{
    "action": "Final Answer",
    "action_input": string \\ You should put what you want to return to the user here
}}}}
```"""

SUFFIX = """TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:

{{tools}}

{format_instructions}

USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):

{{{{input}}}}"""

TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
---------------------
{observation}

USER'S INPUT
--------------------

Okay, so what is the response to my original question? If using information from tools, you must say it explicitly - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."""
@@ -27,7 +27,9 @@ def initialize_agent(
            `react-docstore`
            `self-ask-with-search`
            `conversational-react-description`
        If None and agent_path is also None, will default to
            `chat-zero-shot-react-description`,
            `chat-conversational-react-description`,
        If None and agent_path is also None, will default to
            `zero-shot-react-description`.
    callback_manager: CallbackManager to use. Global callback manager is used if
        not provided. Defaults to None.
@@ -8,6 +8,7 @@ import yaml
from langchain.agents.agent import Agent
from langchain.agents.chat.base import ChatAgent
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
@@ -22,6 +23,7 @@ AGENT_TO_CLASS = {
    "self-ask-with-search": SelfAskWithSearchAgent,
    "conversational-react-description": ConversationalAgent,
    "chat-zero-shot-react-description": ChatAgent,
    "chat-conversational-react-description": ConversationalChatAgent,
}

URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
@@ -16,6 +16,7 @@ from langchain.chains.loading import load_chain
from langchain.chains.mapreduce import MapReduceChain
from langchain.chains.moderation import OpenAIModerationChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_generation.base import QAGenerationChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
@@ -52,4 +53,5 @@ __all__ = [
    "ChatVectorDBChain",
    "GraphQAChain",
    "ConstitutionalChain",
    "QAGenerationChain",
]
@@ -9,7 +9,7 @@ from pydantic import BaseModel, Extra, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import RegexParser
from langchain.output_parsers.regex import RegexParser


class MapRerankDocumentsChain(BaseCombineDocumentsChain, BaseModel):
0  langchain/chains/dbpedia/__init__.py  Normal file
61  langchain/chains/dbpedia/base.py  Normal file
@@ -0,0 +1,61 @@
from __future__ import annotations

from typing import Any, Dict, List, Optional

from langchain.chains.base import Chain
from langchain.chains.dbpedia.prompt import ANSWER_PROMPT_SELECTOR, PROMPT_SELECTOR
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class DBPediaChain(Chain):
    query_chain: LLMChain
    answer_chain: LLMChain
    input_key: str = "question"
    output_key: str = "answer"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        query_prompt: Optional[BasePromptTemplate] = None,
        answer_prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> DBPediaChain:
        query_prompt = query_prompt or PROMPT_SELECTOR.get_prompt(llm)
        query_chain = LLMChain(llm=llm, prompt=query_prompt)
        answer_prompt = answer_prompt or ANSWER_PROMPT_SELECTOR.get_prompt(llm)
        answer_chain = LLMChain(llm=llm, prompt=answer_prompt)
        return cls(query_chain=query_chain, answer_chain=answer_chain, **kwargs)

    @property
    def input_keys(self) -> List[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        from SPARQLWrapper import JSON, SPARQLWrapper

        sparql = SPARQLWrapper("http://dbpedia.org/sparql")
        sparql.setReturnFormat(JSON)
        query = self.query_chain.run(inputs[self.input_key])
        self.callback_manager.on_text("Query written:", end="\n", verbose=self.verbose)
        self.callback_manager.on_text(
            query, color="green", end="\n", verbose=self.verbose
        )
        sparql.setQuery(query)
        result = sparql.query().convert()
        self.callback_manager.on_text(
            "Response gotten:", end="\n", verbose=self.verbose
        )
        self.callback_manager.on_text(
            result, color="green", end="\n", verbose=self.verbose
        )
        answer = self.answer_chain.run(
            question=inputs[self.input_key], query=query, response=result
        )
        return {self.output_key: answer}
52  langchain/chains/dbpedia/prompt.py  Normal file
@@ -0,0 +1,52 @@
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import HumanMessage

TEMPLATE = """Write a SPARQL query to execute against DBPedia to answer the following question

Question: {question}
SPARQL Query:"""
PROMPT = PromptTemplate.from_template(TEMPLATE)

INSTRUCTIONS_TEMPLATE = """Write a SPARQL query to execute against DBPedia to answer the following question.
Your answer should be a valid SPARQL query and NOTHING else.
Always return just a SPARQL query."""
INSTRUCTIONS = HumanMessage(content=INSTRUCTIONS_TEMPLATE)
CHAT_PROMPT = ChatPromptTemplate.from_messages(
    [INSTRUCTIONS, HumanMessagePromptTemplate.from_template("{question}")]
)

PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)

ANSWER_TEMPLATE = """Write a SPARQL query to execute against DBPedia to answer the following question

Question: {question}
SPARQL Query: {query}
SPARQL Response: {response}
Final Answer (in plain English):"""
ANSWER_PROMPT = PromptTemplate.from_template(ANSWER_TEMPLATE)

ANSWER_INSTRUCTIONS_TEMPLATE = """I wrote this SPARQL query:
----------
{query}
----------

I got this response:
----------
{response}
----------

Now, use the above information to answer my next question."""
ANSWER_INSTRUCTIONS = HumanMessagePromptTemplate.from_template(
    ANSWER_INSTRUCTIONS_TEMPLATE
)
ANSWER_CHAT_PROMPT = ChatPromptTemplate.from_messages(
    [ANSWER_INSTRUCTIONS, HumanMessagePromptTemplate.from_template("{question}")]
)

ANSWER_PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=ANSWER_PROMPT, conditionals=[(is_chat_model, ANSWER_CHAT_PROMPT)]
)
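A minimal usage sketch for the new chain (it imports SPARQLWrapper lazily inside `_call`, so `pip install SPARQLWrapper` is assumed; the question is a placeholder):

```python
from langchain.llms import OpenAI
from langchain.chains.dbpedia.base import DBPediaChain

# Writes a SPARQL query, runs it against dbpedia.org, then phrases the
# JSON response as a plain-English answer.
chain = DBPediaChain.from_llm(OpenAI(temperature=0), verbose=True)
chain.run("What is the population of Paris?")
```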
0  langchain/chains/qa_generation/__init__.py  Normal file
55  langchain/chains/qa_generation/base.py  Normal file
@@ -0,0 +1,55 @@
from __future__ import annotations

import json
from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter


class QAGenerationChain(Chain):
    llm_chain: LLMChain
    text_splitter: TextSplitter = Field(
        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
    )
    input_key: str = "text"
    output_key: str = "questions"
    k: Optional[int] = None

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> QAGenerationChain:
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        raise NotImplementedError

    @property
    def input_keys(self) -> List[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        docs = self.text_splitter.create_documents([inputs[self.input_key]])
        results = self.llm_chain.generate([{"text": d.page_content} for d in docs])
        qa = [json.loads(res[0].text) for res in results.generations]
        return {self.output_key: qa}

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        raise NotImplementedError
50  langchain/chains/qa_generation/prompt.py  Normal file
@@ -0,0 +1,50 @@
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate

templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
    "question": "$YOUR_QUESTION_HERE",
    "answer": "$THE_ANSWER_HERE"
}}
```

Everything between the ``` must be valid json.
"""
templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}"""
CHAT_PROMPT = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(templ1),
        HumanMessagePromptTemplate.from_template(templ2),
    ]
)
templ = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
    "question": "$YOUR_QUESTION_HERE",
    "answer": "$THE_ANSWER_HERE"
}}
```

Everything between the ``` must be valid json.

Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}"""
PROMPT = PromptTemplate.from_template(templ)

PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
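A minimal usage sketch for the new chain (the source text is a placeholder; each chunk produced by the splitter yields one question/answer dict):

```python
from langchain.chains import QAGenerationChain
from langchain.chat_models import ChatOpenAI

chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
text = open("my_document.txt").read()  # placeholder source text
# One {"question": ..., "answer": ...} dict per chunk produced by the splitter.
qa_pairs = chain.run(text)
```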
@@ -155,7 +155,7 @@ def _load_refine_chain(
    **kwargs: Any,
) -> RefineDocumentsChain:
    _question_prompt = (
        question_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt(llm)
        question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
    )
    _refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt(
        llm
@@ -1,6 +1,6 @@
# flake8: noqa
from langchain.prompts import PromptTemplate
from langchain.prompts.base import RegexParser
from langchain.output_parsers.regex import RegexParser

output_parser = RegexParser(
    regex=r"(.*?)\nScore: (.*)",
@@ -117,6 +117,8 @@ class SQLDatabaseSequentialChain(Chain, BaseModel):
    This is useful in cases where the number of tables in the database is large.
    """

    return_intermediate_steps: bool = False

    @classmethod
    def from_llm(
        cls,
@@ -154,7 +156,10 @@ class SQLDatabaseSequentialChain(Chain, BaseModel):

        :meta private:
        """
        return [self.output_key]
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        _table_names = self.sql_chain.database.get_table_names()
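A short sketch of the new flag (assuming the usual `from_llm(llm, database, ...)` construction for this chain; the database URI and question are placeholders):

```python
from langchain import OpenAI, SQLDatabase
from langchain.chains.sql_database.base import SQLDatabaseSequentialChain

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # hypothetical database
chain = SQLDatabaseSequentialChain.from_llm(
    OpenAI(temperature=0), db, return_intermediate_steps=True
)
result = chain("How many employees are there?")
result["intermediate_steps"]  # now returned alongside the final answer
```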
@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.base import CommaSeparatedListOutputParser
from langchain.output_parsers.list import CommaSeparatedListOutputParser
from langchain.prompts.prompt import PromptTemplate

_DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.
@@ -1,4 +1,5 @@
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.openai import ChatOpenAI
from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI

__all__ = ["ChatOpenAI", "PromptLayerChatOpenAI"]
__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI"]
178  langchain/chat_models/azure_openai.py  Normal file
@@ -0,0 +1,178 @@
"""Azure OpenAI chat wrapper."""
from __future__ import annotations

import logging
from typing import Any, Dict, List, Mapping, Optional, Tuple

from pydantic import root_validator

from langchain.chat_models.openai import (
    ChatOpenAI,
    acompletion_with_retry,
)
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatResult,
)
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__file__)


def _create_chat_prompt(messages: List[BaseMessage]) -> str:
    """Create a prompt for Azure OpenAI using ChatML."""
    prompt = "\n".join([message.format_chatml() for message in messages])
    return prompt + "\n<|im_start|>assistant\n"


def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
    generations = []
    for res in response["choices"]:
        message = AIMessage(content=res["text"])
        gen = ChatGeneration(message=message)
        generations.append(gen)
    return ChatResult(generations=generations)


class AzureChatOpenAI(ChatOpenAI):
    """Wrapper around Azure OpenAI Chat large language models.

    To use, you should have the ``openai`` python package installed, and the
    following environment variables set:
    - ``OPENAI_API_TYPE``
    - ``OPENAI_API_KEY``
    - ``OPENAI_API_BASE``
    - ``OPENAI_API_VERSION``

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.chat_models import AzureChatOpenAI
            openai = AzureChatOpenAI(deployment_name="<your deployment name>")
    """

    deployment_name: str = ""
    stop: List[str] = ["<|im_end|>"]

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        openai_api_key = get_from_dict_or_env(
            values,
            "openai_api_key",
            "OPENAI_API_KEY",
        )
        openai_api_base = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
        )
        openai_api_version = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
        )
        openai_api_type = get_from_dict_or_env(
            values,
            "openai_api_type",
            "OPENAI_API_TYPE",
        )
        try:
            import openai

            openai.api_type = openai_api_type
            openai.api_base = openai_api_base
            openai.api_version = openai_api_version
            openai.api_key = openai_api_key
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.Completion
        except AttributeError:
            raise ValueError(
                "`openai` has no `Completion` attribute, this is likely "
                "due to an old version of the openai package. Try upgrading it "
                "with `pip install --upgrade openai`."
            )
        if values["n"] < 1:
            raise ValueError("n must be at least 1.")
        if values["n"] > 1 and values["streaming"]:
            raise ValueError("n must be 1 when streaming.")
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        return {
            **super()._default_params,
            "stop": self.stop,
        }

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        prompt, params = self._create_prompt(messages, stop)
        if self.streaming:
            inner_completion = ""
            params["stream"] = True
            for stream_resp in self.completion_with_retry(prompt=prompt, **params):
                token = stream_resp["choices"][0]["delta"].get("text", "")
                inner_completion += token
                self.callback_manager.on_llm_new_token(
                    token,
                    verbose=self.verbose,
                )
            message = AIMessage(content=inner_completion)
            return ChatResult(generations=[ChatGeneration(message=message)])
        response = self.completion_with_retry(prompt=prompt, **params)
        return _create_chat_result(response)

    def _create_prompt(
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[str, Dict[str, Any]]:
        params: Dict[str, Any] = {
            **{"model": self.model_name, "engine": self.deployment_name},
            **self._default_params,
        }
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        prompt = _create_chat_prompt(messages)
        return prompt, params

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        prompt, params = self._create_prompt(messages, stop)
        if self.streaming:
            inner_completion = ""
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, prompt=prompt, **params
            ):
                token = stream_resp["choices"][0]["delta"].get("text", "")
                inner_completion += token
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
            message = AIMessage(content=inner_completion)
            return ChatResult(generations=[ChatGeneration(message=message)])
        else:
            response = await acompletion_with_retry(self, prompt=prompt, **params)
            return _create_chat_result(response)
@@ -12,6 +12,7 @@ from langchain.schema import (
    BaseMessage,
    ChatGeneration,
    ChatResult,
    HumanMessage,
    LLMResult,
    PromptValue,
)
@@ -42,19 +43,26 @@ class BaseChatModel(BaseLanguageModel, BaseModel, ABC):
        """
        return callback_manager or get_callback_manager()

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        return {}

    def generate(
        self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Top Level call"""
        results = [self._generate(m, stop=stop) for m in messages]
        return LLMResult(generations=[res.generations for res in results])
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        return LLMResult(generations=generations, llm_output=llm_output)

    async def agenerate(
        self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Top Level call"""
        results = [await self._agenerate(m, stop=stop) for m in messages]
        return LLMResult(generations=[res.generations for res in results])
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        return LLMResult(generations=generations, llm_output=llm_output)

    def generate_prompt(
        self, prompts: List[PromptValue], stop: Optional[List[str]] = None
@@ -116,13 +124,17 @@ class BaseChatModel(BaseLanguageModel, BaseModel, ABC):
    ) -> BaseMessage:
        return self._generate(messages, stop=stop).generations[0].message

    def call_as_llm(self, message: str, stop: Optional[List[str]] = None) -> str:
        result = self([HumanMessage(content=message)], stop=stop)
        return result.content


class SimpleChatModel(BaseChatModel):
    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        output_str = self._call(messages, stop=stop)
        message = AIMessage(text=output_str)
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])
@@ -97,7 +97,8 @@ def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
        message = _convert_dict_to_message(res["message"])
        gen = ChatGeneration(message=message)
        generations.append(gen)
    return ChatResult(generations=generations)
    llm_output = {"token_usage": response["usage"]}
    return ChatResult(generations=generations, llm_output=llm_output)


class ChatOpenAI(BaseChatModel, BaseModel):
@@ -128,7 +129,7 @@ class ChatOpenAI(BaseChatModel, BaseModel):
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: int = 256
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""

    class Config:
@@ -221,6 +222,19 @@ class ChatOpenAI(BaseChatModel, BaseModel):

        return _completion_with_retry(**kwargs)

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        overall_token_usage: dict = {}
        for output in llm_outputs:
            if output is None:
                raise ValueError("Should always be something for OpenAI.")
            token_usage = output["token_usage"]
            for k, v in token_usage.items():
                if k in overall_token_usage:
                    overall_token_usage[k] += v
                else:
                    overall_token_usage[k] = v
        return {"token_usage": overall_token_usage}

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
@@ -317,3 +331,41 @@ class ChatOpenAI(BaseChatModel, BaseModel):

        # calculate the number of tokens in the encoded text
        return len(tokenized_text)

    def get_num_tokens_from_messages(
        self, messages: List[BaseMessage], model: str = "gpt-3.5-turbo-0301"
    ) -> int:
        """Calculate num tokens for gpt-3.5-turbo with the tiktoken package."""
        try:
            import tiktoken
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )

        """Returns the number of tokens used by a list of messages."""
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")
        if model == "gpt-3.5-turbo-0301":  # note: future models may deviate from this
            num_tokens = 0
            messages_dict = [_convert_message_to_dict(m) for m in messages]
            for message in messages_dict:
                # every message follows <im_start>{role/name}\n{content}<im_end>\n
                num_tokens += 4
                for key, value in message.items():
                    num_tokens += len(encoding.encode(value))
                    if key == "name":  # if there's a name, the role is omitted
                        num_tokens += -1  # role is always required and always 1 token
            num_tokens += 2  # every reply is primed with <im_start>assistant
            return num_tokens
        else:
            raise NotImplementedError(
                f"get_num_tokens_from_messages() is not presently implemented "
                f"for model {model}. "
                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                "information on how messages are converted to tokens."
            )
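A quick sketch of the new helper (requires `tiktoken`; the messages are placeholders):

```python
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo")
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="How many employees are there?"),
]
# Mirrors OpenAI's published per-message accounting for gpt-3.5-turbo-0301.
chat.get_num_tokens_from_messages(messages)
```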
@@ -17,8 +17,12 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
    promptlayer key respectively.

    All parameters that can be passed to the OpenAI LLM can also
    be passed here. The PromptLayerChatOpenAI LLM adds an extra
    ``pl_tags`` parameter that can be used to tag the request.
    be passed here. The PromptLayerChatOpenAI adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

@@ -28,6 +32,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
@@ -43,7 +48,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
            response_dict, params = super()._create_message_dicts(
                [generation.message], stop
            )
            promptlayer_api_request(
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerChatOpenAI",
                "langchain",
                message_dicts,
@@ -53,7 +58,14 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
@@ -70,7 +82,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
            response_dict, params = super()._create_message_dicts(
                [generation.message], stop
            )
            promptlayer_api_request(
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerChatOpenAI.async",
                "langchain",
                message_dicts,
@@ -80,5 +92,12 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
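A short sketch of the new ``return_pl_id`` flag (assuming a PromptLayer API key is configured; the message is a placeholder):

```python
from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(return_pl_id=True)
result = chat.generate([[HumanMessage(content="hello")]])
# Each generation now carries the PromptLayer request ID.
pl_request_id = result.generations[0][0].generation_info["pl_request_id"]
```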
@@ -2,9 +2,10 @@

from langchain.document_loaders.airbyte_json import AirbyteJSONLoader
from langchain.document_loaders.azlyrics import AZLyricsLoader
from langchain.document_loaders.blackboard import BlackboardLoader
from langchain.document_loaders.college_confidential import CollegeConfidentialLoader
from langchain.document_loaders.conllu import CoNLLULoader
from langchain.document_loaders.csv import CSVLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.docx import UnstructuredDocxLoader
from langchain.document_loaders.email import UnstructuredEmailLoader
@@ -17,6 +18,7 @@ from langchain.document_loaders.googledrive import GoogleDriveLoader
from langchain.document_loaders.gutenberg import GutenbergLoader
from langchain.document_loaders.hn import HNLoader
from langchain.document_loaders.html import UnstructuredHTMLLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.ifixit import IFixitLoader
from langchain.document_loaders.image import UnstructuredImageLoader
from langchain.document_loaders.imsdb import IMSDbLoader
@@ -24,11 +26,11 @@ from langchain.document_loaders.markdown import UnstructuredMarkdownLoader
from langchain.document_loaders.notebook import NotebookLoader
from langchain.document_loaders.notion import NotionDirectoryLoader
from langchain.document_loaders.obsidian import ObsidianLoader
from langchain.document_loaders.online_pdf import OnlinePDFLoader
from langchain.document_loaders.paged_pdf import PagedPDFSplitter
from langchain.document_loaders.pdf import (
    OnlinePDFLoader,
    PDFMinerLoader,
    PyMuPDFLoader,
    PyPDFLoader,
    UnstructuredPDFLoader,
)
from langchain.document_loaders.powerpoint import UnstructuredPowerPointLoader
@@ -52,6 +54,9 @@ from langchain.document_loaders.youtube import (
    YoutubeLoader,
)

"""Legacy: only for backwards compat. Use PyPDFLoader instead."""
PagedPDFSplitter = PyPDFLoader

__all__ = [
    "UnstructuredFileLoader",
    "UnstructuredFileIOLoader",
@@ -61,6 +66,7 @@ __all__ = [
    "ReadTheDocsLoader",
    "GoogleDriveLoader",
    "UnstructuredHTMLLoader",
    "BSHTMLLoader",
    "UnstructuredPowerPointLoader",
    "UnstructuredWordDocumentLoader",
    "UnstructuredPDFLoader",
@@ -85,6 +91,7 @@ __all__ = [
    "IFixitLoader",
    "GutenbergLoader",
    "PagedPDFSplitter",
    "PyPDFLoader",
    "EverNoteLoader",
    "AirbyteJSONLoader",
    "OnlinePDFLoader",
@@ -98,4 +105,5 @@ __all__ = [
    "GoogleApiYoutubeLoader",
    "GoogleApiClient",
    "CSVLoader",
    "BlackboardLoader",
]
293
langchain/document_loaders/blackboard.py
Normal file
293
langchain/document_loaders/blackboard.py
Normal file
@@ -0,0 +1,293 @@
|
||||
"""Loader that loads all documents from a blackboard course."""
|
||||
import contextlib
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, List, Optional, Tuple
|
||||
from urllib.parse import unquote
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.directory import DirectoryLoader
|
||||
from langchain.document_loaders.pdf import PyPDFLoader
|
||||
from langchain.document_loaders.web_base import WebBaseLoader
|
||||
|
||||
|
||||
class BlackboardLoader(WebBaseLoader):
|
||||
"""Loader that loads all documents from a Blackboard course.
|
||||
|
||||
This loader is not compatible with all Blackboard courses. It is only
|
||||
compatible with courses that use the new Blackboard interface.
|
||||
To use this loader, you must have the BbRouter cookie. You can get this
|
||||
cookie by logging into the course and then copying the value of the
|
||||
BbRouter cookie from the browser's developer tools.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain.document_loaders import BlackboardLoader
|
||||
|
||||
loader = BlackboardLoader(
|
||||
blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
|
||||
bbrouter="expires:12345...",
|
||||
)
|
||||
documents = loader.load()
|
||||
|
||||
"""
|
||||
|
||||
base_url: str
|
||||
folder_path: str
|
||||
load_all_recursively: bool
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
blackboard_course_url: str,
|
||||
bbrouter: str,
|
||||
load_all_recursively: bool = True,
|
||||
basic_auth: Optional[Tuple[str, str]] = None,
|
||||
cookies: Optional[dict] = None,
|
||||
):
|
||||
"""Initialize with blackboard course url.
|
||||
|
||||
The BbRouter cookie is required for most blackboard courses.
|
||||
|
||||
Args:
|
||||
blackboard_course_url: Blackboard course url.
|
||||
bbrouter: BbRouter cookie.
|
||||
load_all_recursively: If True, load all documents recursively.
|
||||
basic_auth: Basic auth credentials.
|
||||
cookies: Cookies.
|
||||
|
||||
Raises:
|
||||
ValueError: If blackboard course url is invalid.
|
||||
"""
|
||||
super().__init__(blackboard_course_url)
|
||||
# Get base url
|
||||
try:
|
||||
self.base_url = blackboard_course_url.split("/webapps/blackboard")[0]
|
||||
except IndexError:
|
||||
raise ValueError(
|
||||
"Invalid blackboard course url. "
|
||||
"Please provide a url that starts with "
|
||||
"https://<blackboard_url>/webapps/blackboard"
|
||||
)
|
||||
        if basic_auth is not None:
            self.session.auth = basic_auth
        # Combine cookies
        if cookies is None:
            cookies = {}
        cookies.update({"BbRouter": bbrouter})
        self.session.cookies.update(cookies)
        self.load_all_recursively = load_all_recursively
        self.check_bs4()

    def check_bs4(self) -> None:
        """Check if BeautifulSoup4 is installed.

        Raises:
            ImportError: If BeautifulSoup4 is not installed.
        """
        try:
            import bs4  # noqa: F401
        except ImportError:
            raise ImportError(
                "BeautifulSoup4 is required for BlackboardLoader. "
                "Please install it with `pip install beautifulsoup4`."
            )

    def load(self) -> List[Document]:
        """Load data into document objects.

        Returns:
            List of documents.
        """
        if self.load_all_recursively:
            soup_info = self.scrape()
            self.folder_path = self._get_folder_path(soup_info)
            relative_paths = self._get_paths(soup_info)
            documents = []
            for path in relative_paths:
                url = self.base_url + path
                print(f"Fetching documents from {url}")
                soup_info = self._scrape(url)
                with contextlib.suppress(ValueError):
                    documents.extend(self._get_documents(soup_info))
            return documents
        else:
            print(f"Fetching documents from {self.web_path}")
            soup_info = self.scrape()
            self.folder_path = self._get_folder_path(soup_info)
            return self._get_documents(soup_info)

    def _get_folder_path(self, soup: Any) -> str:
        """Get the folder path to save the documents in.

        Args:
            soup: BeautifulSoup4 soup object.

        Returns:
            Folder path.
        """
        # Get the course name
        course_name = soup.find("span", {"id": "crumb_1"})
        if course_name is None:
            raise ValueError("No course name found.")
        course_name = course_name.text.strip()
        # Prepare the folder path
        course_name_clean = (
            unquote(course_name)
            .replace(" ", "_")
            .replace("/", "_")
            .replace(":", "_")
            .replace(",", "_")
            .replace("?", "_")
            .replace("'", "_")
            .replace("!", "_")
            .replace('"', "_")
        )
        # Get the folder path
        folder_path = Path(".") / course_name_clean
        return str(folder_path)

    def _get_documents(self, soup: Any) -> List[Document]:
        """Fetch content from page and return Documents.

        Args:
            soup: BeautifulSoup4 soup object.

        Returns:
            List of documents.
        """
        attachments = self._get_attachments(soup)
        self._download_attachments(attachments)
        documents = self._load_documents()
        return documents

    def _get_attachments(self, soup: Any) -> List[str]:
        """Get all attachments from a page.

        Args:
            soup: BeautifulSoup4 soup object.

        Returns:
            List of attachments.
        """
        from bs4 import BeautifulSoup, Tag

        # Get content list
        content_list = soup.find("ul", {"class": "contentList"})
        if content_list is None:
            raise ValueError("No content list found.")
        content_list: BeautifulSoup  # type: ignore
        # Get all attachments
        attachments = []
        for attachment in content_list.find_all("ul", {"class": "attachments"}):
            attachment: Tag  # type: ignore
            for link in attachment.find_all("a"):
                link: Tag  # type: ignore
                href = link.get("href")
                # Only add if href is not None and does not start with #
                if href is not None and not href.startswith("#"):
                    attachments.append(href)
        return attachments

    def _download_attachments(self, attachments: List[str]) -> None:
        """Download all attachments.

        Args:
            attachments: List of attachments.
        """
        # Make sure the folder exists
        Path(self.folder_path).mkdir(parents=True, exist_ok=True)
        # Download all attachments
        for attachment in attachments:
            self.download(attachment)

    def _load_documents(self) -> List[Document]:
        """Load all documents in the folder.

        Returns:
            List of documents.
        """
        # Create the document loader
        loader = DirectoryLoader(
            path=self.folder_path, glob="*.pdf", loader_cls=PyPDFLoader  # type: ignore
        )
        # Load the documents
        documents = loader.load()
        # Return all documents
        return documents

    def _get_paths(self, soup: Any) -> List[str]:
        """Get all relative paths in the navbar."""
        relative_paths = []
        course_menu = soup.find("ul", {"class": "courseMenu"})
        if course_menu is None:
            raise ValueError("No course menu found.")
        for link in course_menu.find_all("a"):
            href = link.get("href")
            if href is not None and href.startswith("/"):
                relative_paths.append(href)
        return relative_paths

    def download(self, path: str) -> None:
        """Download a file from a url.

        Args:
            path: Path to the file.
        """
        # Get the file content
        response = self.session.get(self.base_url + path, allow_redirects=True)
        # Get the filename
        filename = self.parse_filename(response.url)
        # Write the file to disk
        with open(Path(self.folder_path) / filename, "wb") as f:
            f.write(response.content)

    def parse_filename(self, url: str) -> str:
        """Parse the filename from a url.

        Args:
            url: Url to parse the filename from.

        Returns:
            The filename.
        """
        if (url_path := Path(url)) and url_path.suffix == ".pdf":
            return url_path.name
        else:
            return self._parse_filename_from_url(url)

    def _parse_filename_from_url(self, url: str) -> str:
        """Parse the filename from a url.

        Args:
            url: Url to parse the filename from.

        Returns:
            The filename.

        Raises:
            ValueError: If the filename could not be parsed.
        """
        filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
        if filename_matches:
            filename = filename_matches.group(1)
        else:
            raise ValueError(f"Could not parse filename from {url}")
        if ".pdf" not in filename:
            raise ValueError(f"Incorrect file type: {filename}")
        filename = filename.split(".pdf")[0] + ".pdf"
        filename = unquote(filename)
        filename = filename.replace("%20", " ")
        return filename


if __name__ == "__main__":
    loader = BlackboardLoader(
        "https://<YOUR BLACKBOARD URL"
        " HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
        " HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
        "<YOUR BBROUTER COOKIE HERE>",
        load_all_recursively=True,
    )
    documents = loader.load()
    print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
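For context on the `_parse_filename_from_url` regex above: it looks for a URL-encoded RFC 5987 `filename*=UTF-8''...` parameter in the redirect URL. A minimal, self-contained sketch of the expected shape, using a made-up URL:

```python
import re
from urllib.parse import unquote

# Hypothetical download URL; the encoded tail decodes to filename*=UTF-8''Lecture%201.pdf
url = "https://blackboard.example.com/bbcswebdav/xid-1_1?filename%2A%3DUTF-8%27%27Lecture%201.pdf"

match = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
assert match is not None
filename = match.group(1)                      # "Lecture%201.pdf"
filename = filename.split(".pdf")[0] + ".pdf"  # drop anything after the extension
filename = unquote(filename)                   # "Lecture 1.pdf"
print(filename)
```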
@@ -1,47 +0,0 @@
from csv import DictReader
from typing import Dict, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class CSVLoader(BaseLoader):
    """Loads a CSV file into a list of documents.

    Each document represents one row of the CSV file. Every row is converted into a
    key/value pair and outputted to a new line in the document's page_content.

    Output Example:
        .. code-block:: txt

            column1: value1
            column2: value2
            column3: value3
    """

    def __init__(self, file_path: str, csv_args: Optional[Dict] = None):
        self.file_path = file_path
        if csv_args is None:
            self.csv_args = {
                "delimiter": ",",
                "quotechar": '"',
            }
        else:
            self.csv_args = csv_args

    def load(self) -> List[Document]:
        docs = []

        with open(self.file_path, newline="") as csvfile:
            csv = DictReader(csvfile, **self.csv_args)  # type: ignore
            for row in csv:
                docs.append(
                    Document(
                        page_content="\n".join(
                            f"{k.strip()}: {v.strip()}" for k, v in row.items()
                        ),
                        metadata={"source": self.file_path},
                    )
                )

        return docs
60
langchain/document_loaders/csv_loader.py
Normal file
60
langchain/document_loaders/csv_loader.py
Normal file
@@ -0,0 +1,60 @@
from csv import DictReader
from typing import Dict, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class CSVLoader(BaseLoader):
    """Loads a CSV file into a list of documents.

    Each document represents one row of the CSV file. Every row is converted into a
    key/value pair and output to a new line in the document's page_content.

    The source for each document loaded from csv is set to the value of the
    `file_path` argument for all documents by default.
    You can override this by setting the `source_column` argument to the
    name of a column in the CSV file.
    The source of each document will then be set to the value of the column
    with the name specified in `source_column`.

    Output Example:
        .. code-block:: txt

            column1: value1
            column2: value2
            column3: value3
    """

    def __init__(
        self,
        file_path: str,
        source_column: Optional[str] = None,
        csv_args: Optional[Dict] = None,
    ):
        self.file_path = file_path
        self.source_column = source_column
        if csv_args is None:
            self.csv_args = {
                "delimiter": ",",
                "quotechar": '"',
            }
        else:
            self.csv_args = csv_args

    def load(self) -> List[Document]:
        docs = []

        with open(self.file_path, newline="") as csvfile:
            csv = DictReader(csvfile, **self.csv_args)  # type: ignore
            for i, row in enumerate(csv):
                content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
                if self.source_column is not None:
                    source = row[self.source_column]
                else:
                    source = self.file_path
                metadata = {"source": source, "row": i}
                doc = Document(page_content=content, metadata=metadata)
                docs.append(doc)

        return docs
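A minimal usage sketch of the new `source_column` option; the file name and column name below are made up:

```python
from langchain.document_loaders.csv_loader import CSVLoader

# Assumes a hypothetical sites.csv with columns "name" and "website".
loader = CSVLoader(file_path="sites.csv", source_column="website")
docs = loader.load()
# Each document's source is now the row's "website" value rather than the
# file path, and the metadata also records the row index, e.g.:
# docs[0].metadata == {"source": "https://example.com", "row": 0}
```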
@@ -5,10 +5,13 @@ from typing import List, Type, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

FILE_LOADER_TYPE = Union[Type[UnstructuredFileLoader], Type[TextLoader]]
FILE_LOADER_TYPE = Union[
    Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__file__)
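With `FILE_LOADER_TYPE` widened, `DirectoryLoader` accepts the new BeautifulSoup-based HTML loader as its `loader_cls`. A sketch under the assumption of a local `pages/` directory of HTML files:

```python
from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.html_bs import BSHTMLLoader

# "pages/" is a placeholder path; every matching HTML file becomes a Document.
loader = DirectoryLoader(path="pages/", glob="**/*.html", loader_cls=BSHTMLLoader)
docs = loader.load()
```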
@@ -6,7 +6,8 @@
# https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
# https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501

# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create

from pathlib import Path
from typing import Any, Dict, List, Optional
@@ -22,6 +23,7 @@ SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
class GoogleDriveLoader(BaseLoader, BaseModel):
    """Loader that loads Google Docs from Google Drive."""

    service_account_key: Path = Path.home() / ".credentials" / "keys.json"
    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    token_path: Path = Path.home() / ".credentials" / "token.json"
    folder_id: Optional[str] = None
@@ -60,6 +62,7 @@ class GoogleDriveLoader(BaseLoader, BaseModel):
        # Adapted from https://developers.google.com/drive/api/v3/quickstart/python
        try:
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
        except ImportError:
@@ -72,6 +75,11 @@ class GoogleDriveLoader(BaseLoader, BaseModel):
            )

        creds = None
        if self.service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_key), scopes=SCOPES
            )

        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
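A sketch of the new service-account path: when the key file exists, it is used directly and the interactive OAuth token flow is skipped. The folder id and key location below are placeholders:

```python
from pathlib import Path

from langchain.document_loaders.googledrive import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="<YOUR FOLDER ID HERE>",                # placeholder
    service_account_key=Path("/path/to/keys.json"),   # placeholder
)
docs = loader.load()
```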
@@ -10,4 +10,4 @@ class UnstructuredHTMLLoader(UnstructuredFileLoader):
    def _get_elements(self) -> List:
        from unstructured.partition.html import partition_html

        return partition_html(filename=self.file_path)
        return partition_html(filename=self.file_path, **self.unstructured_kwargs)
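Forwarding `self.unstructured_kwargs` means extra keyword arguments given to the loader now reach `partition_html` (the same change is applied to the image loader below). A sketch; `page.html` is a placeholder, and which kwargs `partition_html` accepts depends on the installed `unstructured` version:

```python
from langchain.document_loaders import UnstructuredHTMLLoader

# Keyword arguments beyond mode are collected into unstructured_kwargs
# and passed through to partition_html.
loader = UnstructuredHTMLLoader("page.html", mode="elements")
docs = loader.load()
```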
36
langchain/document_loaders/html_bs.py
Normal file
36
langchain/document_loaders/html_bs.py
Normal file
@@ -0,0 +1,36 @@
"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""

import logging
from typing import Dict, List, Union

from bs4 import BeautifulSoup

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__file__)


class BSHTMLLoader(BaseLoader):
    """Loader that uses beautiful soup to parse HTML files."""

    def __init__(self, file_path: str) -> None:
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load HTML document into document objects."""
        with open(self.file_path, "r") as f:
            soup = BeautifulSoup(f, features="lxml")

        text = soup.get_text()

        if soup.title:
            title = str(soup.title.string)
        else:
            title = ""

        metadata: Dict[str, Union[str, None]] = {
            "source": self.file_path,
            "title": title,
        }
        return [Document(page_content=text, metadata=metadata)]
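A minimal usage sketch for the new loader; `page.html` is a placeholder, and `lxml` must be installed alongside beautifulsoup4:

```python
from langchain.document_loaders.html_bs import BSHTMLLoader

loader = BSHTMLLoader("page.html")
docs = loader.load()
# The page title is captured alongside the source path:
print(docs[0].metadata)  # {"source": "page.html", "title": "<page title>"}
```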
@@ -99,7 +99,10 @@ class IFixitLoader(BaseLoader):
        output.append("# " + title)
        output.append(soup.select_one(".post-content .post-text").text.strip())

        output.append("\n## " + soup.find("div", "post-answers-header").text.strip())
        answersHeader = soup.find("div", "post-answers-header")
        if answersHeader:
            output.append("\n## " + answersHeader.text.strip())

        for answer in soup.select(".js-answers-list .post.post-answer"):
            if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]:
                output.append("\n### Accepted Answer")
@@ -10,4 +10,4 @@ class UnstructuredImageLoader(UnstructuredFileLoader):
    def _get_elements(self) -> List:
        from unstructured.partition.image import partition_image

        return partition_image(filename=self.file_path)
        return partition_image(filename=self.file_path, **self.unstructured_kwargs)
@@ -9,16 +9,17 @@ from langchain.document_loaders.base import BaseLoader
class ObsidianLoader(BaseLoader):
    """Loader that loads Obsidian files from disk."""

    def __init__(self, path: str):
    def __init__(self, path: str, encoding: str = "UTF-8"):
        """Initialize with path."""
        self.file_path = path
        self.encoding = encoding

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p) as f:
            with open(p, encoding=self.encoding) as f:
                text = f.read()
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
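A sketch of the new `encoding` parameter, useful for vaults whose notes are not saved as UTF-8; the vault path and encoding below are placeholders:

```python
from langchain.document_loaders.obsidian import ObsidianLoader

# Every Markdown file under the vault is read with the given encoding.
loader = ObsidianLoader("/path/to/vault", encoding="latin-1")
docs = loader.load()
```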
@@ -1,15 +0,0 @@
"""Loader that loads online PDF files."""

from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.pdf import BasePDFLoader, UnstructuredPDFLoader


class OnlinePDFLoader(BasePDFLoader):
    """Loader that loads online PDFs."""

    def load(self) -> List[Document]:
        """Load documents."""
        loader = UnstructuredPDFLoader(str(self.file_path))
        return loader.load()
Some files were not shown because too many files have changed in this diff.