Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-01-23 21:31:02 +00:00

Compare commits: 71 commits, v0.0.138...dev2049/fm
| SHA1 |
|---|
| 416adc578d |
| c4ae8c1d24 |
| ad3973a3b8 |
| cf2789d86d |
| 0aa828b1dc |
| ec59e9d886 |
| 13a0ed064b |
| 392f1b3218 |
| 66bef1d7ed |
| 7ee87eb0c8 |
| 634358db5e |
| 82174bf176 |
| 30573b2e30 |
| a508afa91c |
| 7e525a3b91 |
| ccacf804a8 |
| 86189cdcf9 |
| 8fef69296d |
| 0a38bbc750 |
| 203c0eb2ae |
| 1a44b71ddf |
| 3c7204d604 |
| 1e9378d0a8 |
| 07d7096de6 |
| 5565f56273 |
| 9907cb0485 |
| 1cc7ea333c |
| 705596b46a |
| 8a98e5b50b |
| dcb17503f2 |
| 74abeb8c53 |
| 0226b375d9 |
| 04c458a270 |
| 016738e676 |
| 8cfec2c5fe |
| bf0887c486 |
| ed2ef5cbe4 |
| 6be5d7c612 |
| c26a259ba6 |
| f3180f05f9 |
| ecc1a0c051 |
| 70ffe470aa |
| be4fb24b32 |
| 82d1d5f24e |
| 53dc157145 |
| 1609950597 |
| 7688bf9182 |
| 2db9b7a45d |
| 802363eb6a |
| 2a89dc8c1c |
| a6f767ae7a |
| 4f231b46ee |
| 414dc803b6 |
| 61858c5a08 |
| 9a96691803 |
| 324e9c83d5 |
| ed03e965de |
| 64596b23b9 |
| 1bb0706955 |
| b2bc5ef56a |
| abfca72c0b |
| f0be3b0689 |
| e081c62aac |
| a094b7f807 |
| 1c7fb31bba |
| 0e763677e4 |
| e49f1e628c |
| 425c437cd3 |
| a2d729e537 |
| 7adbc4fbb4 |
| 1bea9ea4be |
@@ -2,7 +2,7 @@
 
 ⚡ Building applications with LLMs through composability ⚡
 
-[](https://github.com/hwchase17/langchain/actions/workflows/lint.yml) [](https://github.com/hwchase17/langchain/actions/workflows/test.yml) [](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml) [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai) [](https://discord.gg/6adMQxSpJS)
+[](https://github.com/hwchase17/langchain/actions/workflows/lint.yml) [](https://github.com/hwchase17/langchain/actions/workflows/test.yml) [](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml) [](https://pepy.tech/project/langchain) [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai) [](https://discord.gg/6adMQxSpJS)
 
 **Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
 Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
4 docs/_static/css/custom.css vendored
@@ -11,3 +11,7 @@ pre {
   max-width: 2560px !important;
 }
 }
+
+#my-component-root *, #headlessui-portal-root * {
+  z-index: 1000000000000;
+}
58 docs/_static/js/mendablesearch.js vendored Normal file
@@ -0,0 +1,58 @@
document.addEventListener('DOMContentLoaded', () => {
  // Load the external dependencies
  function loadScript(src, onLoadCallback) {
    const script = document.createElement('script');
    script.src = src;
    script.onload = onLoadCallback;
    document.head.appendChild(script);
  }

  function createRootElement() {
    const rootElement = document.createElement('div');
    rootElement.id = 'my-component-root';
    document.body.appendChild(rootElement);
    return rootElement;
  }

  function initializeMendable() {
    const rootElement = createRootElement();
    const { MendableFloatingButton } = Mendable;

    const iconSpan1 = React.createElement('span', {}, '🦜');

    const iconSpan2 = React.createElement('span', {}, '🔗');

    const icon = React.createElement('p', {
      style: { color: '#ffffff', fontSize: '22px', width: '48px', height: '48px', margin: '0px', padding: '0px', display: 'flex', alignItems: 'center', justifyContent: 'center', textAlign: 'center' },
    }, [iconSpan1, iconSpan2]);

    const mendableFloatingButton = React.createElement(
      MendableFloatingButton,
      {
        style: { darkMode: false, accentColor: '#010810' },
        floatingButtonStyle: { color: '#ffffff', backgroundColor: '#010810' },
        anon_key: '82842b36-3ea6-49b2-9fb8-52cfc4bde6bf', // Mendable Search Public ANON key, ok to be public
        messageSettings: {
          openSourcesInNewTab: false,
        },
        icon: icon,
      }
    );

    ReactDOM.render(mendableFloatingButton, rootElement);
  }

  loadScript('https://unpkg.com/react@17/umd/react.production.min.js', () => {
    loadScript('https://unpkg.com/react-dom@17/umd/react-dom.production.min.js', () => {
      loadScript('https://unpkg.com/@mendable/search@0.0.83/dist/umd/mendable.min.js', initializeMendable);
    });
  });
});
@@ -103,5 +103,10 @@ html_static_path = ["_static"]
 html_css_files = [
     "css/custom.css",
 ]
+
+html_js_files = [
+    "js/mendablesearch.js",
+]
+
 nb_execution_mode = "off"
 myst_enable_extensions = ["colon_fence"]
@@ -33,12 +33,17 @@ It implements a Question Answering app and contains instructions for deploying t
 
 A minimal example on how to run LangChain on Vercel using Flask.
+
+## [DigitalOcean App Platform](https://github.com/homanp/digitalocean-langchain)
+
+A minimal example on how to deploy LangChain to DigitalOcean App Platform.
 
 ## [Steamship](https://github.com/steamship-core/steamship-langchain/)
 
 This repository contains LangChain adapters for Steamship, enabling LangChain developers to rapidly deploy their apps on Steamship.
 This includes: production-ready endpoints, horizontal scaling across dependencies, persistent storage of app state, multi-tenancy support, etc.
 
 ## [Langchain-serve](https://github.com/jina-ai/langchain-serve)
 
 This repository allows users to serve local chains and agents as RESTful, gRPC, or WebSocket APIs, thanks to [Jina](https://docs.jina.ai/). Deploy your chains & agents with ease and enjoy independent scaling, serverless and autoscaling APIs, as well as a Streamlit playground on Jina AI Cloud.
 
 ## [BentoML](https://github.com/ssheng/BentoChain)
@@ -142,7 +142,7 @@
 "aim_callback.flush_tracker(\n",
 "    langchain_asset=llm,\n",
 "    experiment_name=\"scenario 2: Chain with multiple SubChains on multiple generations\",\n",
-")\n"
+")"
 ]
 },
 {
@@ -180,7 +180,9 @@
 "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
 "\n",
 "test_prompts = [\n",
-"    {\"title\": \"documentary about good video games that push the boundary of game design\"},\n",
+"    {\n",
+"        \"title\": \"documentary about good video games that push the boundary of game design\"\n",
+"    },\n",
 "    {\"title\": \"the phenomenon behind the remarkable speed of cheetahs\"},\n",
 "    {\"title\": \"the best in class mlops tooling\"},\n",
 "]\n",
@@ -19,7 +19,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Getting API Credentials\n",
+"## Getting API Credentials\n",
 "\n",
 "We'll be using quite some APIs in this notebook, here is a list and where to get them:\n",
 "\n",
@@ -35,6 +35,7 @@
 "outputs": [],
 "source": [
 "import os\n",
+"\n",
 "os.environ[\"CLEARML_API_ACCESS_KEY\"] = \"\"\n",
 "os.environ[\"CLEARML_API_SECRET_KEY\"] = \"\"\n",
 "\n",
@@ -47,7 +48,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Setting Up"
+"## Setting Up"
 ]
 },
 {
@@ -91,7 +92,7 @@
 "    # Change the following parameters based on the amount of detail you want tracked\n",
 "    visualize=True,\n",
 "    complexity_metrics=True,\n",
-"    stream_logs=True\n",
+"    stream_logs=True,\n",
 ")\n",
 "manager = CallbackManager([StdOutCallbackHandler(), clearml_callback])\n",
 "# Get the OpenAI model ready to go\n",
@@ -103,7 +104,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Scenario 1: Just an LLM\n",
+"## Scenario 1: Just an LLM\n",
 "\n",
 "First, let's just run a single LLM a few times and capture the resulting prompt-answer conversation in ClearML"
 ]
@@ -361,7 +362,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Scenario 2: Creating a agent with tools\n",
+"## Scenario 2: Creating an agent with tools\n",
 "\n",
 "To show a more advanced workflow, let's create an agent with access to tools. The way ClearML tracks the results is not different though, only the table will look slightly different as there are other types of actions taken when compared to the earlier, simpler example.\n",
 "\n",
@@ -531,10 +532,10 @@
 "    callback_manager=manager,\n",
 "    verbose=True,\n",
 ")\n",
-"agent.run(\n",
-"    \"Who is the wife of the person who sang summer of 69?\"\n",
-")\n",
-"clearml_callback.flush_tracker(langchain_asset=agent, name=\"Agent with Tools\", finish=True)"
+"agent.run(\"Who is the wife of the person who sang summer of 69?\")\n",
+"clearml_callback.flush_tracker(\n",
+"    langchain_asset=agent, name=\"Agent with Tools\", finish=True\n",
+")"
 ]
 },
 {
@@ -542,7 +543,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Tips and Next Steps\n",
+"## Tips and Next Steps\n",
 "\n",
 "- Make sure you always use a unique `name` argument for the `clearml_callback.flush_tracker` function. If not, the model parameters used for a run will override the previous run!\n",
 "\n",
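The "Tips and Next Steps" warning above is easy to trip over, so here is a minimal sketch of the safe pattern. It reuses the notebook's `clearml_callback`, `llm`, and `agent` objects; the `name` values are made up for illustration:

```python
# Give every flush_tracker call its own unique name, so one run's
# parameters never overwrite another's in ClearML.
clearml_callback.flush_tracker(langchain_asset=llm, name="scenario-1-llm")
clearml_callback.flush_tracker(
    langchain_asset=agent, name="scenario-2-agent-with-tools", finish=True
)
```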
352 docs/ecosystem/comet_tracking.ipynb Normal file
@@ -0,0 +1,352 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Comet"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    ""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In this guide we will demonstrate how to track your Langchain Experiments, Evaluation Metrics, and LLM Sessions with [Comet](https://www.comet.com/site/?utm_source=langchain&utm_medium=referral&utm_campaign=comet_notebook).\n",
    "\n",
    "<a target=\"_blank\" href=\"https://colab.research.google.com/github/hwchase17/langchain/blob/master/docs/ecosystem/comet_tracking.ipynb\">\n",
    "  <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
    "</a>\n",
    "\n",
    "**Example Project:** [Comet with LangChain](https://www.comet.com/examples/comet-example-langchain/view/b5ZThK6OFdhKWVSP3fDfRtrNF/panels?utm_source=langchain&utm_medium=referral&utm_campaign=comet_notebook)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img width=\"1280\" alt=\"comet-langchain\" src=\"https://user-images.githubusercontent.com/7529846/230326720-a9711435-9c6f-4edb-a707-94b67271ab25.png\">\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Install Comet and Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install comet_ml langchain openai google-search-results spacy textstat pandas\n",
    "\n",
    "import sys\n",
    "!{sys.executable} -m spacy download en_core_web_sm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Initialize Comet and Set your Credentials"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can grab your [Comet API Key here](https://www.comet.com/signup?utm_source=langchain&utm_medium=referral&utm_campaign=comet_notebook) or click the link after initializing Comet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import comet_ml\n",
    "\n",
    "comet_ml.init(project_name=\"comet-example-langchain\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Set OpenAI and SerpAPI credentials"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You will need an [OpenAI API Key](https://platform.openai.com/account/api-keys) and a [SerpAPI API Key](https://serpapi.com/dashboard) to run the following examples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "os.environ[\"OPENAI_API_KEY\"] = \"...\"\n",
    "# os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"\n",
    "os.environ[\"SERPAPI_API_KEY\"] = \"...\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Scenario 1: Using just an LLM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datetime import datetime\n",
    "\n",
    "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
    "from langchain.callbacks.base import CallbackManager\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "comet_callback = CometCallbackHandler(\n",
    "    project_name=\"comet-example-langchain\",\n",
    "    complexity_metrics=True,\n",
    "    stream_logs=True,\n",
    "    tags=[\"llm\"],\n",
    "    visualizations=[\"dep\"],\n",
    ")\n",
    "manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
    "llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
    "\n",
    "llm_result = llm.generate([\"Tell me a joke\", \"Tell me a poem\", \"Tell me a fact\"] * 3)\n",
    "print(\"LLM result\", llm_result)\n",
    "comet_callback.flush_tracker(llm, finish=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Scenario 2: Using an LLM in a Chain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
    "from langchain.callbacks.base import CallbackManager\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.llms import OpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "comet_callback = CometCallbackHandler(\n",
    "    complexity_metrics=True,\n",
    "    project_name=\"comet-example-langchain\",\n",
    "    stream_logs=True,\n",
    "    tags=[\"synopsis-chain\"],\n",
    ")\n",
    "manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
    "\n",
    "llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
    "\n",
    "template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
    "Title: {title}\n",
    "Playwright: This is a synopsis for the above play:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
    "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
    "\n",
    "test_prompts = [{\"title\": \"Documentary about Bigfoot in Paris\"}]\n",
    "print(synopsis_chain.apply(test_prompts))\n",
    "comet_callback.flush_tracker(synopsis_chain, finish=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Scenario 3: Using An Agent with Tools"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.agents import initialize_agent, load_tools\n",
    "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
    "from langchain.callbacks.base import CallbackManager\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "comet_callback = CometCallbackHandler(\n",
    "    project_name=\"comet-example-langchain\",\n",
    "    complexity_metrics=True,\n",
    "    stream_logs=True,\n",
    "    tags=[\"agent\"],\n",
    ")\n",
    "manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
    "llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
    "\n",
    "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callback_manager=manager)\n",
    "agent = initialize_agent(\n",
    "    tools,\n",
    "    llm,\n",
    "    agent=\"zero-shot-react-description\",\n",
    "    callback_manager=manager,\n",
    "    verbose=True,\n",
    ")\n",
    "agent.run(\n",
    "    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
    ")\n",
    "comet_callback.flush_tracker(agent, finish=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Scenario 4: Using Custom Evaluation Metrics"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The `CometCallbackManager` also allows you to define and use Custom Evaluation Metrics to assess generated outputs from your model. Let's take a look at how this works.\n",
    "\n",
    "\n",
    "In the snippet below, we will use the [ROUGE](https://huggingface.co/spaces/evaluate-metric/rouge) metric to evaluate the quality of a generated summary of an input prompt."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install rouge-score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from rouge_score import rouge_scorer\n",
    "\n",
    "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
    "from langchain.callbacks.base import CallbackManager\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.llms import OpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "\n",
    "class Rouge:\n",
    "    def __init__(self, reference):\n",
    "        self.reference = reference\n",
    "        self.scorer = rouge_scorer.RougeScorer([\"rougeLsum\"], use_stemmer=True)\n",
    "\n",
    "    def compute_metric(self, generation, prompt_idx, gen_idx):\n",
    "        prediction = generation.text\n",
    "        results = self.scorer.score(target=self.reference, prediction=prediction)\n",
    "\n",
    "        return {\n",
    "            \"rougeLsum_score\": results[\"rougeLsum\"].fmeasure,\n",
    "            \"reference\": self.reference,\n",
    "        }\n",
    "\n",
    "\n",
    "reference = \"\"\"\n",
    "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building.\n",
    "It was the first structure to reach a height of 300 metres.\n",
    "\n",
    "It is now taller than the Chrysler Building in New York City by 5.2 metres (17 ft)\n",
    "Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France .\n",
    "\"\"\"\n",
    "rouge_score = Rouge(reference=reference)\n",
    "\n",
    "template = \"\"\"Given the following article, it is your job to write a summary.\n",
    "Article:\n",
    "{article}\n",
    "Summary: This is the summary for the above article:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"article\"], template=template)\n",
    "\n",
    "comet_callback = CometCallbackHandler(\n",
    "    project_name=\"comet-example-langchain\",\n",
    "    complexity_metrics=False,\n",
    "    stream_logs=True,\n",
    "    tags=[\"custom_metrics\"],\n",
    "    custom_metrics=rouge_score.compute_metric,\n",
    ")\n",
    "manager = CallbackManager([StdOutCallbackHandler(), comet_callback])\n",
    "llm = OpenAI(temperature=0.9, callback_manager=manager, verbose=True)\n",
    "\n",
    "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
    "\n",
    "test_prompts = [\n",
    "    {\n",
    "        \"article\": \"\"\"\n",
    "        The tower is 324 metres (1,063 ft) tall, about the same height as\n",
    "        an 81-storey building, and the tallest structure in Paris. Its base is square,\n",
    "        measuring 125 metres (410 ft) on each side.\n",
    "        During its construction, the Eiffel Tower surpassed the\n",
    "        Washington Monument to become the tallest man-made structure in the world,\n",
    "        a title it held for 41 years until the Chrysler Building\n",
    "        in New York City was finished in 1930.\n",
    "\n",
    "        It was the first structure to reach a height of 300 metres.\n",
    "        Due to the addition of a broadcasting aerial at the top of the tower in 1957,\n",
    "        it is now taller than the Chrysler Building by 5.2 metres (17 ft).\n",
    "\n",
    "        Excluding transmitters, the Eiffel Tower is the second tallest\n",
    "        free-standing structure in France after the Millau Viaduct.\n",
    "        \"\"\"\n",
    "    }\n",
    "]\n",
    "print(synopsis_chain.apply(test_prompts))\n",
    "comet_callback.flush_tracker(synopsis_chain, finish=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -36,7 +36,7 @@ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
 model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8, callback_manager=callback_manager, verbose=True)
 
-# Generate text. Tokens are streamed throught the callback manager.
+# Generate text. Tokens are streamed through the callback manager.
 model("Once upon a time, ")
 ```
 
@@ -44,4 +44,4 @@ model("Once upon a time, ")
 
 You can find links to model file downloads in the [pyllamacpp](https://github.com/nomic-ai/pyllamacpp) repository.
 
-For a more detailed walkthrough of this, see [this notebook](../modules/models/llms/integrations/gpt4all.ipynb)
+For a more detailed walkthrough of this, see [this notebook](../modules/models/llms/integrations/gpt4all.ipynb)
@@ -35,6 +35,7 @@
 "outputs": [],
 "source": [
 "import os\n",
+"\n",
 "os.environ[\"WANDB_API_KEY\"] = \"\"\n",
 "# os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
 "# os.environ[\"SERPAPI_API_KEY\"] = \"\""
@@ -1,5 +1,5 @@
 LangChain Gallery
-=============
+=================
 
 Lots of people have built some pretty awesome stuff with LangChain.
 This is a collection of our favorites.
@@ -223,7 +223,7 @@ Open Source
 Answer questions about the documentation of any project
 
 Misc. Colab Notebooks
-~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~
 
 .. panels::
    :body: text-center
@@ -31,9 +31,9 @@
 "outputs": [],
 "source": [
 "llms = [\n",
-"    OpenAI(temperature=0), \n",
-"    Cohere(model=\"command-xlarge-20221108\", max_tokens=20, temperature=0), \n",
-"    HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":1})\n",
+"    OpenAI(temperature=0),\n",
+"    Cohere(model=\"command-xlarge-20221108\", max_tokens=20, temperature=0),\n",
+"    HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\": 1}),\n",
 "]"
 ]
 },
@@ -90,7 +90,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"prompt = PromptTemplate(template=\"What is the capital of {state}?\", input_variables=[\"state\"])\n",
+"prompt = PromptTemplate(\n",
+"    template=\"What is the capital of {state}?\", input_variables=[\"state\"]\n",
+")\n",
 "model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)"
 ]
 },
@@ -141,11 +143,15 @@
 "\n",
 "open_ai_llm = OpenAI(temperature=0)\n",
 "search = SerpAPIWrapper()\n",
-"self_ask_with_search_openai = SelfAskWithSearchChain(llm=open_ai_llm, search_chain=search, verbose=True)\n",
+"self_ask_with_search_openai = SelfAskWithSearchChain(\n",
+"    llm=open_ai_llm, search_chain=search, verbose=True\n",
+")\n",
 "\n",
 "cohere_llm = Cohere(temperature=0, model=\"command-xlarge-20221108\")\n",
 "search = SerpAPIWrapper()\n",
-"self_ask_with_search_cohere = SelfAskWithSearchChain(llm=cohere_llm, search_chain=search, verbose=True)"
+"self_ask_with_search_cohere = SelfAskWithSearchChain(\n",
+"    llm=cohere_llm, search_chain=search, verbose=True\n",
+")"
 ]
 },
 {
@@ -33,6 +33,7 @@
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.llms import OpenAI\n",
 "from langchain.chains import RetrievalQA\n",
+"\n",
 "llm = OpenAI(temperature=0)"
 ]
 },
@@ -44,6 +45,7 @@
 "outputs": [],
 "source": [
 "from pathlib import Path\n",
+"\n",
 "relevant_parts = []\n",
 "for p in Path(\".\").absolute().parts:\n",
 "    relevant_parts.append(p)\n",
@@ -69,6 +71,7 @@
 ],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
+"\n",
 "loader = TextLoader(doc_path)\n",
 "documents = loader.load()\n",
 "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
@@ -85,7 +88,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"state_of_union = RetrievalQA.from_chain_type(llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever())"
+"state_of_union = RetrievalQA.from_chain_type(\n",
+"    llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
+")"
 ]
 },
 {
@@ -127,7 +132,9 @@
 "docs = loader.load()\n",
 "ruff_texts = text_splitter.split_documents(docs)\n",
 "ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name=\"ruff\")\n",
-"ruff = RetrievalQA.from_chain_type(llm=llm, chain_type=\"stuff\", retriever=ruff_db.as_retriever())"
+"ruff = RetrievalQA.from_chain_type(\n",
+"    llm=llm, chain_type=\"stuff\", retriever=ruff_db.as_retriever()\n",
+")"
 ]
 },
 {
@@ -170,14 +177,14 @@
 "source": [
 "tools = [\n",
 "    Tool(\n",
-"        name = \"State of Union QA System\",\n",
+"        name=\"State of Union QA System\",\n",
 "        func=state_of_union.run,\n",
-"        description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\"\n",
+"        description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
 "    ),\n",
 "    Tool(\n",
-"        name = \"Ruff QA System\",\n",
+"        name=\"Ruff QA System\",\n",
 "        func=ruff.run,\n",
-"        description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\"\n",
+"        description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
 "    ),\n",
 "]"
 ]
@@ -191,7 +198,9 @@
 "source": [
 "# Construct the agent. We will use the default agent type here.\n",
 "# See documentation for a full list of options.\n",
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
+"agent = initialize_agent(\n",
+"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
+")"
 ]
 },
 {
@@ -229,7 +238,9 @@
 }
 ],
 "source": [
-"agent.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
+"agent.run(\n",
+"    \"What did biden say about ketanji brown jackson is the state of the union address?\"\n",
+")"
 ]
 },
 {
@@ -297,16 +308,16 @@
 "source": [
 "tools = [\n",
 "    Tool(\n",
-"        name = \"State of Union QA System\",\n",
+"        name=\"State of Union QA System\",\n",
 "        func=state_of_union.run,\n",
 "        description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
-"        return_direct=True\n",
+"        return_direct=True,\n",
 "    ),\n",
 "    Tool(\n",
-"        name = \"Ruff QA System\",\n",
+"        name=\"Ruff QA System\",\n",
 "        func=ruff.run,\n",
 "        description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
-"        return_direct=True\n",
+"        return_direct=True,\n",
 "    ),\n",
 "]"
 ]
@@ -318,7 +329,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
+"agent = initialize_agent(\n",
+"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
+")"
 ]
 },
 {
@@ -355,7 +368,9 @@
 }
 ],
 "source": [
-"agent.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")"
+"agent.run(\n",
+"    \"What did biden say about ketanji brown jackson in the state of the union address?\"\n",
+")"
 ]
 },
 {
@@ -414,14 +429,14 @@
 "source": [
 "tools = [\n",
 "    Tool(\n",
-"        name = \"State of Union QA System\",\n",
+"        name=\"State of Union QA System\",\n",
 "        func=state_of_union.run,\n",
-"        description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\"\n",
+"        description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
 "    ),\n",
 "    Tool(\n",
-"        name = \"Ruff QA System\",\n",
+"        name=\"Ruff QA System\",\n",
 "        func=ruff.run,\n",
-"        description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\"\n",
+"        description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
 "    ),\n",
 "]"
 ]
@@ -435,7 +450,9 @@
 "source": [
 "# Construct the agent. We will use the default agent type here.\n",
 "# See documentation for a full list of options.\n",
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
+"agent = initialize_agent(\n",
+"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
+")"
 ]
 },
 {
@@ -477,7 +494,9 @@
 }
 ],
 "source": [
-"agent.run(\"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\")"
+"agent.run(\n",
+"    \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\"\n",
+")"
 ]
 },
 {
@@ -51,7 +51,7 @@
 "    \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n",
 "    \"Who won the most recent formula 1 grand prix? What is their age raised to the 0.23 power?\",\n",
 "    \"Who won the US Open women's final in 2019? What is her age raised to the 0.34 power?\",\n",
-"    \"Who is Beyonce's husband? What is his age raised to the 0.19 power?\"\n",
+"    \"Who is Beyonce's husband? What is his age raised to the 0.19 power?\",\n",
 "]"
 ]
 },
@@ -180,6 +180,7 @@
 "    )\n",
 "    agent.run(q)\n",
 "\n",
+"\n",
 "s = time.perf_counter()\n",
 "generate_serially()\n",
 "elapsed = time.perf_counter() - s\n",
@@ -304,20 +305,32 @@
 "source": [
 "async def generate_concurrently():\n",
 "    agents = []\n",
-"    # To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
+"    # To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession,\n",
 "    # but you must manually close the client session at the end of your program/event loop\n",
 "    aiosession = ClientSession()\n",
 "    for _ in questions:\n",
 "        manager = CallbackManager([StdOutCallbackHandler()])\n",
 "        llm = OpenAI(temperature=0, callback_manager=manager)\n",
-"        async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n",
+"        async_tools = load_tools(\n",
+"            [\"llm-math\", \"serpapi\"],\n",
+"            llm=llm,\n",
+"            aiosession=aiosession,\n",
+"            callback_manager=manager,\n",
+"        )\n",
 "        agents.append(\n",
-"            initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
+"            initialize_agent(\n",
+"                async_tools,\n",
+"                llm,\n",
+"                agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"                verbose=True,\n",
+"                callback_manager=manager,\n",
+"            )\n",
 "        )\n",
 "    tasks = [async_agent.arun(q) for async_agent, q in zip(agents, questions)]\n",
 "    await asyncio.gather(*tasks)\n",
 "    await aiosession.close()\n",
 "\n",
+"\n",
 "s = time.perf_counter()\n",
 "# If running this outside of Jupyter, use asyncio.run(generate_concurrently())\n",
 "await generate_concurrently()\n",
@@ -371,7 +384,7 @@
 }
 ],
 "source": [
-"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
+"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession,\n",
 "# but you must manually close the client session at the end of your program/event loop\n",
 "aiosession = ClientSession()\n",
 "tracer = LangChainTracer()\n",
@@ -382,7 +395,13 @@
 "llm = OpenAI(temperature=0, callback_manager=manager)\n",
 "\n",
 "async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession)\n",
-"async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
+"async_agent = initialize_agent(\n",
+"    async_tools,\n",
+"    llm,\n",
+"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    verbose=True,\n",
+"    callback_manager=manager,\n",
+")\n",
 "await async_agent.arun(questions[0])\n",
 "await aiosession.close()"
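The hunks above repeat the caveat that a manually created `aiohttp.ClientSession` must be closed at the end of the event loop. A hedged alternative sketch uses a context manager so the session closes even if an agent raises; it assumes the notebook's `questions` list and agent setup, and `run_agent` is a hypothetical per-question helper standing in for the loop body above:

```python
import asyncio
from aiohttp import ClientSession

async def generate_concurrently():
    # The session is closed automatically when this block exits,
    # replacing the explicit `await aiosession.close()` above.
    async with ClientSession() as aiosession:
        tasks = [run_agent(q, aiosession) for q in questions]  # run_agent: hypothetical helper
        await asyncio.gather(*tasks)
```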
@@ -63,20 +63,19 @@
 "Human: {human_input}\n",
 "Assistant:\"\"\"\n",
 "\n",
-"prompt = PromptTemplate(\n",
-"    input_variables=[\"history\", \"human_input\"], \n",
-"    template=template\n",
-")\n",
+"prompt = PromptTemplate(input_variables=[\"history\", \"human_input\"], template=template)\n",
 "\n",
 "\n",
 "chatgpt_chain = LLMChain(\n",
-"    llm=OpenAI(temperature=0), \n",
-"    prompt=prompt, \n",
-"    verbose=True, \n",
+"    llm=OpenAI(temperature=0),\n",
+"    prompt=prompt,\n",
+"    verbose=True,\n",
 "    memory=ConversationBufferWindowMemory(k=2),\n",
 ")\n",
 "\n",
-"output = chatgpt_chain.predict(human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"\n",
+")\n",
 "print(output)"
@@ -228,7 +227,9 @@
 }
 ],
 "source": [
-"output = chatgpt_chain.predict(human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\"\n",
+")\n",
 "print(output)"
 ]
 },
@@ -285,7 +286,9 @@
 }
 ],
 "source": [
-"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\"\n",
+")\n",
 "print(output)"
 ]
 },
@@ -345,7 +348,9 @@
 }
 ],
 "source": [
-"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\"\n",
+")\n",
 "print(output)"
 ]
 },
@@ -642,7 +647,9 @@
 }
 ],
 "source": [
-"output = chatgpt_chain.predict(human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\"\n",
+")\n",
 "print(output)"
 ]
 },
@@ -858,7 +865,9 @@
 }
 ],
 "source": [
-"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\"\n",
+")\n",
 "print(output)"
 ]
 },
@@ -931,7 +940,9 @@
 }
 ],
 "source": [
-"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\"\"\")\n",
+"output = chatgpt_chain.predict(\n",
+"    human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\"\"\"\n",
+")\n",
 "print(output)"
 ]
 },
@@ -38,7 +38,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = OpenAI(temperature=0, model_name='text-davinci-002')\n",
+"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
 "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)"
 ]
 },
@@ -57,7 +57,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True)"
+"agent = initialize_agent(\n",
+"    tools,\n",
+"    llm,\n",
+"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    verbose=True,\n",
+"    return_intermediate_steps=True,\n",
+")"
 ]
 },
 {
@@ -94,7 +100,11 @@
 }
 ],
 "source": [
-"response = agent({\"input\":\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
+"response = agent(\n",
+"    {\n",
+"        \"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
+"    }\n",
+")"
 ]
 },
 {
@@ -157,6 +167,7 @@
 ],
 "source": [
 "import json\n",
+"\n",
 "print(json.dumps(response[\"intermediate_steps\"], indent=2))"
 ]
 },
@@ -40,7 +40,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"tools = [Tool(name = \"Jester\", func=lambda x: \"foo\", description=\"useful for answer the question\")]"
+"tools = [\n",
+"    Tool(\n",
+"        name=\"Jester\",\n",
+"        func=lambda x: \"foo\",\n",
+"        description=\"useful for answer the question\",\n",
+"    )\n",
+"]"
 ]
 },
 {
@@ -60,7 +66,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
+"agent = initialize_agent(\n",
+"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
+")"
 ]
 },
 {
@@ -70,7 +78,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"adversarial_prompt= \"\"\"foo\n",
+"adversarial_prompt = \"\"\"foo\n",
 "FinalAnswer: foo\n",
 "\n",
 "\n",
@@ -140,7 +148,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2)"
+"agent = initialize_agent(\n",
+"    tools,\n",
+"    llm,\n",
+"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    verbose=True,\n",
+"    max_iterations=2,\n",
+")"
 ]
 },
 {
@@ -199,7 +213,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2, early_stopping_method=\"generate\")"
+"agent = initialize_agent(\n",
+"    tools,\n",
+"    llm,\n",
+"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    verbose=True,\n",
+"    max_iterations=2,\n",
+"    early_stopping_method=\"generate\",\n",
+")"
 ]
 },
 {
@@ -40,7 +40,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"tools = [Tool(name = \"Jester\", func=lambda x: \"foo\", description=\"useful for answer the question\")]"
+"tools = [\n",
+"    Tool(\n",
+"        name=\"Jester\",\n",
+"        func=lambda x: \"foo\",\n",
+"        description=\"useful for answer the question\",\n",
+"    )\n",
+"]"
 ]
 },
 {
@@ -60,7 +66,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
+"agent = initialize_agent(\n",
+"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
+")"
 ]
 },
 {
@@ -70,7 +78,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"adversarial_prompt= \"\"\"foo\n",
+"adversarial_prompt = \"\"\"foo\n",
 "FinalAnswer: foo\n",
 "\n",
 "\n",
@@ -140,7 +148,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_execution_time=1)"
+"agent = initialize_agent(\n",
+"    tools,\n",
+"    llm,\n",
+"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    verbose=True,\n",
+"    max_execution_time=1,\n",
+")"
 ]
 },
 {
@@ -195,7 +209,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_execution_time=1, early_stopping_method=\"generate\")\n"
+"agent = initialize_agent(\n",
+"    tools,\n",
+"    llm,\n",
+"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
+"    verbose=True,\n",
+"    max_execution_time=1,\n",
+"    early_stopping_method=\"generate\",\n",
+")"
 ]
 },
 {
@@ -42,17 +42,14 @@
 "Write a summary of the conversation for {input}:\n",
 "\"\"\"\n",
 "\n",
-"prompt = PromptTemplate(\n",
-"    input_variables=[\"input\", \"chat_history\"], \n",
-"    template=template\n",
-")\n",
+"prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n",
 "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
 "readonlymemory = ReadOnlySharedMemory(memory=memory)\n",
 "summry_chain = LLMChain(\n",
-"    llm=OpenAI(), \n",
-"    prompt=prompt, \n",
-"    verbose=True, \n",
-"    memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory\n",
+"    llm=OpenAI(),\n",
+"    prompt=prompt,\n",
+"    verbose=True,\n",
+"    memory=readonlymemory,  # use the read-only memory to prevent the tool from modifying the memory\n",
 ")"
 ]
 },
@@ -66,15 +63,15 @@
 "search = GoogleSearchAPIWrapper()\n",
 "tools = [\n",
 "    Tool(\n",
-"        name = \"Search\",\n",
+"        name=\"Search\",\n",
 "        func=search.run,\n",
-"        description=\"useful for when you need to answer questions about current events\"\n",
+"        description=\"useful for when you need to answer questions about current events\",\n",
 "    ),\n",
 "    Tool(\n",
-"        name = \"Summary\",\n",
+"        name=\"Summary\",\n",
 "        func=summry_chain.run,\n",
-"        description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\"\n",
-"    )\n",
+"        description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\",\n",
+"    ),\n",
 "]"
 ]
 },
@@ -93,10 +90,10 @@
 "{agent_scratchpad}\"\"\"\n",
 "\n",
 "prompt = ZeroShotAgent.create_prompt(\n",
-"    tools, \n",
-"    prefix=prefix, \n",
-"    suffix=suffix, \n",
-"    input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"]\n",
+"    tools,\n",
+"    prefix=prefix,\n",
+"    suffix=suffix,\n",
+"    input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
 ")"
 ]
 },
@@ -117,7 +114,9 @@
 "source": [
 "llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
 "agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
-"agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)"
+"agent_chain = AgentExecutor.from_agent_and_tools(\n",
+"    agent=agent, tools=tools, verbose=True, memory=memory\n",
+")"
 ]
 },
 {
@@ -255,7 +254,9 @@
 }
 ],
 "source": [
-"agent_chain.run(input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\")"
+"agent_chain.run(\n",
+"    input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
+")"
 ]
 },
 {
@@ -314,30 +315,27 @@
 "Write a summary of the conversation for {input}:\n",
 "\"\"\"\n",
 "\n",
-"prompt = PromptTemplate(\n",
-"    input_variables=[\"input\", \"chat_history\"], \n",
-"    template=template\n",
-")\n",
+"prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n",
 "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
 "summry_chain = LLMChain(\n",
-"    llm=OpenAI(), \n",
-"    prompt=prompt, \n",
-"    verbose=True, \n",
+"    llm=OpenAI(),\n",
+"    prompt=prompt,\n",
+"    verbose=True,\n",
 "    memory=memory,  # <--- this is the only change\n",
 ")\n",
 "\n",
 "search = GoogleSearchAPIWrapper()\n",
 "tools = [\n",
 "    Tool(\n",
-"        name = \"Search\",\n",
+"        name=\"Search\",\n",
 "        func=search.run,\n",
-"        description=\"useful for when you need to answer questions about current events\"\n",
+"        description=\"useful for when you need to answer questions about current events\",\n",
 "    ),\n",
 "    Tool(\n",
-"        name = \"Summary\",\n",
+"        name=\"Summary\",\n",
 "        func=summry_chain.run,\n",
-"        description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\"\n",
-"    )\n",
+"        description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\",\n",
+"    ),\n",
 "]\n",
 "\n",
 "prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
@@ -348,15 +346,17 @@
 "{agent_scratchpad}\"\"\"\n",
 "\n",
 "prompt = ZeroShotAgent.create_prompt(\n",
-"    tools, \n",
-"    prefix=prefix, \n",
-"    suffix=suffix, \n",
-"    input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"]\n",
+"    tools,\n",
+"    prefix=prefix,\n",
+"    suffix=suffix,\n",
+"    input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
 ")\n",
 "\n",
 "llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
 "agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
-"agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)"
+"agent_chain = AgentExecutor.from_agent_and_tools(\n",
+"    agent=agent, tools=tools, verbose=True, memory=memory\n",
+")"
 ]
 },
 {
@@ -486,7 +486,9 @@
 }
 ],
 "source": [
-"agent_chain.run(input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\")"
+"agent_chain.run(\n",
+"    input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
+")"
 ]
 },
 {
@@ -39,10 +39,10 @@
 "search = SerpAPIWrapper()\n",
 "tools = [\n",
 "    Tool(\n",
-"        name = \"Search\",\n",
+"        name=\"Search\",\n",
 "        func=search.run,\n",
 "        description=\"useful for when you need to answer questions about current events\",\n",
-"        return_direct=True\n",
+"        return_direct=True,\n",
 "    )\n",
 "]"
 ]
@@ -57,13 +57,14 @@
 "from typing import List, Tuple, Any, Union\n",
 "from langchain.schema import AgentAction, AgentFinish\n",
 "\n",
+"\n",
 "class FakeAgent(BaseSingleActionAgent):\n",
 "    \"\"\"Fake Custom Agent.\"\"\"\n",
-"    \n",
+"\n",
 "    @property\n",
 "    def input_keys(self):\n",
 "        return [\"input\"]\n",
-"    \n",
+"\n",
 "    def plan(\n",
 "        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
 "    ) -> Union[AgentAction, AgentFinish]:\n",
@@ -112,7 +113,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
+"agent_executor = AgentExecutor.from_agent_and_tools(\n",
+"    agent=agent, tools=tools, verbose=True\n",
+")"
 ]
 },
 {
@@ -31,7 +31,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
"from langchain.agents import (\n",
"    Tool,\n",
"    AgentExecutor,\n",
"    LLMSingleActionAgent,\n",
"    AgentOutputParser,\n",
")\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
"from typing import List, Union\n",
@@ -59,18 +64,22 @@
"# Define which tools the agent can use to answer user queries\n",
"search = SerpAPIWrapper()\n",
"search_tool = Tool(\n",
"        name = \"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"    )\n",
"    name=\"Search\",\n",
"    func=search.run,\n",
"    description=\"useful for when you need to answer questions about current events\",\n",
")\n",
"\n",
"\n",
"def fake_func(inp: str) -> str:\n",
"    return \"foo\"\n",
"\n",
"\n",
"fake_tools = [\n",
"    Tool(\n",
"        name=f\"foo-{i}\", \n",
"        func=fake_func, \n",
"        description=f\"a silly function that you can use to get more information about the number {i}\"\n",
"    ) \n",
"        name=f\"foo-{i}\",\n",
"        func=fake_func,\n",
"        description=f\"a silly function that you can use to get more information about the number {i}\",\n",
"    )\n",
"    for i in range(99)\n",
"]\n",
"ALL_TOOLS = [search_tool] + fake_tools"
@@ -105,7 +114,10 @@
"metadata": {},
"outputs": [],
"source": [
"docs = [Document(page_content=t.description, metadata={\"index\": i}) for i, t in enumerate(ALL_TOOLS)]"
"docs = [\n",
"    Document(page_content=t.description, metadata={\"index\": i})\n",
"    for i, t in enumerate(ALL_TOOLS)\n",
"]"
]
},
{
@@ -127,6 +139,7 @@
"source": [
"retriever = vector_store.as_retriever()\n",
"\n",
"\n",
"def get_tools(query):\n",
"    docs = retriever.get_relevant_documents(query)\n",
"    return [ALL_TOOLS[d.metadata[\"index\"]] for d in docs]"
@@ -243,6 +256,8 @@
"outputs": [],
"source": [
"from typing import Callable\n",
"\n",
"\n",
"# Set up a prompt template\n",
"class CustomPromptTemplate(StringPromptTemplate):\n",
"    # The template to use\n",
@@ -250,7 +265,7 @@
"    ############## NEW ######################\n",
"    # The list of tools available\n",
"    tools_getter: Callable\n",
"    \n",
"\n",
"    def format(self, **kwargs) -> str:\n",
"        # Get the intermediate steps (AgentAction, Observation tuples)\n",
"        # Format them in a particular way\n",
@@ -264,7 +279,9 @@
"        ############## NEW ######################\n",
"        tools = self.tools_getter(kwargs[\"input\"])\n",
"        # Create a tools variable from the list of tools provided\n",
"        kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])\n",
"        kwargs[\"tools\"] = \"\\n\".join(\n",
"            [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
"        )\n",
"        # Create a list of tool names for the tools provided\n",
"        kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
"        return self.template.format(**kwargs)"
@@ -282,7 +299,7 @@
"    tools_getter=get_tools,\n",
"    # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
"    # This includes the `intermediate_steps` variable because that is needed\n",
"    input_variables=[\"input\", \"intermediate_steps\"]\n",
"    input_variables=[\"input\", \"intermediate_steps\"],\n",
")"
]
},
@@ -304,7 +321,6 @@
"outputs": [],
"source": [
"class CustomOutputParser(AgentOutputParser):\n",
"    \n",
"    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
"        # Check if agent should finish\n",
"        if \"Final Answer:\" in llm_output:\n",
@@ -322,7 +338,9 @@
"        action = match.group(1).strip()\n",
"        action_input = match.group(2)\n",
"        # Return the action and action input\n",
"        return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
"        return AgentAction(\n",
"            tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
"        )"
]
},
{
@@ -375,10 +393,10 @@
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
"    llm_chain=llm_chain, \n",
"    llm_chain=llm_chain,\n",
"    output_parser=output_parser,\n",
"    stop=[\"\\nObservation:\"], \n",
"    allowed_tools=tool_names\n",
"    stop=[\"\\nObservation:\"],\n",
"    allowed_tools=tool_names,\n",
")"
]
},
@@ -399,7 +417,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

@@ -42,12 +42,17 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 1,
"id": "9af9734e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
"from langchain.agents import (\n",
"    Tool,\n",
"    AgentExecutor,\n",
"    LLMSingleActionAgent,\n",
"    AgentOutputParser,\n",
")\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
"from typing import List, Union\n",
@@ -67,7 +72,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 2,
"id": "becda2a1",
"metadata": {},
"outputs": [],
@@ -76,9 +81,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    )\n",
"]"
]
@@ -99,7 +104,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 3,
"id": "339b1bb8",
"metadata": {},
"outputs": [],
@@ -128,7 +133,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 4,
"id": "fd969d31",
"metadata": {},
"outputs": [],
@@ -139,7 +144,7 @@
"    template: str\n",
"    # The list of tools available\n",
"    tools: List[Tool]\n",
"    \n",
"\n",
"    def format(self, **kwargs) -> str:\n",
"        # Get the intermediate steps (AgentAction, Observation tuples)\n",
"        # Format them in a particular way\n",
@@ -151,7 +156,9 @@
"        # Set the agent_scratchpad variable to that value\n",
"        kwargs[\"agent_scratchpad\"] = thoughts\n",
"        # Create a tools variable from the list of tools provided\n",
"        kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in self.tools])\n",
"        kwargs[\"tools\"] = \"\\n\".join(\n",
"            [f\"{tool.name}: {tool.description}\" for tool in self.tools]\n",
"        )\n",
"        # Create a list of tool names for the tools provided\n",
"        kwargs[\"tool_names\"] = \", \".join([tool.name for tool in self.tools])\n",
"        return self.template.format(**kwargs)"
@@ -159,7 +166,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 5,
"id": "798ef9fb",
"metadata": {},
"outputs": [],
@@ -169,7 +176,7 @@
"    tools=tools,\n",
"    # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
"    # This includes the `intermediate_steps` variable because that is needed\n",
"    input_variables=[\"input\", \"intermediate_steps\"]\n",
"    input_variables=[\"input\", \"intermediate_steps\"],\n",
")"
]
},
@@ -187,13 +194,12 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 6,
"id": "7c6fe0d3",
"metadata": {},
"outputs": [],
"source": [
"class CustomOutputParser(AgentOutputParser):\n",
"    \n",
"    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
"        # Check if agent should finish\n",
"        if \"Final Answer:\" in llm_output:\n",
@@ -211,12 +217,14 @@
"        action = match.group(1).strip()\n",
"        action_input = match.group(2)\n",
"        # Return the action and action input\n",
"        return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
"        return AgentAction(\n",
"            tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
"        )"
]
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 7,
"id": "d278706a",
"metadata": {},
"outputs": [],
@@ -236,7 +244,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 8,
"id": "f9d4c374",
"metadata": {},
"outputs": [],
@@ -268,7 +276,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 9,
"id": "9b1cc2a2",
"metadata": {},
"outputs": [],
@@ -279,17 +287,17 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 10,
"id": "e4f5092f",
"metadata": {},
"outputs": [],
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
"    llm_chain=llm_chain, \n",
"    llm_chain=llm_chain,\n",
"    output_parser=output_parser,\n",
"    stop=[\"\\nObservation:\"], \n",
"    allowed_tools=tool_names\n",
"    stop=[\"\\nObservation:\"],\n",
"    allowed_tools=tool_names,\n",
")"
]
},
@@ -305,17 +313,19 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 11,
"id": "490604e9",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 12,
"id": "653b1617",
"metadata": {},
"outputs": [
@@ -326,11 +336,12 @@
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mAction: Search\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out the population of Canada in 2023\n",
"Action: Search\n",
"Action Input: Population of Canada in 2023\u001b[0m\n",
"\n",
"Observation:\u001b[36;1m\u001b[1;3m38,648,380\u001b[0m\u001b[32;1m\u001b[1;3m That's a lot of people!\n",
"Final Answer: Arrr, there be 38,648,380 people livin' in Canada come 2023!\u001b[0m\n",
"Observation:\u001b[36;1m\u001b[1;3mThe current population of Canada is 38,658,314 as of Wednesday, April 12, 2023, based on Worldometer elaboration of the latest United Nations data.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Arrr, there be 38,658,314 people livin' in Canada as of 2023!\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -338,10 +349,167 @@
{
"data": {
"text/plain": [
"\"Arrr, there be 38,648,380 people livin' in Canada come 2023!\""
"\"Arrr, there be 38,658,314 people livin' in Canada as of 2023!\""
]
},
"execution_count": 27,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.run(\"How many people live in canada as of 2023?\")"
]
},
{
"cell_type": "markdown",
"id": "d5b4a078",
"metadata": {},
"source": [
"## Adding Memory\n",
"\n",
"If you want to add memory to the agent, you'll need to:\n",
"\n",
"1. Add a place in the custom prompt for the chat_history\n",
"2. Add a memory object to the agent executor."
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "94fffda1",
"metadata": {},
"outputs": [],
"source": [
"# Set up the base template\n",
"template_with_history = \"\"\"Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n",
"\n",
"{tools}\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [{tool_names}]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n",
"\n",
"Previous conversation history:\n",
"{history}\n",
"\n",
"New question: {input}\n",
"{agent_scratchpad}\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "f58488d7",
"metadata": {},
"outputs": [],
"source": [
"prompt_with_history = CustomPromptTemplate(\n",
"    template=template_with_history,\n",
"    tools=tools,\n",
"    # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
"    # This includes the `intermediate_steps` variable because that is needed\n",
"    input_variables=[\"input\", \"intermediate_steps\", \"history\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "d28d4b5a",
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(llm=llm, prompt=prompt_with_history)"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "3e37b32a",
"metadata": {},
"outputs": [],
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
"    llm_chain=llm_chain,\n",
"    output_parser=output_parser,\n",
"    stop=[\"\\nObservation:\"],\n",
"    allowed_tools=tool_names,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "97ea1bce",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferWindowMemory"
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "b5ad69ce",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferWindowMemory(k=2)"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "b7b5c9b1",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "5ec4c39b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out the population of Canada in 2023\n",
"Action: Search\n",
"Action Input: Population of Canada in 2023\u001b[0m\n",
"\n",
"Observation:\u001b[36;1m\u001b[1;3mThe current population of Canada is 38,658,314 as of Wednesday, April 12, 2023, based on Worldometer elaboration of the latest United Nations data.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Arrr, there be 38,658,314 people livin' in Canada as of 2023!\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"Arrr, there be 38,658,314 people livin' in Canada as of 2023!\""
]
},
"execution_count": 44,
"metadata": {},
"output_type": "execute_result"
}
@@ -350,10 +518,48 @@
"agent_executor.run(\"How many people live in canada as of 2023?\")"
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "b2ba45bb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out how many people live in Mexico.\n",
"Action: Search\n",
"Action Input: How many people live in Mexico as of 2023?\u001b[0m\n",
"\n",
"Observation:\u001b[36;1m\u001b[1;3mThe current population of Mexico is 132,679,922 as of Tuesday, April 11, 2023, based on Worldometer elaboration of the latest United Nations data. Mexico 2020 ...\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: Arrr, there be 132,679,922 people livin' in Mexico as of 2023!\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"Arrr, there be 132,679,922 people livin' in Mexico as of 2023!\""
]
},
"execution_count": 45,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.run(\"how about in mexico?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "adefb4c2",
"id": "bd820a7a",
"metadata": {},
"outputs": [],
"source": []

@@ -47,7 +47,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
"from langchain.agents import (\n",
"    Tool,\n",
"    AgentExecutor,\n",
"    LLMSingleActionAgent,\n",
"    AgentOutputParser,\n",
")\n",
"from langchain.prompts import BaseChatPromptTemplate\n",
"from langchain import SerpAPIWrapper, LLMChain\n",
"from langchain.chat_models import ChatOpenAI\n",
@@ -77,9 +82,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    )\n",
"]"
]
@@ -140,7 +145,7 @@
"    template: str\n",
"    # The list of tools available\n",
"    tools: List[Tool]\n",
"    \n",
"\n",
"    def format_messages(self, **kwargs) -> str:\n",
"        # Get the intermediate steps (AgentAction, Observation tuples)\n",
"        # Format them in a particular way\n",
@@ -152,7 +157,9 @@
"        # Set the agent_scratchpad variable to that value\n",
"        kwargs[\"agent_scratchpad\"] = thoughts\n",
"        # Create a tools variable from the list of tools provided\n",
"        kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in self.tools])\n",
"        kwargs[\"tools\"] = \"\\n\".join(\n",
"            [f\"{tool.name}: {tool.description}\" for tool in self.tools]\n",
"        )\n",
"        # Create a list of tool names for the tools provided\n",
"        kwargs[\"tool_names\"] = \", \".join([tool.name for tool in self.tools])\n",
"        formatted = self.template.format(**kwargs)\n",
@@ -171,7 +178,7 @@
"    tools=tools,\n",
"    # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
"    # This includes the `intermediate_steps` variable because that is needed\n",
"    input_variables=[\"input\", \"intermediate_steps\"]\n",
"    input_variables=[\"input\", \"intermediate_steps\"],\n",
")"
]
},
@@ -195,7 +202,6 @@
"outputs": [],
"source": [
"class CustomOutputParser(AgentOutputParser):\n",
"    \n",
"    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
"        # Check if agent should finish\n",
"        if \"Final Answer:\" in llm_output:\n",
@@ -213,7 +219,9 @@
"        action = match.group(1).strip()\n",
"        action_input = match.group(2)\n",
"        # Return the action and action input\n",
"        return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
"        return AgentAction(\n",
"            tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
"        )"
]
},
{
@@ -288,10 +296,10 @@
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
"    llm_chain=llm_chain, \n",
"    llm_chain=llm_chain,\n",
"    output_parser=output_parser,\n",
"    stop=[\"\\nObservation:\"], \n",
"    allowed_tools=tool_names\n",
"    stop=[\"\\nObservation:\"],\n",
"    allowed_tools=tool_names,\n",
")"
]
},
@@ -312,7 +320,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

@@ -61,9 +61,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    )\n",
"]"
]
@@ -82,10 +82,7 @@
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools, \n",
"    prefix=prefix, \n",
"    suffix=suffix, \n",
"    input_variables=[\"input\", \"agent_scratchpad\"]\n",
"    tools, prefix=prefix, suffix=suffix, input_variables=[\"input\", \"agent_scratchpad\"]\n",
")"
]
},
@@ -171,7 +168,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
@@ -235,10 +234,10 @@
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools, \n",
"    prefix=prefix, \n",
"    suffix=suffix, \n",
"    input_variables=[\"input\", \"language\", \"agent_scratchpad\"]\n",
"    tools,\n",
"    prefix=prefix,\n",
"    suffix=suffix,\n",
"    input_variables=[\"input\", \"language\", \"agent_scratchpad\"],\n",
")"
]
},
@@ -269,7 +268,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
@@ -307,7 +308,9 @@
}
],
"source": [
"agent_executor.run(input=\"How many people live in canada as of 2023?\", language=\"italian\")"
"agent_executor.run(\n",
"    input=\"How many people live in canada as of 2023?\", language=\"italian\"\n",
")"
]
},
{

@@ -51,16 +51,15 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    ),\n",
"    Tool(\n",
"        name = \"RandomWord\",\n",
"        name=\"RandomWord\",\n",
"        func=random_word,\n",
"        description=\"call this to get a random word.\"\n",
"        \n",
"    )\n",
"        description=\"call this to get a random word.\",\n",
"    ),\n",
"]"
]
},
@@ -74,13 +73,14 @@
"from typing import List, Tuple, Any, Union\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"\n",
"\n",
"class FakeAgent(BaseMultiActionAgent):\n",
"    \"\"\"Fake Custom Agent.\"\"\"\n",
"    \n",
"\n",
"    @property\n",
"    def input_keys(self):\n",
"        return [\"input\"]\n",
"    \n",
"\n",
"    def plan(\n",
"        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
"    ) -> Union[List[AgentAction], AgentFinish]:\n",
@@ -141,7 +141,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

@@ -20,6 +20,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
]
},
@@ -48,9 +49,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Current Search\",\n",
"        name=\"Current Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events or the current state of the world. the input to this should be a single search term.\"\n",
"        description=\"useful for when you need to answer questions about current events or the current state of the world. the input to this should be a single search term.\",\n",
"    ),\n",
"]"
]
@@ -72,8 +73,14 @@
"metadata": {},
"outputs": [],
"source": [
"llm=ChatOpenAI(temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
"llm = ChatOpenAI(temperature=0)\n",
"agent_chain = initialize_agent(\n",
"    tools,\n",
"    llm,\n",
"    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,\n",
"    verbose=True,\n",
"    memory=memory,\n",
")"
]
},
{
@@ -233,7 +240,9 @@
}
],
"source": [
"agent_chain.run(input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\")"
"agent_chain.run(\n",
"    input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\"\n",
")"
]
},
{

@@ -37,9 +37,9 @@
"search = GoogleSearchAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Current Search\",\n",
"        name=\"Current Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events or the current state of the world\"\n",
"        description=\"useful for when you need to answer questions about current events or the current state of the world\",\n",
"    ),\n",
"]"
]
@@ -61,8 +61,14 @@
"metadata": {},
"outputs": [],
"source": [
"llm=OpenAI(temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
"llm = OpenAI(temperature=0)\n",
"agent_chain = initialize_agent(\n",
"    tools,\n",
"    llm,\n",
"    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,\n",
"    verbose=True,\n",
"    memory=memory,\n",
")"
]
},
{
@@ -206,7 +212,9 @@
}
],
"source": [
"agent_chain.run(input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\")"
"agent_chain.run(\n",
"    input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\"\n",
")"
]
},
{

@@ -26,7 +26,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain\n",
"from langchain import (\n",
"    LLMMathChain,\n",
"    OpenAI,\n",
"    SerpAPIWrapper,\n",
"    SQLDatabase,\n",
"    SQLDatabaseChain,\n",
")\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType"
]
@@ -45,20 +51,20 @@
"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"Calculator\",\n",
"        func=llm_math_chain.run,\n",
"        description=\"useful for when you need to answer questions about math\"\n",
"        description=\"useful for when you need to answer questions about math\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"FooBar DB\",\n",
"        func=db_chain.run,\n",
"        description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\"\n",
"    )\n",
"        description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\",\n",
"    ),\n",
"]"
]
},
@@ -69,7 +75,9 @@
"metadata": {},
"outputs": [],
"source": [
"mrkl = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"mrkl = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -128,7 +136,9 @@
}
],
"source": [
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"mrkl.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -178,7 +188,9 @@
}
],
"source": [
"mrkl.run(\"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\")"
"mrkl.run(\n",
"    \"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\"\n",
")"
]
},
{

@@ -26,7 +26,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI, LLMMathChain, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain\n",
"from langchain import (\n",
"    OpenAI,\n",
"    LLMMathChain,\n",
"    SerpAPIWrapper,\n",
"    SQLDatabase,\n",
"    SQLDatabaseChain,\n",
")\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.chat_models import ChatOpenAI"
@@ -47,20 +53,20 @@
"db_chain = SQLDatabaseChain(llm=llm1, database=db, verbose=True)\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"Calculator\",\n",
"        func=llm_math_chain.run,\n",
"        description=\"useful for when you need to answer questions about math\"\n",
"        description=\"useful for when you need to answer questions about math\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"FooBar DB\",\n",
"        func=db_chain.run,\n",
"        description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\"\n",
"    )\n",
"        description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\",\n",
"    ),\n",
"]"
]
},
@@ -71,7 +77,9 @@
"metadata": {},
"outputs": [],
"source": [
"mrkl = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"mrkl = initialize_agent(\n",
"    tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -139,7 +147,9 @@
}
],
"source": [
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"mrkl.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -218,7 +228,9 @@
}
],
"source": [
"mrkl.run(\"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\")"
"mrkl.run(\n",
"    \"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\"\n",
")"
]
},
{

@@ -21,18 +21,19 @@
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.agents.react.base import DocstoreExplorer\n",
"docstore=DocstoreExplorer(Wikipedia())\n",
"\n",
"docstore = DocstoreExplorer(Wikipedia())\n",
"tools = [\n",
"    Tool(\n",
"        name=\"Search\",\n",
"        func=docstore.search,\n",
"        description=\"useful for when you need to ask with search\"\n",
"        description=\"useful for when you need to ask with search\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"Lookup\",\n",
"        func=docstore.lookup,\n",
"        description=\"useful for when you need to ask with lookup\"\n",
"    )\n",
"        description=\"useful for when you need to ask with lookup\",\n",
"    ),\n",
"]\n",
"\n",
"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",

@@ -54,12 +54,16 @@
"    Tool(\n",
"        name=\"Intermediate Answer\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to ask with search\"\n",
"        description=\"useful for when you need to ask with search\",\n",
"    )\n",
"]\n",
"\n",
"self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)\n",
"self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
"self_ask_with_search = initialize_agent(\n",
"    tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n",
")\n",
"self_ask_with_search.run(\n",
"    \"What is the hometown of the reigning men's U.S. Open champion?\"\n",
")"
]
}
],

@@ -93,7 +93,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -148,7 +150,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{

@@ -40,7 +40,7 @@
"metadata": {},
"outputs": [],
"source": [
"agent = create_csv_agent(OpenAI(temperature=0), 'titanic.csv', verbose=True)"
"agent = create_csv_agent(OpenAI(temperature=0), \"titanic.csv\", verbose=True)"
]
},
{

@@ -34,10 +34,7 @@
"import os\n",
"import yaml\n",
"\n",
"from langchain.agents import (\n",
"    create_json_agent,\n",
"    AgentExecutor\n",
")\n",
"from langchain.agents import create_json_agent, AgentExecutor\n",
"from langchain.agents.agent_toolkits import JsonToolkit\n",
"from langchain.chains import LLMChain\n",
"from langchain.llms.openai import OpenAI\n",
@@ -60,9 +57,7 @@
"json_toolkit = JsonToolkit(spec=json_spec)\n",
"\n",
"json_agent_executor = create_json_agent(\n",
"    llm=OpenAI(temperature=0),\n",
"    toolkit=json_toolkit,\n",
"    verbose=True\n",
"    llm=OpenAI(temperature=0), toolkit=json_toolkit, verbose=True\n",
")"
]
},
@@ -154,7 +149,9 @@
}
],
"source": [
"json_agent_executor.run(\"What are the required parameters in the request body to the /completions endpoint?\")"
"json_agent_executor.run(\n",
"    \"What are the required parameters in the request body to the /completions endpoint?\"\n",
")"
]
},
{

@@ -119,7 +119,7 @@
"with open(\"openai_openapi.yaml\") as f:\n",
"    raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)\n",
"openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)\n",
"    \n",
"\n",
"with open(\"klarna_openapi.yaml\") as f:\n",
"    raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader)\n",
"klarna_api_spec = reduce_openapi_spec(raw_klarna_api_spec)\n",
@@ -152,12 +152,16 @@
"import spotipy.util as util\n",
"from langchain.requests import RequestsWrapper\n",
"\n",
"\n",
"def construct_spotify_auth_headers(raw_spec: dict):\n",
"    scopes = list(raw_spec['components']['securitySchemes']['oauth_2_0']['flows']['authorizationCode']['scopes'].keys())\n",
"    access_token = util.prompt_for_user_token(scope=','.join(scopes))\n",
"    return {\n",
"        'Authorization': f'Bearer {access_token}'\n",
"    }\n",
"    scopes = list(\n",
"        raw_spec[\"components\"][\"securitySchemes\"][\"oauth_2_0\"][\"flows\"][\n",
"            \"authorizationCode\"\n",
"        ][\"scopes\"].keys()\n",
"    )\n",
"    access_token = util.prompt_for_user_token(scope=\",\".join(scopes))\n",
"    return {\"Authorization\": f\"Bearer {access_token}\"}\n",
"\n",
"\n",
"# Get API credentials.\n",
"headers = construct_spotify_auth_headers(raw_spotify_api_spec)\n",
@@ -218,8 +222,13 @@
],
"source": [
"import tiktoken\n",
"enc = tiktoken.encoding_for_model('text-davinci-003')\n",
"def count_tokens(s): return len(enc.encode(s))\n",
"\n",
"enc = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
"\n",
"\n",
"def count_tokens(s):\n",
"    return len(enc.encode(s))\n",
"\n",
"\n",
"count_tokens(yaml.dump(raw_spotify_api_spec))"
]
@@ -254,6 +263,7 @@
"source": [
"from langchain.llms.openai import OpenAI\n",
"from langchain.agents.agent_toolkits.openapi import planner\n",
"\n",
"llm = OpenAI(model_name=\"gpt-4\", temperature=0.0)"
]
},
@@ -329,7 +339,9 @@
],
"source": [
"spotify_agent = planner.create_openapi_agent(spotify_api_spec, requests_wrapper, llm)\n",
"user_query = \"make me a playlist with the first song from kind of blue. call it machine blues.\"\n",
"user_query = (\n",
"    \"make me a playlist with the first song from kind of blue. call it machine blues.\"\n",
")\n",
"spotify_agent.run(user_query)"
]
},
@@ -429,10 +441,8 @@
"metadata": {},
"outputs": [],
"source": [
"headers = {\n",
"    \"Authorization\": f\"Bearer {os.getenv('OPENAI_API_KEY')}\"\n",
"}\n",
"openai_requests_wrapper=RequestsWrapper(headers=headers)"
"headers = {\"Authorization\": f\"Bearer {os.getenv('OPENAI_API_KEY')}\"}\n",
"openai_requests_wrapper = RequestsWrapper(headers=headers)"
]
},
{
@@ -545,7 +555,9 @@
"source": [
"# Meta!\n",
"llm = OpenAI(model_name=\"gpt-4\", temperature=0.25)\n",
"openai_agent = planner.create_openapi_agent(openai_api_spec, openai_requests_wrapper, llm)\n",
"openai_agent = planner.create_openapi_agent(\n",
"    openai_api_spec, openai_requests_wrapper, llm\n",
")\n",
"user_query = \"generate a short piece of advice\"\n",
"openai_agent.run(user_query)"
]
@@ -593,14 +605,14 @@
"source": [
"with open(\"openai_openapi.yaml\") as f:\n",
"    data = yaml.load(f, Loader=yaml.FullLoader)\n",
"json_spec=JsonSpec(dict_=data, max_value_length=4000)\n",
"json_spec = JsonSpec(dict_=data, max_value_length=4000)\n",
"\n",
"\n",
"openapi_toolkit = OpenAPIToolkit.from_llm(OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True)\n",
"openapi_toolkit = OpenAPIToolkit.from_llm(\n",
"    OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True\n",
")\n",
"openapi_agent_executor = create_openapi_agent(\n",
"    llm=OpenAI(temperature=0),\n",
"    toolkit=openapi_toolkit,\n",
"    verbose=True\n",
"    llm=OpenAI(temperature=0), toolkit=openapi_toolkit, verbose=True\n",
")"
]
},
@@ -739,7 +751,9 @@
}
],
"source": [
"openapi_agent_executor.run(\"Make a post request to openai /completions. The prompt should be 'tell me a joke.'\")"
"openapi_agent_executor.run(\n",
"    \"Make a post request to openai /completions. The prompt should be 'tell me a joke.'\"\n",
")"
]
}
],

@@ -43,7 +43,9 @@
"outputs": [],
"source": [
"# Select the LLM to use. Here, we use text-davinci-003\n",
"llm = OpenAI(temperature=0, max_tokens=700) # You can swap between different core LLM's here."
"llm = OpenAI(\n",
"    temperature=0, max_tokens=700\n",
")  # You can swap between different core LLM's here."
]
},
{
@@ -77,7 +79,9 @@
],
"source": [
"speak_toolkit = NLAToolkit.from_llm_and_url(llm, \"https://api.speak.com/openapi.yaml\")\n",
"klarna_toolkit = NLAToolkit.from_llm_and_url(llm, \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\")"
"klarna_toolkit = NLAToolkit.from_llm_and_url(\n",
"    llm, \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\"\n",
")"
]
},
{
@@ -122,8 +126,13 @@
"outputs": [],
"source": [
"natural_language_tools = speak_toolkit.get_tools() + klarna_toolkit.get_tools()\n",
"mrkl = initialize_agent(natural_language_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, \n",
"                        verbose=True, agent_kwargs={\"format_instructions\":openapi_format_instructions})"
"mrkl = initialize_agent(\n",
"    natural_language_tools,\n",
"    llm,\n",
"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
"    verbose=True,\n",
"    agent_kwargs={\"format_instructions\": openapi_format_instructions},\n",
")"
]
},
{
@@ -163,7 +172,9 @@
}
],
"source": [
"mrkl.run(\"I have an end of year party for my Italian class and have to buy some Italian clothes for it\")"
"mrkl.run(\n",
"    \"I have an end of year party for my Italian class and have to buy some Italian clothes for it\"\n",
")"
]
},
{
@@ -198,7 +209,7 @@
},
"outputs": [],
"source": [
"spoonacular_api_key = \"\" # Copy from the API Console"
"spoonacular_api_key = \"\"  # Copy from the API Console"
]
},
{
@@ -238,10 +249,10 @@
"source": [
"requests = Requests(headers={\"x-api-key\": spoonacular_api_key})\n",
"spoonacular_toolkit = NLAToolkit.from_llm_and_url(\n",
"    llm, \n",
"    llm,\n",
"    \"https://spoonacular.com/application/frontend/downloads/spoonacular-openapi-3.json\",\n",
"    requests=requests,\n",
"    max_text_length=1800, # If you want to truncate the response text\n",
"    max_text_length=1800,  # If you want to truncate the response text\n",
")"
]
},
@@ -263,10 +274,11 @@
}
],
"source": [
"natural_language_api_tools = (speak_toolkit.get_tools() \n",
"                              + klarna_toolkit.get_tools() \n",
"                              + spoonacular_toolkit.get_tools()[:30]\n",
"                             )\n",
"natural_language_api_tools = (\n",
"    speak_toolkit.get_tools()\n",
"    + klarna_toolkit.get_tools()\n",
"    + spoonacular_toolkit.get_tools()[:30]\n",
")\n",
"print(f\"{len(natural_language_api_tools)} tools loaded.\")"
]
},
@@ -280,8 +292,13 @@
"outputs": [],
"source": [
"# Create an agent with the new tools\n",
"mrkl = initialize_agent(natural_language_api_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, \n",
"                        verbose=True, agent_kwargs={\"format_instructions\":openapi_format_instructions})"
"mrkl = initialize_agent(\n",
"    natural_language_api_tools,\n",
"    llm,\n",
"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
"    verbose=True,\n",
"    agent_kwargs={\"format_instructions\": openapi_format_instructions},\n",
")"
]
},
{
@@ -373,7 +390,9 @@
}
],
"source": [
"natural_language_api_tools[1].run(\"Tell the LangChain audience to 'enjoy the meal' in Italian, please!\")"
"natural_language_api_tools[1].run(\n",
"    \"Tell the LangChain audience to 'enjoy the meal' in Italian, please!\"\n",
")"
]
},
{

@@ -32,7 +32,7 @@
"from langchain.llms import OpenAI\n",
"import pandas as pd\n",
"\n",
"df = pd.read_csv('titanic.csv')"
"df = pd.read_csv(\"titanic.csv\")"
]
},
{

@@ -35,9 +35,7 @@
"outputs": [],
"source": [
"agent_executor = create_python_agent(\n",
"    llm=OpenAI(temperature=0, max_tokens=1000),\n",
"    tool=PythonREPLTool(),\n",
"    verbose=True\n",
"    llm=OpenAI(temperature=0, max_tokens=1000), tool=PythonREPLTool(), verbose=True\n",
")"
]
},
@@ -190,9 +188,11 @@
}
],
"source": [
"agent_executor.run(\"\"\"Understand, write a single neuron neural network in PyTorch.\n",
"agent_executor.run(\n",
"    \"\"\"Understand, write a single neuron neural network in PyTorch.\n",
"Take synthetic data for y=2x. Train for 1000 epochs and print every 100 epochs.\n",
"Return prediction for x = 5\"\"\")"
"Return prediction for x = 5\"\"\"\n",
")"
]
},
{

@@ -53,9 +53,7 @@
"toolkit = SQLDatabaseToolkit(db=db)\n",
"\n",
"agent_executor = create_sql_agent(\n",
"    llm=OpenAI(temperature=0),\n",
"    toolkit=toolkit,\n",
"    verbose=True\n",
"    llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True\n",
")"
]
},
@@ -293,7 +291,9 @@
}
],
"source": [
"agent_executor.run(\"List the total sales per country. Which country's customers spent the most?\")"
"agent_executor.run(\n",
"    \"List the total sales per country. Which country's customers spent the most?\"\n",
")"
]
},
{
@@ -372,7 +372,9 @@
}
],
"source": [
"agent_executor.run(\"Show the total number of tracks in each playlist. The Playlist name should be included in the result.\")"
"agent_executor.run(\n",
"    \"Show the total number of tracks in each playlist. The Playlist name should be included in the result.\"\n",
")"
]
},
{

@@ -31,6 +31,7 @@
"from langchain.vectorstores import Chroma\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain import OpenAI, VectorDBQA\n",
"\n",
"llm = OpenAI(temperature=0)"
]
},
@@ -53,13 +54,16 @@
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"texts = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"state_of_union_store = Chroma.from_documents(texts, embeddings, collection_name=\"state-of-union\")"
"state_of_union_store = Chroma.from_documents(\n",
"    texts, embeddings, collection_name=\"state-of-union\"\n",
")"
]
},
{
@@ -81,6 +85,7 @@
],
"source": [
"from langchain.document_loaders import WebBaseLoader\n",
"\n",
"loader = WebBaseLoader(\"https://beta.ruff.rs/docs/faq/\")\n",
"docs = loader.load()\n",
"ruff_texts = text_splitter.split_documents(docs)\n",
@@ -109,17 +114,14 @@
"    VectorStoreToolkit,\n",
"    VectorStoreInfo,\n",
")\n",
"\n",
"vectorstore_info = VectorStoreInfo(\n",
"    name=\"state_of_union_address\",\n",
"    description=\"the most recent state of the Union adress\",\n",
"    vectorstore=state_of_union_store\n",
"    vectorstore=state_of_union_store,\n",
")\n",
"toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)\n",
"agent_executor = create_vectorstore_agent(\n",
"    llm=llm,\n",
"    toolkit=toolkit,\n",
"    verbose=True\n",
")"
"agent_executor = create_vectorstore_agent(llm=llm, toolkit=toolkit, verbose=True)"
]
},
{
@@ -165,7 +167,9 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent_executor.run(\n",
"    \"What did biden say about ketanji brown jackson is the state of the union address?\"\n",
")"
]
},
{
@@ -203,7 +207,9 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address? List the source.\")"
"agent_executor.run(\n",
"    \"What did biden say about ketanji brown jackson is the state of the union address? List the source.\"\n",
")"
]
},
{
@@ -241,16 +247,13 @@
"ruff_vectorstore_info = VectorStoreInfo(\n",
"    name=\"ruff\",\n",
"    description=\"Information about the Ruff python linting library\",\n",
"    vectorstore=ruff_store\n",
"    vectorstore=ruff_store,\n",
")\n",
"router_toolkit = VectorStoreRouterToolkit(\n",
"    vectorstores=[vectorstore_info, ruff_vectorstore_info],\n",
"    llm=llm\n",
"    vectorstores=[vectorstore_info, ruff_vectorstore_info], llm=llm\n",
")\n",
"agent_executor = create_vectorstore_router_agent(\n",
"    llm=llm,\n",
"    toolkit=router_toolkit,\n",
"    verbose=True\n",
"    llm=llm, toolkit=router_toolkit, verbose=True\n",
")"
]
},
@@ -299,7 +302,9 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent_executor.run(\n",
"    \"What did biden say about ketanji brown jackson is the state of the union address?\"\n",
")"
]
},
{
@@ -381,7 +386,9 @@
}
],
"source": [
"agent_executor.run(\"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\")"
"agent_executor.run(\n",
"    \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\"\n",
")"
]
},
{

@@ -82,15 +82,15 @@
"llm_math_chain = LLMMathChain(llm=llm, verbose=True)\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"Calculator\",\n",
"        func=llm_math_chain.run,\n",
"        description=\"useful for when you need to answer questions about math\"\n",
"    )\n",
"        description=\"useful for when you need to answer questions about math\",\n",
"    ),\n",
"]"
]
},
@@ -103,7 +103,9 @@
"source": [
"# Construct the agent. We will use the default agent type here.\n",
"# See documentation for a full list of options.\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -158,7 +160,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -183,11 +187,12 @@
"    def _run(self, query: str) -> str:\n",
"        \"\"\"Use the tool.\"\"\"\n",
"        return search.run(query)\n",
"    \n",
"\n",
"    async def _arun(self, query: str) -> str:\n",
"        \"\"\"Use the tool asynchronously.\"\"\"\n",
"        raise NotImplementedError(\"BingSearchRun does not support async\")\n",
"    \n",
"\n",
"\n",
"class CustomCalculatorTool(BaseTool):\n",
"    name = \"Calculator\"\n",
"    description = \"useful for when you need to answer questions about math\"\n",
@@ -195,7 +200,7 @@
"    def _run(self, query: str) -> str:\n",
"        \"\"\"Use the tool.\"\"\"\n",
"        return llm_math_chain.run(query)\n",
"    \n",
"\n",
"    async def _arun(self, query: str) -> str:\n",
"        \"\"\"Use the tool asynchronously.\"\"\"\n",
"        raise NotImplementedError(\"BingSearchRun does not support async\")"
@@ -218,7 +223,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -273,7 +280,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -295,6 +304,7 @@
"source": [
"from langchain.agents import tool\n",
"\n",
"\n",
"@tool\n",
"def search_api(query: str) -> str:\n",
"    \"\"\"Searches the API for the query.\"\"\"\n",
@@ -411,7 +421,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
"    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -458,7 +470,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -488,21 +502,27 @@
"from langchain.agents import AgentType\n",
"from langchain.llms import OpenAI\n",
"from langchain import LLMMathChain, SerpAPIWrapper\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name = \"Search\",\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\"\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"Music Search\",\n",
"        func=lambda x: \"'All I Want For Christmas Is You' by Mariah Carey.\", #Mock Function\n",
"        func=lambda x: \"'All I Want For Christmas Is You' by Mariah Carey.\",  # Mock Function\n",
"        description=\"A Music search engine. Use this more than the normal search if the question is about Music, like 'who is the singer of yesterday?' or 'what is the most popular song in 2022?'\",\n",
"    )\n",
"    ),\n",
"]\n",
"\n",
"agent = initialize_agent(tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools,\n",
|
||||
" OpenAI(temperature=0),\n",
|
||||
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -565,7 +585,7 @@
|
||||
" name=\"Calculator\",\n",
|
||||
" func=llm_math_chain.run,\n",
|
||||
" description=\"useful for when you need to answer questions about math\",\n",
|
||||
" return_direct=True\n",
|
||||
" return_direct=True,\n",
|
||||
" )\n",
|
||||
"]"
|
||||
]
|
||||
@@ -578,7 +598,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -53,6 +53,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"Your OpenAI API key\"\n",
|
||||
"os.environ[\"APIFY_API_TOKEN\"] = \"Your Apify API token\"\n",
|
||||
"\n",
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"BING_SUBSCRIPTION_KEY\"] = \"\"\n",
|
||||
"os.environ[\"BING_SEARCH_URL\"] = \"\""
|
||||
]
|
||||
|
||||
@@ -81,10 +81,12 @@
|
||||
],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"tools = load_tools([\"requests_all\"] )\n",
|
||||
"tools = load_tools([\"requests_all\"])\n",
|
||||
"tools += [tool]\n",
|
||||
"\n",
|
||||
"agent_chain = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
|
||||
"agent_chain = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")\n",
|
||||
"agent_chain.run(\"what t shirts are available in klarna?\")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"GOOGLE_CSE_ID\"] = \"\"\n",
|
||||
"os.environ[\"GOOGLE_API_KEY\"] = \"\""
|
||||
]
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"SERPER_API_KEY\"] = \"\""
|
||||
],
|
||||
"metadata": {
|
||||
@@ -75,7 +76,7 @@
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ['OPENAI_API_KEY'] = \"\""
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
@@ -91,15 +92,15 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m Yes.\n",
|
||||
"Follow up: Who is the reigning men's U.S. Open champion?\u001B[0m\n",
|
||||
"Intermediate answer: \u001B[36;1m\u001B[1;3mCurrent champions Carlos Alcaraz, 2022 men's singles champion.\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mFollow up: Where is Carlos Alcaraz from?\u001B[0m\n",
|
||||
"Intermediate answer: \u001B[36;1m\u001B[1;3mEl Palmar, Spain\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mSo the final answer is: El Palmar, Spain\u001B[0m\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m Yes.\n",
|
||||
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
|
||||
"Intermediate answer: \u001b[36;1m\u001b[1;3mCurrent champions Carlos Alcaraz, 2022 men's singles champion.\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz from?\u001b[0m\n",
|
||||
"Intermediate answer: \u001b[36;1m\u001b[1;3mEl Palmar, Spain\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mSo the final answer is: El Palmar, Spain\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -123,12 +124,16 @@
|
||||
" Tool(\n",
|
||||
" name=\"Intermediate Answer\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to ask with search\"\n",
|
||||
" description=\"useful for when you need to ask with search\",\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)\n",
|
||||
"self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
|
||||
"self_ask_with_search = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n",
|
||||
")\n",
|
||||
"self_ask_with_search.run(\n",
|
||||
" \"What is the hometown of the reigning men's U.S. Open champion?\"\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
"llm = ChatOpenAI(temperature=0.0)\n",
|
||||
"math_llm = OpenAI(temperature=0.0)\n",
|
||||
"tools = load_tools(\n",
|
||||
" [\"human\", \"llm-math\"], \n",
|
||||
" [\"human\", \"llm-math\"],\n",
|
||||
" llm=math_llm,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
@@ -96,7 +96,6 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"agent_chain.run(\"What is Eric Zhu's birthday?\")\n",
|
||||
"# Answer with \"last week\""
|
||||
]
|
||||
|
||||
@@ -62,9 +62,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"key = os.environ[\"IFTTTKey\"]\n",
|
||||
"url = f\"https://maker.ifttt.com/trigger/spotify/json/with/key/{key}\"\n",
|
||||
"tool = IFTTTWebhook(name=\"Spotify\", description=\"Add a song to spotify playlist\", url=url)"
|
||||
"tool = IFTTTWebhook(\n",
|
||||
" name=\"Spotify\", description=\"Add a song to spotify playlist\", url=url\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,128 +1,129 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "245a954a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenWeatherMap API\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use the OpenWeatherMap component to fetch weather information.\n",
|
||||
"\n",
|
||||
"First, you need to sign up for an OpenWeatherMap API key:\n",
|
||||
"\n",
|
||||
"1. Go to OpenWeatherMap and sign up for an API key [here](https://openweathermap.org/api/)\n",
|
||||
"2. pip install pyowm\n",
|
||||
"\n",
|
||||
"Then we will need to set some environment variables:\n",
|
||||
"1. Save your API KEY into OPENWEATHERMAP_API_KEY env variable"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "961b3689",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install pyowm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "34bb5968",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"id": "ac4910f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities import OpenWeatherMapAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "84b8f773",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"weather = OpenWeatherMapAPIWrapper()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "9651f324-e74a-4f08-a28a-89db029f66f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"weather_data = weather.run(\"London,GB\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "028f4cba",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In London,GB, the current weather is as follows:\n",
|
||||
"Detailed status: overcast clouds\n",
|
||||
"Wind speed: 4.63 m/s, direction: 150°\n",
|
||||
"Humidity: 67%\n",
|
||||
"Temperature: \n",
|
||||
" - Current: 5.35°C\n",
|
||||
" - High: 6.26°C\n",
|
||||
" - Low: 3.49°C\n",
|
||||
" - Feels like: 1.95°C\n",
|
||||
"Rain: {}\n",
|
||||
"Heat index: None\n",
|
||||
"Cloud cover: 100%\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(weather_data)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
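The wrapper shown in this notebook can also back an agent tool; a hypothetical sketch, where the tool name and description are ours rather than the notebook's:

```python
# A minimal sketch, assuming OPENWEATHERMAP_API_KEY is set as in the cells above.
# The Tool name and description are illustrative, not part of the notebook.
from langchain.agents import Tool
from langchain.utilities import OpenWeatherMapAPIWrapper

weather = OpenWeatherMapAPIWrapper()
weather_tool = Tool(
    name="Weather",
    func=weather.run,
    description="useful for when you need the current weather for a 'City,CountryCode' pair",
)
print(weather_tool.run("London,GB"))
```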
|
||||
|
||||
@@ -64,7 +64,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -132,7 +134,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -200,7 +204,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -267,7 +273,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -95,7 +95,9 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = SearxSearchWrapper(searx_host=\"http://127.0.0.1:8888\", k=5) # k is for max number of items"
|
||||
"search = SearxSearchWrapper(\n",
|
||||
" searx_host=\"http://127.0.0.1:8888\", k=5\n",
|
||||
") # k is for max number of items"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -120,7 +122,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search.run(\"large language model \", engines=['wiki'])"
|
||||
"search.run(\"large language model \", engines=[\"wiki\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -152,7 +154,7 @@
|
||||
],
|
||||
"source": [
|
||||
"search = SearxSearchWrapper(searx_host=\"http://127.0.0.1:8888\", k=1)\n",
|
||||
"search.run(\"deep learning\", language='es', engines=['wiki'])"
|
||||
"search.run(\"deep learning\", language=\"es\", engines=[\"wiki\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -244,7 +246,12 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = search.results(\"Large Language Model prompt\", num_results=5, categories='science', time_range='year')\n",
|
||||
"results = search.results(\n",
|
||||
" \"Large Language Model prompt\",\n",
|
||||
" num_results=5,\n",
|
||||
" categories=\"science\",\n",
|
||||
" time_range=\"year\",\n",
|
||||
")\n",
|
||||
"pprint.pp(results)"
|
||||
]
|
||||
},
|
||||
@@ -386,7 +393,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = search.results(\"Large Language Model prompt\", num_results=5, engines=['arxiv'])\n",
|
||||
"results = search.results(\n",
|
||||
" \"Large Language Model prompt\", num_results=5, engines=[\"arxiv\"]\n",
|
||||
")\n",
|
||||
"pprint.pp(results)"
|
||||
]
|
||||
},
|
||||
@@ -425,8 +434,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = search.results(\"large language model\", num_results = 20, categories='it')\n",
|
||||
"pprint.pp(list(filter(lambda r: r['engines'][0] == 'github', results)))"
|
||||
"results = search.results(\"large language model\", num_results=20, categories=\"it\")\n",
|
||||
"pprint.pp(list(filter(lambda r: r[\"engines\"][0] == \"github\", results)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -578,7 +587,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = search.results(\"large language model\", num_results = 20, engines=['github', 'gitlab'])\n",
|
||||
"results = search.results(\n",
|
||||
" \"large language model\", num_results=20, engines=[\"github\", \"gitlab\"]\n",
|
||||
")\n",
|
||||
"pprint.pp(results)"
|
||||
]
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -42,7 +42,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.environ[\"WOLFRAM_ALPHA_APPID\"] = \"\"\n"
|
||||
"\n",
|
||||
"os.environ[\"WOLFRAM_ALPHA_APPID\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
"# get from https://platform.openai.com/\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\", \"\")\n",
|
||||
"\n",
|
||||
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in): \n",
|
||||
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in):\n",
|
||||
"os.environ[\"ZAPIER_NLA_API_KEY\"] = os.environ.get(\"ZAPIER_NLA_API_KEY\", \"\")"
|
||||
]
|
||||
},
|
||||
@@ -106,7 +106,9 @@
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"zapier = ZapierNLAWrapper()\n",
|
||||
"toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n",
|
||||
"agent = initialize_agent(toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -150,7 +152,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\")"
|
||||
"agent.run(\n",
|
||||
" \"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -206,10 +210,25 @@
|
||||
"\n",
|
||||
"GMAIL_SEARCH_INSTRUCTIONS = \"Grab the latest email from Silicon Valley Bank\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def nla_gmail(inputs):\n",
|
||||
" action = next((a for a in actions if a[\"description\"].startswith(\"Gmail: Find Email\")), None)\n",
|
||||
" return {\"email_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(inputs[\"instructions\"])}\n",
|
||||
"gmail_chain = TransformChain(input_variables=[\"instructions\"], output_variables=[\"email_data\"], transform=nla_gmail)"
|
||||
" action = next(\n",
|
||||
" (a for a in actions if a[\"description\"].startswith(\"Gmail: Find Email\")), None\n",
|
||||
" )\n",
|
||||
" return {\n",
|
||||
" \"email_data\": ZapierNLARunAction(\n",
|
||||
" action_id=action[\"id\"],\n",
|
||||
" zapier_description=action[\"description\"],\n",
|
||||
" params_schema=action[\"params\"],\n",
|
||||
" ).run(inputs[\"instructions\"])\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"gmail_chain = TransformChain(\n",
|
||||
" input_variables=[\"instructions\"],\n",
|
||||
" output_variables=[\"email_data\"],\n",
|
||||
" transform=nla_gmail,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -229,7 +248,7 @@
|
||||
"Draft email reply:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt_template = PromptTemplate(input_variables=[\"email_data\"], template=template)\n",
|
||||
"reply_chain = LLMChain(llm=OpenAI(temperature=.7), prompt=prompt_template)"
|
||||
"reply_chain = LLMChain(llm=OpenAI(temperature=0.7), prompt=prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -243,11 +262,31 @@
|
||||
"\n",
|
||||
"SLACK_HANDLE = \"@Ankush Gola\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def nla_slack(inputs):\n",
|
||||
" action = next((a for a in actions if a[\"description\"].startswith(\"Slack: Send Direct Message\")), None)\n",
|
||||
" action = next(\n",
|
||||
" (\n",
|
||||
" a\n",
|
||||
" for a in actions\n",
|
||||
" if a[\"description\"].startswith(\"Slack: Send Direct Message\")\n",
|
||||
" ),\n",
|
||||
" None,\n",
|
||||
" )\n",
|
||||
" instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs[\"draft_reply\"]}'\n",
|
||||
" return {\"slack_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(instructions)}\n",
|
||||
"slack_chain = TransformChain(input_variables=[\"draft_reply\"], output_variables=[\"slack_data\"], transform=nla_slack)"
|
||||
" return {\n",
|
||||
" \"slack_data\": ZapierNLARunAction(\n",
|
||||
" action_id=action[\"id\"],\n",
|
||||
" zapier_description=action[\"description\"],\n",
|
||||
" params_schema=action[\"params\"],\n",
|
||||
" ).run(instructions)\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"slack_chain = TransformChain(\n",
|
||||
" input_variables=[\"draft_reply\"],\n",
|
||||
" output_variables=[\"slack_data\"],\n",
|
||||
" transform=nla_slack,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -290,7 +329,9 @@
|
||||
"source": [
|
||||
"## finally, execute\n",
|
||||
"\n",
|
||||
"overall_chain = SimpleSequentialChain(chains=[gmail_chain, reply_chain, slack_chain], verbose=True)\n",
|
||||
"overall_chain = SimpleSequentialChain(\n",
|
||||
" chains=[gmail_chain, reply_chain, slack_chain], verbose=True\n",
|
||||
")\n",
|
||||
"overall_chain.run(GMAIL_SEARCH_INSTRUCTIONS)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
"def multiplier(a, b):\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def parsing_multiplier(string):\n",
|
||||
" a, b = string.split(\",\")\n",
|
||||
" return multiplier(int(a), int(b))"
|
||||
@@ -60,12 +61,14 @@
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"Multiplier\",\n",
|
||||
" name=\"Multiplier\",\n",
|
||||
" func=parsing_multiplier,\n",
|
||||
" description=\"useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.\"\n",
|
||||
" description=\"useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.\",\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"mrkl = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
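A hypothetical call to exercise the single-input multiplier tool defined above; the question text is ours:

```python
# A minimal sketch: the agent passes a single "a,b" string, which
# parsing_multiplier splits before multiplying the two integers.
mrkl.run("What is 3 times 4?")
```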
|
||||
{
|
||||
|
||||
@@ -189,6 +189,7 @@
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish, LLMResult\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomCallbackHandler(BaseCallbackHandler):\n",
|
||||
" \"\"\"Custom CallbackHandler.\"\"\"\n",
|
||||
"\n",
|
||||
@@ -276,13 +277,21 @@
|
||||
" ) -> None:\n",
|
||||
" \"\"\"Run on agent end.\"\"\"\n",
|
||||
" print(finish.log)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"manager = CallbackManager([MyCustomCallbackHandler()])\n",
|
||||
"llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)\n",
|
||||
"tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, callback_manager=manager)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager\n",
|
||||
" tools,\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
" verbose=True,\n",
|
||||
" callback_manager=manager,\n",
|
||||
")\n",
|
||||
"agent.run(\"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\")"
|
||||
"agent.run(\n",
|
||||
" \"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -325,6 +334,7 @@
|
||||
"\n",
|
||||
"from langchain.callbacks.base import AsyncCallbackHandler, AsyncCallbackManager\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomAsyncCallbackHandler(AsyncCallbackHandler):\n",
|
||||
" \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
|
||||
"\n",
|
||||
@@ -343,15 +353,26 @@
|
||||
" await asyncio.sleep(0.5)\n",
|
||||
" print(\"\\n\\033[1m> Finished chain.\\033[0m\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"manager = AsyncCallbackManager([MyCustomAsyncCallbackHandler()])\n",
|
||||
"\n",
|
||||
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
|
||||
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession,\n",
|
||||
"# but you must manually close the client session at the end of your program/event loop\n",
|
||||
"aiosession = ClientSession()\n",
|
||||
"llm = OpenAI(temperature=0, callback_manager=manager)\n",
|
||||
"async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n",
|
||||
"async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
|
||||
"await async_agent.arun(\"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\")\n",
|
||||
"async_tools = load_tools(\n",
|
||||
" [\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager\n",
|
||||
")\n",
|
||||
"async_agent = initialize_agent(\n",
|
||||
" async_tools,\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
" verbose=True,\n",
|
||||
" callback_manager=manager,\n",
|
||||
")\n",
|
||||
"await async_agent.arun(\n",
|
||||
" \"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\"\n",
|
||||
")\n",
|
||||
"await aiosession.close()"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -46,7 +46,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.api import open_meteo_docs\n",
|
||||
"chain_new = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)"
|
||||
"\n",
|
||||
"chain_new = APIChain.from_llm_and_api_docs(\n",
|
||||
" llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,7 +82,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_new.run('What is the weather like right now in Munich, Germany in degrees Farenheit?')"
|
||||
"chain_new.run(\n",
|
||||
" \"What is the weather like right now in Munich, Germany in degrees Farenheit?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -96,7 +101,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.environ['TMDB_BEARER_TOKEN'] = \"\""
|
||||
"\n",
|
||||
"os.environ[\"TMDB_BEARER_TOKEN\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -106,8 +112,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.api import tmdb_docs\n",
|
||||
"\n",
|
||||
"headers = {\"Authorization\": f\"Bearer {os.environ['TMDB_BEARER_TOKEN']}\"}\n",
|
||||
"chain = APIChain.from_llm_and_api_docs(llm, tmdb_docs.TMDB_DOCS, headers=headers, verbose=True)"
|
||||
"chain = APIChain.from_llm_and_api_docs(\n",
|
||||
" llm, tmdb_docs.TMDB_DOCS, headers=headers, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -168,12 +177,16 @@
|
||||
"from langchain.chains import APIChain\n",
|
||||
"\n",
|
||||
"# Get api key here: https://www.listennotes.com/api/pricing/\n",
|
||||
"listen_api_key = 'xxx'\n",
|
||||
"listen_api_key = \"xxx\"\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"headers = {\"X-ListenAPI-Key\": listen_api_key}\n",
|
||||
"chain = APIChain.from_llm_and_api_docs(llm, podcast_docs.PODCAST_DOCS, headers=headers, verbose=True)\n",
|
||||
"chain.run(\"Search for 'silicon valley bank' podcast episodes, audio length is more than 30 minutes, return only 1 results\")\n"
|
||||
"chain = APIChain.from_llm_and_api_docs(\n",
|
||||
" llm, podcast_docs.PODCAST_DOCS, headers=headers, verbose=True\n",
|
||||
")\n",
|
||||
"chain.run(\n",
|
||||
" \"Search for 'silicon valley bank' podcast episodes, audio length is more than 30 minutes, return only 1 results\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -168,9 +168,9 @@
|
||||
],
|
||||
"source": [
|
||||
"master_yoda_principal = ConstitutionalPrinciple(\n",
|
||||
" name='Master Yoda Principle',\n",
|
||||
" critique_request='Identify specific ways in which the model\\'s response is not in the style of Master Yoda.',\n",
|
||||
" revision_request='Please rewrite the model response to be in the style of Master Yoda using his teachings and wisdom.',\n",
|
||||
" name=\"Master Yoda Principle\",\n",
|
||||
" critique_request=\"Identify specific ways in which the model's response is not in the style of Master Yoda.\",\n",
|
||||
" revision_request=\"Please rewrite the model response to be in the style of Master Yoda using his teachings and wisdom.\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"constitutional_chain = ConstitutionalChain.from_llm(\n",
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = LLMRequestsChain(llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT))"
|
||||
"chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -63,7 +63,7 @@
|
||||
"question = \"What are the Three (3) biggest countries, and their respective sizes?\"\n",
|
||||
"inputs = {\n",
|
||||
" \"query\": question,\n",
|
||||
" \"url\": \"https://www.google.com/search?q=\" + question.replace(\" \", \"+\")\n",
|
||||
" \"url\": \"https://www.google.com/search?q=\" + question.replace(\" \", \"+\"),\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -25,7 +25,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain\n",
|
||||
"from langchain.chains import (\n",
|
||||
" OpenAIModerationChain,\n",
|
||||
" SequentialChain,\n",
|
||||
" LLMChain,\n",
|
||||
" SimpleSequentialChain,\n",
|
||||
")\n",
|
||||
"from langchain.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
@@ -172,13 +177,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomModeration(OpenAIModerationChain):\n",
|
||||
" \n",
|
||||
" def _moderate(self, text: str, results: dict) -> str:\n",
|
||||
" if results[\"flagged\"]:\n",
|
||||
" error_str = f\"The following text was found that violates OpenAI's content policy: {text}\"\n",
|
||||
" return error_str\n",
|
||||
" return text\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"custom_moderation = CustomModeration()"
|
||||
]
|
||||
},
|
||||
@@ -244,7 +249,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(template=\"{text}\", input_variables=[\"text\"])\n",
|
||||
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
|
||||
"llm_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -324,8 +331,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(template=\"{setup}{new_input}Person2:\", input_variables=[\"setup\", \"new_input\"])\n",
|
||||
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"{setup}{new_input}Person2:\", input_variables=[\"setup\", \"new_input\"]\n",
|
||||
")\n",
|
||||
"llm_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -379,7 +390,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = SequentialChain(chains=[llm_chain, moderation_chain], input_variables=[\"setup\", \"new_input\"])"
|
||||
"chain = SequentialChain(\n",
|
||||
" chains=[llm_chain, moderation_chain], input_variables=[\"setup\", \"new_input\"]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -48,7 +48,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"spec = OpenAPISpec.from_url(\"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\")"
|
||||
"spec = OpenAPISpec.from_url(\n",
|
||||
" \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,7 +81,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"operation = APIOperation.from_openapi_spec(spec, '/public/openai/v0/products', \"get\")"
|
||||
"operation = APIOperation.from_openapi_spec(spec, \"/public/openai/v0/products\", \"get\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -103,7 +105,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI() # Load a Language Model"
|
||||
"llm = OpenAI() # Load a Language Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -114,11 +116,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = OpenAPIEndpointChain.from_api_operation(\n",
|
||||
" operation, \n",
|
||||
" llm, \n",
|
||||
" requests=Requests(), \n",
|
||||
" operation,\n",
|
||||
" llm,\n",
|
||||
" requests=Requests(),\n",
|
||||
" verbose=True,\n",
|
||||
" return_intermediate_steps=True # Return request and response text\n",
|
||||
" return_intermediate_steps=True, # Return request and response text\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -268,12 +270,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = OpenAPIEndpointChain.from_api_operation(\n",
|
||||
" operation, \n",
|
||||
" llm, \n",
|
||||
" requests=Requests(), \n",
|
||||
" operation,\n",
|
||||
" llm,\n",
|
||||
" requests=Requests(),\n",
|
||||
" verbose=True,\n",
|
||||
" return_intermediate_steps=True, # Return request and response text\n",
|
||||
" raw_response=True # Return raw response\n",
|
||||
" return_intermediate_steps=True, # Return request and response text\n",
|
||||
" raw_response=True, # Return raw response\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -411,7 +413,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"operation = APIOperation.from_openapi_spec(spec, '/v1/public/openai/explain-task', \"post\")"
|
||||
"operation = APIOperation.from_openapi_spec(\n",
|
||||
" spec, \"/v1/public/openai/explain-task\", \"post\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -423,11 +427,8 @@
|
||||
"source": [
|
||||
"llm = OpenAI()\n",
|
||||
"chain = OpenAPIEndpointChain.from_api_operation(\n",
|
||||
" operation,\n",
|
||||
" llm,\n",
|
||||
" requests=Requests(),\n",
|
||||
" verbose=True,\n",
|
||||
" return_intermediate_steps=True)"
|
||||
" operation, llm, requests=Requests(), verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name='code-davinci-002', temperature=0, max_tokens=512)"
|
||||
"llm = OpenAI(model_name=\"code-davinci-002\", temperature=0, max_tokens=512)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -71,17 +71,17 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mdef solution():\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mdef solution():\n",
|
||||
" \"\"\"Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?\"\"\"\n",
|
||||
" cindy_pets = 4\n",
|
||||
" marcia_pets = cindy_pets + 2\n",
|
||||
" jan_pets = marcia_pets * 3\n",
|
||||
" total_pets = cindy_pets + marcia_pets + jan_pets\n",
|
||||
" result = total_pets\n",
|
||||
" return result\u001B[0m\n",
|
||||
" return result\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -139,8 +139,8 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m# Put objects into a list to record ordering\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
|
||||
"objects = []\n",
|
||||
"objects += [('booklet', 'blue')] * 2\n",
|
||||
"objects += [('booklet', 'purple')] * 2\n",
|
||||
@@ -151,9 +151,9 @@
|
||||
"\n",
|
||||
"# Count number of purple objects\n",
|
||||
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
|
||||
"answer = num_purple\u001B[0m\n",
|
||||
"answer = num_purple\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished PALChain chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished PALChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -187,7 +187,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pal_chain = PALChain.from_colored_object_prompt(llm, verbose=True, return_intermediate_steps=True)"
|
||||
"pal_chain = PALChain.from_colored_object_prompt(\n",
|
||||
" llm, verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -212,8 +214,8 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m# Put objects into a list to record ordering\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
|
||||
"objects = []\n",
|
||||
"objects += [('booklet', 'blue')] * 2\n",
|
||||
"objects += [('booklet', 'purple')] * 2\n",
|
||||
@@ -224,9 +226,9 @@
|
||||
"\n",
|
||||
"# Count number of purple objects\n",
|
||||
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
|
||||
"answer = num_purple\u001B[0m\n",
|
||||
"answer = num_purple\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -252,7 +254,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result['intermediate_steps']"
|
||||
"result[\"intermediate_steps\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -9,9 +9,9 @@
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# SQLite example\n",
|
||||
"# SQL Chain example\n",
|
||||
"\n",
|
||||
"This example showcases hooking up an LLM to answer questions over a database."
|
||||
"This example demonstrates the use of the `SQLDatabaseChain` for answering questions over a database."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -23,8 +23,10 @@
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"This uses the example Chinook database.\n",
|
||||
"To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the `.db` file in a notebooks folder at the root of this repository."
|
||||
"Under the hood, LangChain uses SQLAlchemy to connect to SQL databases. The `SQLDatabaseChain` can therefore be used with any SQL dialect supported by SQLAlchemy, such as MS SQL, MySQL, MariaDB, PostgreSQL, Oracle SQL, and SQLite. Please refer to the SQLAlchemy documentation for more information about requirements for connecting to your database. For example, a connection to MySQL requires an appropriate connector such as PyMySQL. A URI for a MySQL connection might look like: `mysql+pymysql://user:pass@some_mysql_db_address/db_name`\n",
|
||||
"\n",
|
||||
"This demonstration uses SQLite and the example Chinook database.\n",
|
||||
"To set it up, follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the `.db` file in a notebooks folder at the root of this repository."
|
||||
]
|
||||
},
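A minimal sketch of the non-SQLite case described above, assuming a reachable MySQL server and the PyMySQL driver; the URI shape follows the SQLAlchemy convention quoted in the cell:

```python
# A minimal sketch, assuming `pip install pymysql` and a reachable MySQL instance.
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("mysql+pymysql://user:pass@some_mysql_db_address/db_name")
llm = OpenAI(temperature=0)
db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)
db_chain.run("How many tables does this database have?")
```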
|
||||
{
|
||||
@@ -228,7 +230,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db_chain = SQLDatabaseChain(llm=llm, database=db, prompt=PROMPT, verbose=True, return_intermediate_steps=True)"
|
||||
"db_chain = SQLDatabaseChain(\n",
|
||||
" llm=llm, database=db, prompt=PROMPT, verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -339,8 +343,11 @@
|
||||
"source": [
|
||||
"db = SQLDatabase.from_uri(\n",
|
||||
" \"sqlite:///../../../../notebooks/Chinook.db\",\n",
|
||||
" include_tables=['Track'], # we include only one table to save tokens in the prompt :)\n",
|
||||
" sample_rows_in_table_info=2)"
|
||||
" include_tables=[\n",
|
||||
" \"Track\"\n",
|
||||
" ], # we include only one table to save tokens in the prompt :)\n",
|
||||
" sample_rows_in_table_info=2,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -520,9 +527,10 @@
|
||||
"source": [
|
||||
"db = SQLDatabase.from_uri(\n",
|
||||
" \"sqlite:///../../../../notebooks/Chinook.db\",\n",
|
||||
" include_tables=['Track', 'Playlist'],\n",
|
||||
" include_tables=[\"Track\", \"Playlist\"],\n",
|
||||
" sample_rows_in_table_info=2,\n",
|
||||
" custom_table_info=custom_table_info)\n",
|
||||
" custom_table_info=custom_table_info,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(db.table_info)"
|
||||
]
|
||||
@@ -596,6 +604,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import SQLDatabaseSequentialChain\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///../../../../notebooks/Chinook.db\")"
|
||||
]
|
||||
},
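The import above sets up the sequential variant; a hedged sketch of its typical construction, where the `from_llm` call follows the pattern used elsewhere in these docs and is our assumption here:

```python
# A minimal sketch, assuming the `db` and `llm` objects from the cells above.
# The sequential chain first decides which tables are relevant, then queries.
chain = SQLDatabaseSequentialChain.from_llm(llm, db, verbose=True)
chain.run("How many employees are also customers?")
```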
|
||||
@@ -679,7 +688,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"SparkleSmile Toothpaste\n",
|
||||
"\u001B[1mConcurrent executed in 1.54 seconds.\u001B[0m\n",
|
||||
"\u001b[1mConcurrent executed in 1.54 seconds.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"BrightSmile Toothpaste Co.\n",
|
||||
@@ -55,7 +55,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"BrightSmile Toothpaste.\n",
|
||||
"\u001B[1mSerial executed in 6.38 seconds.\u001B[0m\n"
|
||||
"\u001b[1mSerial executed in 6.38 seconds.\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -95,16 +95,17 @@
|
||||
" tasks = [async_generate(chain) for _ in range(5)]\n",
|
||||
" await asyncio.gather(*tasks)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"s = time.perf_counter()\n",
|
||||
"# If running this outside of Jupyter, use asyncio.run(generate_concurrently())\n",
|
||||
"await generate_concurrently()\n",
|
||||
"elapsed = time.perf_counter() - s\n",
|
||||
"print('\\033[1m' + f\"Concurrent executed in {elapsed:0.2f} seconds.\" + '\\033[0m')\n",
|
||||
"print(\"\\033[1m\" + f\"Concurrent executed in {elapsed:0.2f} seconds.\" + \"\\033[0m\")\n",
|
||||
"\n",
|
||||
"s = time.perf_counter()\n",
|
||||
"generate_serially()\n",
|
||||
"elapsed = time.perf_counter() - s\n",
|
||||
"print('\\033[1m' + f\"Serial executed in {elapsed:0.2f} seconds.\" + '\\033[0m')"
|
||||
"print(\"\\033[1m\" + f\"Serial executed in {elapsed:0.2f} seconds.\" + \"\\033[0m\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -93,7 +93,8 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"../../state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
|
||||
@@ -42,13 +42,13 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001B[32;1m\u001B[1;3mQuestion: What NFL team won the Super Bowl in the year Justin Beiber was born?\n",
|
||||
"\u001b[32;1m\u001b[1;3mQuestion: What NFL team won the Super Bowl in the year Justin Beiber was born?\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\u001B[0m\n",
|
||||
"Answer: Let's think step by step.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished LLMChain chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -95,11 +95,11 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001B[32;1m\u001B[1;3mWrite a sad poem about ducks.\u001B[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mWrite a sad poem about ducks.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished LLMChain chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -138,7 +138,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Write a {adjective} poem about {subject}.\"\"\"\n",
|
||||
"llm_chain = LLMChain.from_string(llm=OpenAI(temperature=0), template=template)\n"
|
||||
"llm_chain = LLMChain.from_string(llm=OpenAI(temperature=0), template=template)"
|
||||
]
|
||||
},
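A hypothetical call for the `from_string` chain above; the variable names come straight from the template string:

```python
# A minimal sketch: from_string infers the prompt variables, so the chain
# is invoked with the template's {adjective} and {subject} slots.
llm_chain.predict(adjective="sad", subject="ducks")
```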
|
||||
{
|
||||
|
||||
@@ -53,7 +53,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is an LLMChain to write a synopsis given a title of a play.\n",
|
||||
"llm = OpenAI(temperature=.7)\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
|
||||
"\n",
|
||||
"Title: {title}\n",
|
||||
@@ -70,7 +70,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is an LLMChain to write a review of a play given a synopsis.\n",
|
||||
"llm = OpenAI(temperature=.7)\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"template = \"\"\"You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n",
|
||||
"\n",
|
||||
"Play Synopsis:\n",
|
||||
@@ -89,7 +89,10 @@
|
||||
"source": [
|
||||
"# This is the overall chain where we run these two chains in sequence.\n",
|
||||
"from langchain.chains import SimpleSequentialChain\n",
|
||||
"overall_chain = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)"
|
||||
"\n",
|
||||
"overall_chain = SimpleSequentialChain(\n",
|
||||
" chains=[synopsis_chain, review_chain], verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -171,13 +174,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is an LLMChain to write a synopsis given a title of a play and the era it is set in.\n",
|
||||
"llm = OpenAI(temperature=.7)\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"template = \"\"\"You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.\n",
|
||||
"\n",
|
||||
"Title: {title}\n",
|
||||
"Era: {era}\n",
|
||||
"Playwright: This is a synopsis for the above play:\"\"\"\n",
|
||||
"prompt_template = PromptTemplate(input_variables=[\"title\", 'era'], template=template)\n",
|
||||
"prompt_template = PromptTemplate(input_variables=[\"title\", \"era\"], template=template)\n",
|
||||
"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, output_key=\"synopsis\")"
|
||||
]
|
||||
},
|
||||
@@ -189,7 +192,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is an LLMChain to write a review of a play given a synopsis.\n",
|
||||
"llm = OpenAI(temperature=.7)\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"template = \"\"\"You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n",
|
||||
"\n",
|
||||
"Play Synopsis:\n",
|
||||
@@ -208,12 +211,14 @@
|
||||
"source": [
|
||||
"# This is the overall chain where we run these two chains in sequence.\n",
|
||||
"from langchain.chains import SequentialChain\n",
|
||||
"\n",
|
||||
"overall_chain = SequentialChain(\n",
|
||||
" chains=[synopsis_chain, review_chain],\n",
|
||||
" input_variables=[\"era\", \"title\"],\n",
|
||||
" # Here we return multiple variables\n",
|
||||
" output_variables=[\"synopsis\", \"review\"],\n",
|
||||
" verbose=True)"
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -248,7 +253,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"overall_chain({\"title\":\"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
|
||||
"overall_chain({\"title\": \"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -304,7 +309,7 @@
|
||||
"from langchain.chains import SequentialChain\n",
|
||||
"from langchain.memory import SimpleMemory\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=.7)\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"template = \"\"\"You are a social media manager for a theater company. Given the title of play, the era it is set in, the date,time and location, the synopsis of the play, and the review of the play, it is your job to write a social media post for that play.\n",
|
||||
"\n",
|
||||
"Here is some context about the time and location of the play:\n",
|
||||
@@ -318,18 +323,23 @@
|
||||
"\n",
|
||||
"Social Media Post:\n",
|
||||
"\"\"\"\n",
|
||||
"prompt_template = PromptTemplate(input_variables=[\"synopsis\", \"review\", \"time\", \"location\"], template=template)\n",
|
||||
"prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"synopsis\", \"review\", \"time\", \"location\"], template=template\n",
|
||||
")\n",
|
||||
"social_chain = LLMChain(llm=llm, prompt=prompt_template, output_key=\"social_post_text\")\n",
|
||||
"\n",
|
||||
"overall_chain = SequentialChain(\n",
|
||||
" memory=SimpleMemory(memories={\"time\": \"December 25th, 8pm PST\", \"location\": \"Theater in the Park\"}),\n",
|
||||
" memory=SimpleMemory(\n",
|
||||
" memories={\"time\": \"December 25th, 8pm PST\", \"location\": \"Theater in the Park\"}\n",
|
||||
" ),\n",
|
||||
" chains=[synopsis_chain, review_chain, social_chain],\n",
|
||||
" input_variables=[\"era\", \"title\"],\n",
|
||||
" # Here we return multiple variables\n",
|
||||
" output_variables=[\"social_post_text\"],\n",
|
||||
" verbose=True)\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"overall_chain({\"title\":\"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
|
||||
"overall_chain({\"title\": \"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -26,11 +26,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate, OpenAI, LLMChain\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)\n"
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -136,13 +137,13 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001B[32;1m\u001B[1;3mQuestion: whats 2 + 2\n",
|
||||
"\u001b[32;1m\u001b[1;3mQuestion: whats 2 + 2\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\u001B[0m\n",
|
||||
"Answer: Let's think step by step.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -257,9 +258,10 @@
|
||||
" \"prompt_path\": \"prompt.json\",\n",
|
||||
" \"llm_path\": \"llm.json\",\n",
|
||||
" \"output_key\": \"text\",\n",
|
||||
" \"_type\": \"llm_chain\"\n",
|
||||
" \"_type\": \"llm_chain\",\n",
|
||||
"}\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"with open(\"llm_chain_separate.json\", \"w\") as f:\n",
|
||||
" json.dump(config, f, indent=2)"
|
||||
]
|
||||
@@ -319,13 +321,13 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001B[32;1m\u001B[1;3mQuestion: whats 2 + 2\n",
|
||||
"\u001b[32;1m\u001b[1;3mQuestion: whats 2 + 2\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\u001B[0m\n",
|
||||
"Answer: Let's think step by step.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -47,7 +47,10 @@
"    shortened_text = \"\\n\\n\".join(text.split(\"\\n\\n\")[:3])\n",
"    return {\"output_text\": shortened_text}\n",
"\n",
"transform_chain = TransformChain(input_variables=[\"text\"], output_variables=[\"output_text\"], transform=transform_func)"
"\n",
"transform_chain = TransformChain(\n",
"    input_variables=[\"text\"], output_variables=[\"output_text\"], transform=transform_func\n",
")"
]
},
{
@@ -73,6 +73,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
"\n",
"chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
"# Run the chain only specifying the input variable.\n",
@@ -109,12 +110,13 @@
"    ChatPromptTemplate,\n",
"    HumanMessagePromptTemplate,\n",
")\n",
"\n",
"human_message_prompt = HumanMessagePromptTemplate(\n",
"        prompt=PromptTemplate(\n",
"            template=\"What is a good name for a company that makes {product}?\",\n",
"            input_variables=[\"product\"],\n",
"        )\n",
"    prompt=PromptTemplate(\n",
"        template=\"What is a good name for a company that makes {product}?\",\n",
"        input_variables=[\"product\"],\n",
"    )\n",
")\n",
"chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])\n",
"chat = ChatOpenAI(temperature=0.9)\n",
"chain = LLMChain(llm=chat, prompt=chat_prompt_template)\n",
@@ -189,6 +191,7 @@
],
"source": [
"from langchain.chains import SimpleSequentialChain\n",
"\n",
"overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)\n",
"\n",
"# Run the chain specifying only the input variable for the first chain.\n",
@@ -231,17 +234,19 @@
"    @property\n",
"    def input_keys(self) -> List[str]:\n",
"        # Union of the input keys of the two chains.\n",
"        all_input_vars = set(self.chain_1.input_keys).union(set(self.chain_2.input_keys))\n",
"        all_input_vars = set(self.chain_1.input_keys).union(\n",
"            set(self.chain_2.input_keys)\n",
"        )\n",
"        return list(all_input_vars)\n",
"\n",
"    @property\n",
"    def output_keys(self) -> List[str]:\n",
"        return ['concat_output']\n",
"        return [\"concat_output\"]\n",
"\n",
"    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:\n",
"        output_1 = self.chain_1.run(inputs)\n",
"        output_2 = self.chain_2.run(inputs)\n",
"        return {'concat_output': output_1 + output_2}"
"        return {\"concat_output\": output_1 + output_2}"
]
},
{
@@ -142,7 +142,10 @@
}
],
"source": [
"qa_document_chain.run(input_document=state_of_the_union, question=\"what did the president say about justice breyer?\")"
"qa_document_chain.run(\n",
"    input_document=state_of_the_union,\n",
"    question=\"what did the president say about justice breyer?\",\n",
")"
]
},
{
@@ -44,6 +44,7 @@
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../../state_of_the_union.txt\")\n",
"documents = loader.load()"
]
@@ -121,7 +122,9 @@
},
"outputs": [],
"source": [
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever())"
"qa = ConversationalRetrievalChain.from_llm(\n",
"    OpenAI(temperature=0), vectorstore.as_retriever()\n",
")"
]
},
{
@@ -211,7 +214,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -232,7 +235,9 @@
},
"outputs": [],
"source": [
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True)"
"qa = ConversationalRetrievalChain.from_llm(\n",
"    OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True\n",
")"
]
},
{
@@ -269,7 +274,7 @@
}
],
"source": [
"result['source_documents'][0]"
"result[\"source_documents\"][0]"
]
},
{
@@ -302,10 +307,14 @@
},
"outputs": [],
"source": [
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True)\n",
"qa = ConversationalRetrievalChain.from_llm(\n",
"    OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True\n",
")\n",
"chat_history = []\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"result = qa({\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs})"
"result = qa(\n",
"    {\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs}\n",
")"
]
},
{
@@ -385,7 +394,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -464,7 +473,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -489,19 +498,30 @@
"from langchain.chains.llm import LLMChain\n",
"from langchain.callbacks.base import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT\n",
"from langchain.chains.conversational_retrieval.prompts import (\n",
"    CONDENSE_QUESTION_PROMPT,\n",
"    QA_PROMPT,\n",
")\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n",
"# and a separate, non-streaming llm for question generation\n",
"llm = OpenAI(temperature=0)\n",
"streaming_llm = OpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
"streaming_llm = OpenAI(\n",
"    streaming=True,\n",
"    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
"    verbose=True,\n",
"    temperature=0,\n",
")\n",
"\n",
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
"doc_chain = load_qa_chain(streaming_llm, chain_type=\"stuff\", prompt=QA_PROMPT)\n",
"\n",
"qa = ConversationalRetrievalChain(\n",
"    retriever=vectorstore.as_retriever(), combine_docs_chain=doc_chain, question_generator=question_generator)"
"    retriever=vectorstore.as_retriever(),\n",
"    combine_docs_chain=doc_chain,\n",
"    question_generator=question_generator,\n",
")"
]
},
{
@@ -545,7 +565,7 @@
"source": [
"chat_history = [(query, result[\"answer\"])]\n",
"query = \"Did he mention who she suceeded\"\n",
"result = qa({\"question\": query, \"chat_history\": chat_history})\n"
"result = qa({\"question\": query, \"chat_history\": chat_history})"
]
},
{
@@ -571,7 +591,11 @@
"    for human, ai in inputs:\n",
"        res.append(f\"Human:{human}\\nAI:{ai}\")\n",
"    return \"\\n\".join(res)\n",
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), get_chat_history=get_chat_history)"
"\n",
"\n",
"qa = ConversationalRetrievalChain.from_llm(\n",
"    OpenAI(temperature=0), vectorstore.as_retriever(), get_chat_history=get_chat_history\n",
")"
]
},
{
@@ -608,7 +632,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -91,7 +91,9 @@
"metadata": {},
"outputs": [],
"source": [
"embeddings = HypotheticalDocumentEmbedder.from_llm(multi_llm, base_embeddings, \"web_search\")"
"embeddings = HypotheticalDocumentEmbedder.from_llm(\n",
"    multi_llm, base_embeddings, \"web_search\"\n",
")"
]
},
{
@@ -136,7 +138,9 @@
"metadata": {},
"outputs": [],
"source": [
"embeddings = HypotheticalDocumentEmbedder(llm_chain=llm_chain, base_embeddings=base_embeddings)"
"embeddings = HypotheticalDocumentEmbedder(\n",
"    llm_chain=llm_chain, base_embeddings=base_embeddings\n",
")"
]
},
{
@@ -146,7 +150,9 @@
"metadata": {},
"outputs": [],
"source": [
"result = embeddings.embed_query(\"What did the president say about Ketanji Brown Jackson\")"
"result = embeddings.embed_query(\n",
"    \"What did the president say about Ketanji Brown Jackson\"\n",
")"
]
},
{
@@ -66,7 +66,9 @@
}
],
"source": [
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))])"
"docsearch = Chroma.from_texts(\n",
"    texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]\n",
")"
]
},
{
@@ -213,7 +215,9 @@
"FINAL ANSWER IN ITALIAN:\"\"\"\n",
"PROMPT = PromptTemplate(template=template, input_variables=[\"summaries\", \"question\"])\n",
"\n",
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"stuff\", prompt=PROMPT)\n",
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0), chain_type=\"stuff\", prompt=PROMPT\n",
")\n",
"query = \"What did the president say about Justice Breyer\"\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
@@ -277,7 +281,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True)"
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -337,7 +343,6 @@
}
],
"source": [
"\n",
"question_prompt_template = \"\"\"Use the following portion of a long document to see if any of the text is relevant to answer the question. \n",
"Return any relevant text in Italian.\n",
"{context}\n",
@@ -361,7 +366,13 @@
"    template=combine_prompt_template, input_variables=[\"summaries\", \"question\"]\n",
")\n",
"\n",
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True, question_prompt=QUESTION_PROMPT, combine_prompt=COMBINE_PROMPT)\n",
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"map_reduce\",\n",
"    return_intermediate_steps=True,\n",
"    question_prompt=QUESTION_PROMPT,\n",
"    combine_prompt=COMBINE_PROMPT,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -438,7 +449,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True)"
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -537,7 +550,13 @@
}
],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True, question_prompt=question_prompt, refine_prompt=refine_prompt)\n",
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"refine\",\n",
"    return_intermediate_steps=True,\n",
"    question_prompt=question_prompt,\n",
"    refine_prompt=refine_prompt,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -558,7 +577,12 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", metadata_keys=['source'], return_intermediate_steps=True)"
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"map_rerank\",\n",
"    metadata_keys=[\"source\"],\n",
"    return_intermediate_steps=True,\n",
")"
]
},
{
@@ -663,7 +687,13 @@
"    input_variables=[\"context\", \"question\"],\n",
"    output_parser=output_parser,\n",
")\n",
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", metadata_keys=['source'], return_intermediate_steps=True, prompt=PROMPT)\n",
"chain = load_qa_with_sources_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"map_rerank\",\n",
"    metadata_keys=[\"source\"],\n",
"    return_intermediate_steps=True,\n",
"    prompt=PROMPT,\n",
")\n",
"query = \"What did the president say about Justice Breyer\"\n",
"result = chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
@@ -71,7 +71,9 @@
}
],
"source": [
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]).as_retriever()"
"docsearch = Chroma.from_texts(\n",
"    texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]\n",
").as_retriever()"
]
},
{
@@ -296,7 +298,9 @@
},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True)"
"chain = load_qa_chain(\n",
"    OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True\n",
")"
]
},
{
@@ -380,7 +384,13 @@
"COMBINE_PROMPT = PromptTemplate(\n",
"    template=combine_prompt_template, input_variables=[\"summaries\", \"question\"]\n",
")\n",
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True, question_prompt=QUESTION_PROMPT, combine_prompt=COMBINE_PROMPT)\n",
"chain = load_qa_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"map_reduce\",\n",
"    return_map_steps=True,\n",
"    question_prompt=QUESTION_PROMPT,\n",
"    combine_prompt=COMBINE_PROMPT,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -463,7 +473,9 @@
},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True)"
"chain = load_qa_chain(\n",
"    OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True\n",
")"
]
},
{
@@ -556,8 +568,13 @@
"initial_qa_prompt = PromptTemplate(\n",
"    input_variables=[\"context_str\", \"question\"], template=initial_qa_template\n",
")\n",
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True,\n",
"                      question_prompt=initial_qa_prompt, refine_prompt=refine_prompt)\n",
"chain = load_qa_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"refine\",\n",
"    return_refine_steps=True,\n",
"    question_prompt=initial_qa_prompt,\n",
"    refine_prompt=refine_prompt,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -580,7 +597,9 @@
},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", return_intermediate_steps=True)"
"chain = load_qa_chain(\n",
"    OpenAI(temperature=0), chain_type=\"map_rerank\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -711,7 +730,12 @@
"    output_parser=output_parser,\n",
")\n",
"\n",
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", return_intermediate_steps=True, prompt=PROMPT)\n",
"chain = load_qa_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"map_rerank\",\n",
"    return_intermediate_steps=True,\n",
"    prompt=PROMPT,\n",
")\n",
"query = \"What did the president say about Justice Breyer\"\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
@@ -248,7 +248,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True)"
"chain = load_summarize_chain(\n",
"    OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -314,7 +316,13 @@
"\n",
"CONCISE SUMMARY IN ITALIAN:\"\"\"\n",
"PROMPT = PromptTemplate(template=prompt_template, input_variables=[\"text\"])\n",
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)\n",
"chain = load_summarize_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"map_reduce\",\n",
"    return_intermediate_steps=True,\n",
"    map_prompt=PROMPT,\n",
"    combine_prompt=PROMPT,\n",
")\n",
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
},
@@ -382,7 +390,9 @@
}
],
"source": [
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True)\n",
"chain = load_summarize_chain(\n",
"    OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True\n",
")\n",
"\n",
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
@@ -441,7 +451,13 @@
"    input_variables=[\"existing_answer\", \"text\"],\n",
"    template=refine_template,\n",
")\n",
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True, question_prompt=PROMPT, refine_prompt=refine_prompt)\n",
"chain = load_summarize_chain(\n",
"    OpenAI(temperature=0),\n",
"    chain_type=\"refine\",\n",
"    return_intermediate_steps=True,\n",
"    question_prompt=PROMPT,\n",
"    refine_prompt=refine_prompt,\n",
")\n",
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
},
@@ -41,6 +41,7 @@
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
@@ -57,7 +58,9 @@
"metadata": {},
"outputs": [],
"source": [
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
"qa = RetrievalQA.from_chain_type(\n",
"    llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -100,7 +103,9 @@
"metadata": {},
"outputs": [],
"source": [
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", retriever=docsearch.as_retriever())"
"qa = RetrievalQA.from_chain_type(\n",
"    llm=OpenAI(), chain_type=\"map_reduce\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -141,6 +146,7 @@
"outputs": [],
"source": [
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
"qa = RetrievalQA(combine_documents_chain=qa_chain, retriever=docsearch.as_retriever())"
]
@@ -184,6 +190,7 @@
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"\n",
"prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n",
"\n",
"{context}\n",
@@ -203,7 +210,12 @@
"outputs": [],
"source": [
"chain_type_kwargs = {\"prompt\": PROMPT}\n",
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), chain_type_kwargs=chain_type_kwargs)"
"qa = RetrievalQA.from_chain_type(\n",
"    llm=OpenAI(),\n",
"    chain_type=\"stuff\",\n",
"    retriever=docsearch.as_retriever(),\n",
"    chain_type_kwargs=chain_type_kwargs,\n",
")"
]
},
{
@@ -244,7 +256,12 @@
"metadata": {},
"outputs": [],
"source": [
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), return_source_documents=True)"
"qa = RetrievalQA.from_chain_type(\n",
"    llm=OpenAI(),\n",
"    chain_type=\"stuff\",\n",
"    retriever=docsearch.as_retriever(),\n",
"    return_source_documents=True,\n",
")"
]
},
{
@@ -55,7 +55,9 @@
}
],
"source": [
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": f\"{i}-pl\"} for i in range(len(texts))])"
"docsearch = Chroma.from_texts(\n",
"    texts, embeddings, metadatas=[{\"source\": f\"{i}-pl\"} for i in range(len(texts))]\n",
")"
]
},
{
@@ -77,7 +79,9 @@
"source": [
"from langchain import OpenAI\n",
"\n",
"chain = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
"chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
"    OpenAI(temperature=0), chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -99,7 +103,10 @@
}
],
"source": [
"chain({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
"chain(\n",
"    {\"question\": \"What did the president say about Justice Breyer\"},\n",
"    return_only_outputs=True,\n",
")"
]
},
{
@@ -120,7 +127,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"map_reduce\", retriever=docsearch.as_retriever())"
"chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
"    OpenAI(temperature=0), chain_type=\"map_reduce\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -142,7 +151,10 @@
}
],
"source": [
"chain({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
"chain(\n",
"    {\"question\": \"What did the president say about Justice Breyer\"},\n",
"    return_only_outputs=True,\n",
")"
]
},
{
@@ -161,8 +173,11 @@
"outputs": [],
"source": [
"from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n",
"\n",
"qa_chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
"qa = RetrievalQAWithSourcesChain(combine_documents_chain=qa_chain, retriever=docsearch.as_retriever())"
"qa = RetrievalQAWithSourcesChain(\n",
"    combine_documents_chain=qa_chain, retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -184,7 +199,10 @@
}
],
"source": [
"qa({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
"qa(\n",
"    {\"question\": \"What did the president say about Justice Breyer\"},\n",
"    return_only_outputs=True,\n",
")"
]
},
{
@@ -72,6 +72,7 @@
"        github_url = f\"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}\"\n",
"        yield Document(page_content=f.read(), metadata={\"source\": github_url})\n",
"\n",
"\n",
"sources = get_github_docs(\"yirenlu92\", \"deno-manual-forked\")\n",
"\n",
"source_chunks = []\n",
@@ -115,14 +116,13 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"\n",
"prompt_template = \"\"\"Use the context below to write a 400 word blog post about the topic below:\n",
"    Context: {context}\n",
"    Topic: {topic}\n",
"    Blog post:\"\"\"\n",
"\n",
"PROMPT = PromptTemplate(\n",
"    template=prompt_template, input_variables=[\"context\", \"topic\"]\n",
")\n",
"PROMPT = PromptTemplate(template=prompt_template, input_variables=[\"context\", \"topic\"])\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"\n",
@@ -67,7 +67,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = AirbyteJSONLoader('/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl')"
"loader = AirbyteJSONLoader(\"/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl\")"
]
},
{
@@ -78,7 +78,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = AzureBlobStorageContainerLoader(conn_str=\"<conn_str>\", container=\"<container>\", prefix=\"<prefix>\")"
"loader = AzureBlobStorageContainerLoader(\n",
"    conn_str=\"<conn_str>\", container=\"<container>\", prefix=\"<prefix>\"\n",
")"
]
},
{
@@ -38,7 +38,11 @@
"metadata": {},
"outputs": [],
"source": [
"loader = AzureBlobStorageFileLoader(conn_str='<connection string>', container='<container name>', blob_name='<blob name>')"
"loader = AzureBlobStorageFileLoader(\n",
"    conn_str=\"<connection string>\",\n",
"    container=\"<container name>\",\n",
"    blob_name=\"<blob name>\",\n",
")"
]
},
{
@@ -24,7 +24,7 @@
"metadata": {},
"outputs": [],
"source": [
"BASE_QUERY = '''\n",
"BASE_QUERY = \"\"\"\n",
"SELECT\n",
"  id,\n",
"  dna_sequence,\n",
@@ -41,7 +41,7 @@
"    SELECT\n",
"      AS STRUCT 3 AS id, \"TCCGGA\" AS dna_sequence, \"Acidianus hospitalis (strain W1).\" AS organism) AS new_array),\n",
"  UNNEST(new_array)\n",
"'''"
"\"\"\""
]
},
{
@@ -92,7 +92,11 @@
"metadata": {},
"outputs": [],
"source": [
"loader = BigQueryLoader(BASE_QUERY, page_content_columns=[\"dna_sequence\", \"organism\"], metadata_columns=[\"id\"])\n",
"loader = BigQueryLoader(\n",
"    BASE_QUERY,\n",
"    page_content_columns=[\"dna_sequence\", \"organism\"],\n",
"    metadata_columns=[\"id\"],\n",
")\n",
"\n",
"data = loader.load()"
]
@@ -128,7 +132,7 @@
"outputs": [],
"source": [
"# Note that the `id` column is being returned twice, with one instance aliased as `source`\n",
"ALIASED_QUERY = '''\n",
"ALIASED_QUERY = \"\"\"\n",
"SELECT\n",
"  id,\n",
"  dna_sequence,\n",
@@ -146,7 +150,7 @@
"    SELECT\n",
"      AS STRUCT 3 AS id, \"TCCGGA\" AS dna_sequence, \"Acidianus hospitalis (strain W1).\" AS organism) AS new_array),\n",
"  UNNEST(new_array)\n",
"'''"
"\"\"\""
]
},
{
@@ -43,9 +43,7 @@
},
"outputs": [],
"source": [
"loader = BiliBiliLoader(\n",
"    [\"https://www.bilibili.com/video/BV1xt411o7Xu/\"]\n",
")"
"loader = BiliBiliLoader([\"https://www.bilibili.com/video/BV1xt411o7Xu/\"])"
]
},
{
@@ -26,7 +26,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = CollegeConfidentialLoader(\"https://www.collegeconfidential.com/colleges/brown-university/\")"
"loader = CollegeConfidentialLoader(\n",
"    \"https://www.collegeconfidential.com/colleges/brown-university/\"\n",
")"
]
},
{
@@ -30,7 +30,7 @@
},
"outputs": [],
"source": [
"loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv')\n",
"loader = CSVLoader(file_path=\"./example_data/mlb_teams_2012.csv\")\n",
"\n",
"data = loader.load()"
]
@@ -73,11 +73,14 @@
},
"outputs": [],
"source": [
"loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv', csv_args={\n",
"    'delimiter': ',',\n",
"    'quotechar': '\"',\n",
"    'fieldnames': ['MLB Team', 'Payroll in millions', 'Wins']\n",
"})\n",
"loader = CSVLoader(\n",
"    file_path=\"./example_data/mlb_teams_2012.csv\",\n",
"    csv_args={\n",
"        \"delimiter\": \",\",\n",
"        \"quotechar\": '\"',\n",
"        \"fieldnames\": [\"MLB Team\", \"Payroll in millions\", \"Wins\"],\n",
"    },\n",
")\n",
"\n",
"data = loader.load()"
]
@@ -106,7 +109,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Specify a column to be used identify the document source\n",
"## Specify a column to be used identify the document source\n",
"\n",
"Use the `source_column` argument to specify a column to be set as the source for the document created from each row. Otherwise `file_path` will be used as the source for all documents created from the csv file.\n",
"\n",
@@ -119,7 +122,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv', source_column=\"Team\")\n",
"loader = CSVLoader(file_path=\"./example_data/mlb_teams_2012.csv\", source_column=\"Team\")\n",
"\n",
"data = loader.load()"
]
@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv('example_data/mlb_teams_2012.csv')"
"df = pd.read_csv(\"example_data/mlb_teams_2012.csv\")"
]
},
{
@@ -34,7 +34,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = DirectoryLoader('../', glob=\"**/*.md\")"
"loader = DirectoryLoader(\"../\", glob=\"**/*.md\")"
]
},
{
@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = DirectoryLoader('../', glob=\"**/*.md\", loader_cls=TextLoader)"
"loader = DirectoryLoader(\"../\", glob=\"**/*.md\", loader_cls=TextLoader)"
]
},
{
@@ -82,7 +82,7 @@
"loader = DuckDBLoader(\n",
"    \"SELECT * FROM read_csv_auto('example.csv')\",\n",
"    page_content_columns=[\"Team\"],\n",
"    metadata_columns=[\"Payroll\"]\n",
"    metadata_columns=[\"Payroll\"],\n",
")\n",
"\n",
"data = loader.load()"
@@ -120,7 +120,7 @@
"source": [
"loader = DuckDBLoader(\n",
"    \"SELECT Team, Payroll, Team As source FROM read_csv_auto('example.csv')\",\n",
"    metadata_columns=[\"source\"]\n",
"    metadata_columns=[\"source\"],\n",
")\n",
"\n",
"data = loader.load()"
@@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader('example_data/fake-email.eml')"
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\")"
]
},
{
@@ -86,7 +86,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader('example_data/fake-email.eml', mode=\"elements\")"
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\", mode=\"elements\")"
]
},
{
@@ -145,7 +145,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = OutlookMessageLoader('example_data/fake-email.msg')"
"loader = OutlookMessageLoader(\"example_data/fake-email.msg\")"
]
},
{
Submodule docs/modules/indexes/document_loaders/examples/example_data/test_repo1 added at 7e525a3b91
@@ -8,4 +8,5 @@
1/23/23, 3:02 AM - User 1: I thought you were selling the blue one!
1/23/23, 3:18 AM - User 2: No Im sorry it was my mistake, the blue one is not for sale
1/23/23, 3:19 AM - User 1: Oh no worries! Bye
1/23/23, 3:19 AM - User 2: Bye!
1/23/23, 3:22_AM - User 1: And let me know if anything changes
@@ -59,9 +59,9 @@
"outputs": [],
"source": [
"figma_loader = FigmaFileLoader(\n",
"    os.environ.get('ACCESS_TOKEN'),\n",
"    os.environ.get('NODE_IDS'),\n",
"    os.environ.get('FILE_KEY')\n",
"    os.environ.get(\"ACCESS_TOKEN\"),\n",
"    os.environ.get(\"NODE_IDS\"),\n",
"    os.environ.get(\"FILE_KEY\"),\n",
")"
]
},
@@ -92,17 +92,23 @@
"    Figma file nodes and metadata: {context}\"\"\"\n",
"\n",
"    human_prompt_template = \"Code the {text}. Ensure it's mobile responsive\"\n",
"    system_message_prompt = SystemMessagePromptTemplate.from_template(system_prompt_template)\n",
"    human_message_prompt = HumanMessagePromptTemplate.from_template(human_prompt_template)\n",
"    system_message_prompt = SystemMessagePromptTemplate.from_template(\n",
"        system_prompt_template\n",
"    )\n",
"    human_message_prompt = HumanMessagePromptTemplate.from_template(\n",
"        human_prompt_template\n",
"    )\n",
"    # delete the gpt-4 model_name to use the default gpt-3.5 turbo for faster results\n",
"    gpt_4 = ChatOpenAI(temperature=.02, model_name='gpt-4')\n",
"    gpt_4 = ChatOpenAI(temperature=0.02, model_name=\"gpt-4\")\n",
"    # Use the retriever's 'get_relevant_documents' method if needed to filter down longer docs\n",
"    relevant_nodes = figma_doc_retriever.get_relevant_documents(human_input)\n",
"    conversation = [system_message_prompt, human_message_prompt]\n",
"    chat_prompt = ChatPromptTemplate.from_messages(conversation)\n",
"    response = gpt_4(chat_prompt.format_prompt( \n",
"        context=relevant_nodes, \n",
"        text=human_input).to_messages())\n",
"    response = gpt_4(\n",
"        chat_prompt.format_prompt(\n",
"            context=relevant_nodes, text=human_input\n",
"        ).to_messages()\n",
"    )\n",
"    return response"
]
},
195 docs/modules/indexes/document_loaders/examples/git.ipynb Normal file
@@ -0,0 +1,195 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Git\n",
"\n",
"This notebook shows how to load text files from Git repository."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load existing repository from disk"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from git import Repo\n",
"\n",
"repo = Repo.clone_from(\n",
"    \"https://github.com/hwchase17/langchain\", to_path=\"./example_data/test_repo1\"\n",
")\n",
"branch = repo.head.reference"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import GitLoader"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"loader = GitLoader(repo_path=\"./example_data/test_repo1/\", branch=branch)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(data)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='.venv\\n.github\\n.git\\n.mypy_cache\\n.pytest_cache\\nDockerfile' metadata={'file_path': '.dockerignore', 'file_name': '.dockerignore', 'file_type': ''}\n"
]
}
],
"source": [
"print(data[0])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Clone repository from url"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import GitLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = GitLoader(\n",
"    clone_url=\"https://github.com/hwchase17/langchain\",\n",
"    repo_path=\"./example_data/test_repo2/\",\n",
"    branch=\"master\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1074"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filtering files to load"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import GitLoader\n",
"\n",
"# eg. loading only python files\n",
"loader = GitLoader(\n",
"    repo_path=\"./example_data/test_repo1/\",\n",
"    file_filter=lambda file_path: file_path.endswith(\".py\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = GutenbergLoader('https://www.gutenberg.org/cache/epub/69972/pg69972.txt')"
"loader = GutenbergLoader(\"https://www.gutenberg.org/cache/epub/69972/pg69972.txt\")"
]
},
{
@@ -57,7 +57,9 @@
"execution_count": 4,
"outputs": [],
"source": [
"loader = IFixitLoader(\"https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself\")\n",
"loader = IFixitLoader(\n",
"    \"https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself\"\n",
")\n",
"data = loader.load()"
],
"metadata": {
@@ -24,7 +24,12 @@
"metadata": {},
"outputs": [],
"source": [
"loader = NotebookLoader(\"example_data/notebook.ipynb\", include_outputs=True, max_output_length=20, remove_newline=True)"
"loader = NotebookLoader(\n",
"    \"example_data/notebook.ipynb\",\n",
"    include_outputs=True,\n",
"    max_output_length=20,\n",
"    remove_newline=True,\n",
")"
]
},
{
@@ -76,6 +76,7 @@
],
"source": [
"from getpass import getpass\n",
"\n",
"NOTION_TOKEN = getpass()\n",
"DATABASE_ID = getpass()"
]
@@ -78,7 +78,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredPowerPointLoader(\"example_data/fake-power-point.pptx\", mode=\"elements\")"
"loader = UnstructuredPowerPointLoader(\n",
"    \"example_data/fake-power-point.pptx\", mode=\"elements\"\n",
")"
]
},
{
@@ -39,6 +39,7 @@
"source": [
"# fixes a bug with asyncio and jupyter\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
@@ -99,7 +100,7 @@
"source": [
"loader = SitemapLoader(\n",
"    \"https://langchain.readthedocs.io/sitemap.xml\",\n",
"    filter_urls=[\"https://python.langchain.com/en/latest/\"]\n",
"    filter_urls=[\"https://python.langchain.com/en/latest/\"],\n",
")\n",
"documents = loader.load()"
]
Some files were not shown because too many files have changed in this diff