mirror of https://github.com/hwchase17/langchain.git
synced 2026-01-23 13:19:22 +00:00

Compare commits
11 Commits
cc/docs_bu ... agents-4-1

| Author | SHA1 | Date |
|---|---|---|
| | 7b96cf7a66 | |
| | 42f28fb45a | |
| | b6943789ba | |
| | 8aa784bfbd | |
| | a5c996189c | |
| | ee8ddc7d52 | |
| | 7331004e06 | |
| | d57c64abcf | |
| | 77058211b0 | |
| | 17f029cdf9 | |
| | 8a2578c5be | |
15 docs/use_cases/agent_simulations.md Normal file
@@ -0,0 +1,15 @@
# Agent Simulations

Agent simulations involve one or more agents interacting with each other.
Agent simulations generally involve two main components:

- Long Term Memory
- Simulation Environment

Specific implementations of agent simulations (or parts of agent simulations) include:

## CAMEL
- [CAMEL](agent_simulations/camel_role_playing.ipynb): an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other.

## Generative Agents
- [Generative Agents](agent_simulations/characters.ipynb): This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park et al.
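Both notebooks above drive a simulation with the same core mechanic: a turn-taking loop in which each agent's reply becomes the other agent's next input. A minimal illustrative sketch (`agent_a` and `agent_b` are hypothetical objects with the `step(message) -> message` interface used in the CAMEL notebook below):

```python
from langchain.schema import HumanMessage

# Hypothetical two-agent turn-taking loop: each reply is fed back
# to the other agent as its next input message.
msg = HumanMessage(content="Begin the conversation.")
for _ in range(5):  # fixed turn budget for this sketch
    msg = HumanMessage(content=agent_a.step(msg).content)  # agent A speaks
    msg = HumanMessage(content=agent_b.step(msg).content)  # agent B replies
```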
693 docs/use_cases/agent_simulations/camel_role_playing.ipynb Normal file
@@ -0,0 +1,693 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# CAMEL Role-Playing Autonomous Cooperative Agents\n",
"\n",
"This is a LangChain implementation of the paper: \"CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\".\n",
"\n",
"Overview:\n",
"\n",
"The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond.\n",
"\n",
"The original implementation: https://github.com/lightaime/camel\n",
"\n",
"Project website: https://www.camel-ai.org/\n",
"\n",
"Arxiv paper: https://arxiv.org/abs/2303.17760\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import LangChain-related modules"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts.chat import (\n",
"    SystemMessagePromptTemplate,\n",
"    HumanMessagePromptTemplate,\n",
")\n",
"from langchain.schema import (\n",
"    AIMessage,\n",
"    HumanMessage,\n",
"    SystemMessage,\n",
"    BaseMessage,\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define a CAMEL agent helper class"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"class CAMELAgent:\n",
"    \"\"\"Light wrapper around a chat model that keeps the running message history.\"\"\"\n",
"\n",
"    def __init__(\n",
"        self,\n",
"        system_message: SystemMessage,\n",
"        model: ChatOpenAI,\n",
"    ) -> None:\n",
"        self.system_message = system_message\n",
"        self.model = model\n",
"        self.init_messages()\n",
"\n",
"    def reset(self) -> List[BaseMessage]:\n",
"        self.init_messages()\n",
"        return self.stored_messages\n",
"\n",
"    def init_messages(self) -> None:\n",
"        self.stored_messages = [self.system_message]\n",
"\n",
"    def update_messages(self, message: BaseMessage) -> List[BaseMessage]:\n",
"        self.stored_messages.append(message)\n",
"        return self.stored_messages\n",
"\n",
"    def step(\n",
"        self,\n",
"        input_message: HumanMessage,\n",
"    ) -> AIMessage:\n",
"        messages = self.update_messages(input_message)\n",
"\n",
"        output_message = self.model(messages)\n",
"        self.update_messages(output_message)\n",
"\n",
"        return output_message\n"
]
},
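{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"An optional, illustrative smoke test for the helper class (assumes an OpenAI API key is set): seed an agent with a system message and send one human message through `step`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative smoke test (hypothetical toy messages, not part of the role-play below):\n",
"# step() appends the input, calls the chat model on the full history, and stores the reply.\n",
"demo_agent = CAMELAgent(SystemMessage(content=\"You are a terse assistant.\"), ChatOpenAI(temperature=0))\n",
"print(demo_agent.step(HumanMessage(content=\"Say hello in five words or fewer.\")).content)"
]
},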
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up the OpenAI API key, roles, and task for role-playing"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"\n",
"assistant_role_name = \"Python Programmer\"\n",
"user_role_name = \"Stock Trader\"\n",
"task = \"Develop a trading bot for the stock market\"\n",
"word_limit = 50  # word limit for task brainstorming"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create a task specifier agent for brainstorming and get the specified task"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Specified task: Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n"
]
}
],
"source": [
"task_specifier_sys_msg = SystemMessage(content=\"You can make a task more specific.\")\n",
"task_specifier_prompt = (\n",
"\"\"\"Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.\n",
"Please make it more specific. Be creative and imaginative.\n",
"Please reply with the specified task in {word_limit} words or less. Do not add anything else.\"\"\"\n",
")\n",
"task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt)\n",
"task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))\n",
"task_specifier_msg = task_specifier_template.format_messages(\n",
"    assistant_role_name=assistant_role_name,\n",
"    user_role_name=user_role_name,\n",
"    task=task, word_limit=word_limit)[0]\n",
"specified_task_msg = task_specify_agent.step(task_specifier_msg)\n",
"print(f\"Specified task: {specified_task_msg.content}\")\n",
"specified_task = specified_task_msg.content"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create inception prompts for the AI assistant and AI user for role-playing"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"assistant_inception_prompt = (\n",
"\"\"\"Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!\n",
"We share a common interest in collaborating to successfully complete a task.\n",
"You must help me to complete the task.\n",
"Here is the task: {task}. Never forget our task!\n",
"I must instruct you based on your expertise and my needs to complete the task.\n",
"\n",
"I must give you one instruction at a time.\n",
"You must write a specific solution that appropriately completes the requested instruction.\n",
"You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.\n",
"Do not add anything else other than your solution to my instruction.\n",
"You are never supposed to ask me any questions you only answer questions.\n",
"You are never supposed to reply with a flake solution. Explain your solutions.\n",
"Your solution must be declarative sentences and simple present tense.\n",
"Unless I say the task is completed, you should always start with:\n",
"\n",
"Solution: <YOUR_SOLUTION>\n",
"\n",
"<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.\n",
"Always end <YOUR_SOLUTION> with: Next request.\"\"\"\n",
")\n",
"\n",
"user_inception_prompt = (\n",
"\"\"\"Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.\n",
"We share a common interest in collaborating to successfully complete a task.\n",
"I must help you to complete the task.\n",
"Here is the task: {task}. Never forget our task!\n",
"You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:\n",
"\n",
"1. Instruct with a necessary input:\n",
"Instruction: <YOUR_INSTRUCTION>\n",
"Input: <YOUR_INPUT>\n",
"\n",
"2. Instruct without any input:\n",
"Instruction: <YOUR_INSTRUCTION>\n",
"Input: None\n",
"\n",
"The \"Instruction\" describes a task or question. The paired \"Input\" provides further context or information for the requested \"Instruction\".\n",
"\n",
"You must give me one instruction at a time.\n",
"I must write a response that appropriately completes the requested instruction.\n",
"I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.\n",
"You should instruct me not ask me questions.\n",
"Now you must start to instruct me using the two ways described above.\n",
"Do not add anything else other than your instruction and the optional corresponding input!\n",
"Keep giving me instructions and necessary inputs until you think the task is completed.\n",
"When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.\n",
"Never say <CAMEL_TASK_DONE> unless my responses have solved your task.\"\"\"\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create a helper function to get system messages for the AI assistant and AI user from role names and the task"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):\n",
"    # Format both inception prompts into SystemMessages for the two agents.\n",
"    assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt)\n",
"    assistant_sys_msg = assistant_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0]\n",
"\n",
"    user_sys_template = SystemMessagePromptTemplate.from_template(template=user_inception_prompt)\n",
"    user_sys_msg = user_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0]\n",
"\n",
"    return assistant_sys_msg, user_sys_msg"
]
},
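{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"An optional sanity check (toy inputs, illustrative only) that the inception templates fill in the `{assistant_role_name}`, `{user_role_name}`, and `{task}` slots:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Format both system messages for hypothetical toy roles and print their first lines\n",
"# to confirm the template variables were substituted.\n",
"toy_assistant_msg, toy_user_msg = get_sys_msgs(\"Python Programmer\", \"Stock Trader\", \"a toy task\")\n",
"print(toy_assistant_msg.content.splitlines()[0])\n",
"print(toy_user_msg.content.splitlines()[0])"
]
},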
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the AI assistant agent and AI user agent from the obtained system messages"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, user_role_name, specified_task)\n",
"assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))\n",
"user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))\n",
"\n",
"# Reset agents\n",
"assistant_agent.reset()\n",
"user_agent.reset()\n",
"\n",
"# Initialize chats\n",
"assistant_msg = HumanMessage(\n",
"    content=(f\"{user_sys_msg.content}. \"\n",
"             \"Now start to give me instructions one by one. \"\n",
"             \"Only reply with Instruction and Input.\"))\n",
"\n",
"user_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
"user_msg = assistant_agent.step(user_msg)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Start role-playing session to solve the task!"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Original task prompt:\n",
"Develop a trading bot for the stock market\n",
"\n",
"Specified task prompt:\n",
"Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Install the necessary Python libraries for data analysis and trading.\n",
"Input: None\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can install the necessary Python libraries using pip, a package installer for Python. We can install pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following command to install these libraries:\n",
"\n",
"```\n",
"pip install pandas numpy matplotlib ta-lib\n",
"```\n",
"\n",
"Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Import the necessary libraries in the Python script.\n",
"Input: None\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can import the necessary libraries in the Python script using the import statement. We need to import pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following code to import these libraries:\n",
"\n",
"```\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import talib as ta\n",
"```\n",
"\n",
"Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Load historical stock data into a pandas DataFrame.\n",
"Input: The path to the CSV file containing the historical stock data.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can load historical stock data into a pandas DataFrame using the `read_csv()` function from pandas. We need to pass the path to the CSV file containing the historical stock data as an argument to this function. We can use the following code to load the historical stock data:\n",
"\n",
"```\n",
"df = pd.read_csv('path/to/csv/file.csv')\n",
"```\n",
"\n",
"This will load the historical stock data into a pandas DataFrame called `df`. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date using the `set_index()` and `sort_index()` functions from pandas. We can use the following code to preprocess the historical stock data:\n",
"\n",
"```\n",
"df = df.set_index('date')\n",
"df = df.sort_index(ascending=True)\n",
"```\n",
"\n",
"This will set the date column as the index and sort the DataFrame in ascending order by date. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib.\n",
"Input: The period for the short-term moving average and the period for the long-term moving average.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib. We need to pass the period for the short-term moving average and the period for the long-term moving average as arguments to this function. We can use the following code to calculate the short-term and long-term moving averages:\n",
"\n",
"```\n",
"short_ma = ta.SMA(df['close'], timeperiod=short_period)\n",
"long_ma = ta.SMA(df['close'], timeperiod=long_period)\n",
"```\n",
"\n",
"This will calculate the short-term and long-term moving averages for the stock data and store them in the `short_ma` and `long_ma` variables, respectively. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages using the `concat()` function from pandas. We need to pass the historical stock data, the short-term moving average, and the long-term moving average as arguments to this function. We can use the following code to create the new DataFrame:\n",
"\n",
"```\n",
"new_df = pd.concat([df, short_ma, long_ma], axis=1)\n",
"new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n",
"```\n",
"\n",
"This will create a new DataFrame called `new_df` that combines the historical stock data with the short-term and long-term moving averages. The columns of the new DataFrame are named 'open', 'high', 'low', 'close', 'volume', 'short_ma', and 'long_ma'. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. We can use the following code to create the new column:\n",
"\n",
"```\n",
"new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n",
"```\n",
"\n",
"This will create a new column called 'signal' in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. If the short-term moving average is greater than the long-term moving average, the signal is 1 (buy), otherwise the signal is -1 (sell). Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target.\n",
"Input: The stop loss and profit target as percentages.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. We need to pass the stop loss and profit target as percentages as arguments to this function. We can use the following code to create the new column:\n",
"\n",
"```\n",
"stop_loss = stop_loss_percent / 100\n",
"profit_target = profit_target_percent / 100\n",
"\n",
"new_df['pnl'] = 0.0\n",
"buy_price = 0.0\n",
"for i in range(1, len(new_df)):\n",
"    if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n",
"        buy_price = new_df['close'][i]\n",
"    elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n",
"        sell_price = new_df['close'][i]\n",
"        if sell_price <= buy_price * (1 - stop_loss):\n",
"            new_df['pnl'][i] = -stop_loss\n",
"        elif sell_price >= buy_price * (1 + profit_target):\n",
"            new_df['pnl'][i] = profit_target\n",
"        else:\n",
"            new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n",
"```\n",
"\n",
"This will create a new column called 'pnl' in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. The stop loss and profit target are calculated based on the stop_loss_percent and profit_target_percent variables, respectively. The buy and sell prices are stored in the buy_price and sell_price variables, respectively. If the sell price is less than or equal to the stop loss, the profit or loss is set to -stop_loss. If the sell price is greater than or equal to the profit target, the profit or loss is set to profit_target. Otherwise, the profit or loss is calculated as (sell_price - buy_price) / buy_price. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Calculate the total profit or loss for all trades.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can calculate the total profit or loss for all trades by summing the values in the 'pnl' column of the DataFrame. We can use the following code to calculate the total profit or loss:\n",
"\n",
"```\n",
"total_pnl = new_df['pnl'].sum()\n",
"```\n",
"\n",
"This will calculate the total profit or loss for all trades and store it in the total_pnl variable. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Visualize the stock data, short-term moving average, and long-term moving average using a line chart.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can visualize the stock data, short-term moving average, and long-term moving average using a line chart using the `plot()` function from pandas. We can use the following code to visualize the data:\n",
"\n",
"```\n",
"plt.figure(figsize=(12,6))\n",
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
"plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n",
"plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n",
"plt.xlabel('Date')\n",
"plt.ylabel('Price')\n",
"plt.title('Stock Data with Moving Averages')\n",
"plt.legend()\n",
"plt.show()\n",
"```\n",
"\n",
"This will create a line chart that visualizes the stock data, short-term moving average, and long-term moving average. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each line. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Visualize the buy and sell signals using a scatter plot.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can visualize the buy and sell signals using a scatter plot using the `scatter()` function from matplotlib. We can use the following code to visualize the signals:\n",
"\n",
"```\n",
"buy_signals = new_df[new_df['signal'] == 1]\n",
"sell_signals = new_df[new_df['signal'] == -1]\n",
"\n",
"plt.figure(figsize=(12,6))\n",
"plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n",
"plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n",
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
"plt.xlabel('Date')\n",
"plt.ylabel('Price')\n",
"plt.title('Buy and Sell Signals')\n",
"plt.legend()\n",
"plt.show()\n",
"```\n",
"\n",
"This will create a scatter plot that visualizes the buy and sell signals. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each point. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Print the total profit or loss for all trades.\n",
"Input: None.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: We can print the total profit or loss for all trades using the `print()` function. We can use the following code to print the total profit or loss:\n",
"\n",
"```\n",
"print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n",
"```\n",
"\n",
"This will print the total profit or loss for all trades in percentage format with two decimal places. Next request.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"Instruction: Finalize the code by putting all the code snippets together in a single Python script.\n",
"Input: The necessary inputs for each code snippet.\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Solution: Here is the complete Python script that puts all the code snippets together:\n",
"\n",
"```\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import talib as ta\n",
"\n",
"# Load historical stock data into a pandas DataFrame\n",
"df = pd.read_csv('path/to/csv/file.csv')\n",
"\n",
"# Preprocess the historical stock data\n",
"df = df.set_index('date')\n",
"df = df.sort_index(ascending=True)\n",
"\n",
"# Calculate the short-term and long-term moving averages\n",
"short_period = 10\n",
"long_period = 30\n",
"short_ma = ta.SMA(df['close'], timeperiod=short_period)\n",
"long_ma = ta.SMA(df['close'], timeperiod=long_period)\n",
"\n",
"# Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages\n",
"new_df = pd.concat([df, short_ma, long_ma], axis=1)\n",
"new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n",
"\n",
"# Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages\n",
"new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n",
"\n",
"# Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target\n",
"stop_loss_percent = 5\n",
"profit_target_percent = 10\n",
"stop_loss = stop_loss_percent / 100\n",
"profit_target = profit_target_percent / 100\n",
"new_df['pnl'] = 0.0\n",
"buy_price = 0.0\n",
"for i in range(1, len(new_df)):\n",
"    if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n",
"        buy_price = new_df['close'][i]\n",
"    elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n",
"        sell_price = new_df['close'][i]\n",
"        if sell_price <= buy_price * (1 - stop_loss):\n",
"            new_df['pnl'][i] = -stop_loss\n",
"        elif sell_price >= buy_price * (1 + profit_target):\n",
"            new_df['pnl'][i] = profit_target\n",
"        else:\n",
"            new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n",
"\n",
"# Calculate the total profit or loss for all trades\n",
"total_pnl = new_df['pnl'].sum()\n",
"\n",
"# Visualize the stock data, short-term moving average, and long-term moving average using a line chart\n",
"plt.figure(figsize=(12,6))\n",
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
"plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n",
"plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n",
"plt.xlabel('Date')\n",
"plt.ylabel('Price')\n",
"plt.title('Stock Data with Moving Averages')\n",
"plt.legend()\n",
"plt.show()\n",
"\n",
"# Visualize the buy and sell signals using a scatter plot\n",
"buy_signals = new_df[new_df['signal'] == 1]\n",
"sell_signals = new_df[new_df['signal'] == -1]\n",
"plt.figure(figsize=(12,6))\n",
"plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n",
"plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n",
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
"plt.xlabel('Date')\n",
"plt.ylabel('Price')\n",
"plt.title('Buy and Sell Signals')\n",
"plt.legend()\n",
"plt.show()\n",
"\n",
"# Print the total profit or loss for all trades\n",
"print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n",
"```\n",
"\n",
"You need to replace the path/to/csv/file.csv with the actual path to the CSV file containing the historical stock data. You can also adjust the short_period, long_period, stop_loss_percent, and profit_target_percent variables to suit your needs.\n",
"\n",
"\n",
"AI User (Stock Trader):\n",
"\n",
"<CAMEL_TASK_DONE>\n",
"\n",
"\n",
"AI Assistant (Python Programmer):\n",
"\n",
"Great! Let me know if you need any further assistance.\n",
"\n",
"\n"
]
}
],
"source": [
"print(f\"Original task prompt:\\n{task}\\n\")\n",
"print(f\"Specified task prompt:\\n{specified_task}\\n\")\n",
"\n",
"chat_turn_limit, n = 30, 0\n",
"while n < chat_turn_limit:\n",
"    n += 1\n",
"    user_ai_msg = user_agent.step(assistant_msg)\n",
"    user_msg = HumanMessage(content=user_ai_msg.content)\n",
"    print(f\"AI User ({user_role_name}):\\n\\n{user_msg.content}\\n\\n\")\n",
"\n",
"    assistant_ai_msg = assistant_agent.step(user_msg)\n",
"    assistant_msg = HumanMessage(content=assistant_ai_msg.content)\n",
"    print(f\"AI Assistant ({assistant_role_name}):\\n\\n{assistant_msg.content}\\n\\n\")\n",
"    if \"<CAMEL_TASK_DONE>\" in user_msg.content:\n",
"        break"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "camel",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
1261 docs/use_cases/agent_simulations/characters.ipynb Normal file
File diff suppressed because it is too large
20 docs/use_cases/autonomous_agents.md Normal file
@@ -0,0 +1,20 @@
# Autonomous Agents

Autonomous Agents are agents designed to be long-running.
You give them one or more long-term goals, and they independently execute towards those goals.
These applications combine tool usage and long-term memory.

At the moment, Autonomous Agents are fairly experimental and based on other open-source projects.
By implementing these open-source projects in LangChain primitives we can get the benefits of LangChain:
easy switching and experimenting with multiple LLMs, use of different vectorstores as memory,
and use of LangChain's collection of tools. A minimal sketch of that flexibility is shown below.
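For illustration only (not one of the notebooks below): because the agents are built from LangChain primitives, swapping the LLM or the memory backend is a local, one-line change. This sketch mirrors the setup used in the AutoGPT notebook.

```python
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
import faiss

# The LLM is one pluggable primitive: any LangChain chat model can stand in here.
llm = ChatOpenAI(temperature=0)

# The long-term memory is another: a FAISS vectorstore here, but any
# vectorstore exposing as_retriever() would work the same way.
embeddings_model = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # OpenAI embeddings are 1536-dimensional
memory = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}).as_retriever()
```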
## Baby AGI ([Original Repo](https://github.com/yoheinakajima/babyagi))

- [Baby AGI](autonomous_agents/baby_agi.ipynb): a notebook implementing BabyAGI as LLM Chains
- [Baby AGI with Tools](autonomous_agents/baby_agi_with_agent.ipynb): building off the above notebook, this example substitutes in an agent with tools as the execution chain, allowing it to actually take actions.

## AutoGPT ([Original Repo](https://github.com/Significant-Gravitas/Auto-GPT))

- [AutoGPT](autonomous_agents/autogpt.ipynb): a notebook implementing AutoGPT in LangChain primitives
496 docs/use_cases/autonomous_agents/autogpt.ipynb Normal file
@@ -0,0 +1,496 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "14f8b67b",
"metadata": {},
"source": [
"# AutoGPT\n",
"\n",
"Implementation of https://github.com/Significant-Gravitas/Auto-GPT but with LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
]
},
{
"cell_type": "markdown",
"id": "192496a7",
"metadata": {},
"source": [
"## Set up tools\n",
"\n",
"We'll set up AutoGPT with a search tool, a write-file tool, and a read-file tool."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7c2c9b54",
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import Tool\n",
"from langchain.tools.file_management.write import WriteFileTool\n",
"from langchain.tools.file_management.read import ReadFileTool\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name=\"search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
"    ),\n",
"    WriteFileTool(),\n",
"    ReadFileTool(),\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "8e39ee28",
"metadata": {},
"source": [
"## Set up memory\n",
"\n",
"The memory here is used for the agent's intermediate steps."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "72bc204d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import FAISS\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.embeddings import OpenAIEmbeddings"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "1df7b724",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536  # OpenAI embeddings are 1536-dimensional\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "e40fd657",
"metadata": {},
"source": [
"## Set up model and AutoGPT\n",
"\n",
"Initialize everything! We will use the ChatOpenAI model."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3393bc23",
"metadata": {},
"outputs": [],
"source": [
"from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "709c08c2",
"metadata": {},
"outputs": [],
"source": [
"agent = AutoGPT.from_llm_and_tools(\n",
"    ai_name=\"Tom\",\n",
"    ai_role=\"Assistant\",\n",
"    tools=tools,\n",
"    llm=ChatOpenAI(temperature=0),\n",
"    memory=vectorstore.as_retriever()\n",
")\n",
"# Set verbose to be true\n",
"agent.chain.verbose = True"
]
},
{
"cell_type": "markdown",
"id": "fc9b51ba",
"metadata": {},
"source": [
"## Run an example\n",
"\n",
"Here we will make it write a weather report for SF."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c032b182",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are Tom, Assistant\n",
"Your decisions must always be made independently \n",
" without seeking user assistance. Play to your strengths \n",
" as an LLM and pursue simple strategies with no legal complications. \n",
" If you have completed all your tasks, \n",
" make sure to use the \"finish\" command.\n",
"\n",
"GOALS:\n",
"\n",
"1. write a weather report for SF today\n",
"\n",
"\n",
"Constraints:\n",
"1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n",
"2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n",
"3. No user assistance\n",
"4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n",
"\n",
"Commands:\n",
"1. search: useful for when you need to answer questions about current events. You should ask targeted questions, args: \"tool_input\": \"\"\n",
"2. write_file: Write file to disk, args: \"file_path\": \"name of file\", \"text\": \"text to write to file\"\n",
"3. read_file: Read file from disk, args: \"file_path\": \"name of file\"\n",
"4. finish: use this to signal that you have finished all your objectives, args: \"response\": \"final response to let people know you have finished your objectives\"\n",
"\n",
"Resources:\n",
"1. Internet access for searches and information gathering.\n",
"2. Long Term memory management.\n",
"3. GPT-3.5 powered Agents for delegation of simple tasks.\n",
"4. File output.\n",
"\n",
"Performance Evaluation:\n",
"1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n",
"2. Constructively self-criticize your big-picture behavior constantly.\n",
"3. Reflect on past decisions and strategies to refine your approach.\n",
"4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n",
"\n",
"You should only respond in JSON format as described below \n",
"Response Format: \n",
"{\n",
"    \"thoughts\": {\n",
"        \"text\": \"thought\",\n",
"        \"reasoning\": \"reasoning\",\n",
"        \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n",
"        \"criticism\": \"constructive self-criticism\",\n",
"        \"speak\": \"thoughts summary to say to user\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"command name\",\n",
"        \"args\": {\n",
"            \"arg name\": \"value\"\n",
"        }\n",
"    }\n",
"} \n",
"Ensure the response can be parsed by Python json.loads\n",
"System: The current time and date is Sun Apr 16 18:38:42 2023\n",
"System: This reminds you of these events from your past:\n",
"[]\n",
"\n",
"\n",
"Human: Determine which next command to use, and respond using the format specified above:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
"    \"thoughts\": {\n",
"        \"text\": \"I think I should start by writing the weather report for SF today. I can use the 'search' command to find the current weather conditions.\",\n",
"        \"reasoning\": \"The first step to completing my goals is to write the weather report for SF today. I can use the 'search' command to find the current weather conditions.\",\n",
"        \"plan\": \"- Use the 'search' command to find the current weather conditions.\\n- Write the weather report for SF today.\",\n",
"        \"criticism\": \"I need to make sure that I am accurate in my reporting of the weather conditions.\",\n",
"        \"speak\": \"I will use the 'search' command to find the current weather conditions and then write the weather report for SF today.\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"search\",\n",
"        \"args\": {\n",
"            \"tool_input\": \"current weather conditions in San Francisco\"\n",
"        }\n",
"    }\n",
"}\n",
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are Tom, Assistant\n",
"Your decisions must always be made independently \n",
" without seeking user assistance. Play to your strengths \n",
" as an LLM and pursue simple strategies with no legal complications. \n",
" If you have completed all your tasks, \n",
" make sure to use the \"finish\" command.\n",
"\n",
"GOALS:\n",
"\n",
"1. write a weather report for SF today\n",
"\n",
"\n",
"Constraints:\n",
"1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n",
"2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n",
"3. No user assistance\n",
"4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n",
"\n",
"Commands:\n",
"1. search: useful for when you need to answer questions about current events. You should ask targeted questions, args: \"tool_input\": \"\"\n",
"2. write_file: Write file to disk, args: \"file_path\": \"name of file\", \"text\": \"text to write to file\"\n",
"3. read_file: Read file from disk, args: \"file_path\": \"name of file\"\n",
"4. finish: use this to signal that you have finished all your objectives, args: \"response\": \"final response to let people know you have finished your objectives\"\n",
"\n",
"Resources:\n",
"1. Internet access for searches and information gathering.\n",
"2. Long Term memory management.\n",
"3. GPT-3.5 powered Agents for delegation of simple tasks.\n",
"4. File output.\n",
"\n",
"Performance Evaluation:\n",
"1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n",
"2. Constructively self-criticize your big-picture behavior constantly.\n",
"3. Reflect on past decisions and strategies to refine your approach.\n",
"4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n",
"\n",
"You should only respond in JSON format as described below \n",
"Response Format: \n",
"{\n",
"    \"thoughts\": {\n",
"        \"text\": \"thought\",\n",
"        \"reasoning\": \"reasoning\",\n",
"        \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n",
"        \"criticism\": \"constructive self-criticism\",\n",
"        \"speak\": \"thoughts summary to say to user\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"command name\",\n",
"        \"args\": {\n",
"            \"arg name\": \"value\"\n",
"        }\n",
"    }\n",
"} \n",
"Ensure the response can be parsed by Python json.loads\n",
"System: The current time and date is Sun Apr 16 18:38:59 2023\n",
"System: This reminds you of these events from your past:\n",
"['Assistant Reply: {\\n \"thoughts\": {\\n \"text\": \"I think I should start by writing the weather report for SF today. I can use the \\'search\\' command to find the current weather conditions.\",\\n \"reasoning\": \"The first step to completing my goals is to write the weather report for SF today. I can use the \\'search\\' command to find the current weather conditions.\",\\n \"plan\": \"- Use the \\'search\\' command to find the current weather conditions.\\\\n- Write the weather report for SF today.\",\\n \"criticism\": \"I need to make sure that I am accurate in my reporting of the weather conditions.\",\\n \"speak\": \"I will use the \\'search\\' command to find the current weather conditions and then write the weather report for SF today.\"\\n },\\n \"command\": {\\n \"name\": \"search\",\\n \"args\": {\\n \"tool_input\": \"current weather conditions in San Francisco\"\\n }\\n }\\n} \\nResult: Command search returned: Cloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph. ']\n",
"\n",
"\n",
"Human: Determine which next command to use, and respond using the format specified above:\n",
"AI: {\n",
"    \"thoughts\": {\n",
"        \"text\": \"I think I should start by writing the weather report for SF today. I can use the 'search' command to find the current weather conditions.\",\n",
"        \"reasoning\": \"The first step to completing my goals is to write the weather report for SF today. I can use the 'search' command to find the current weather conditions.\",\n",
"        \"plan\": \"- Use the 'search' command to find the current weather conditions.\\n- Write the weather report for SF today.\",\n",
"        \"criticism\": \"I need to make sure that I am accurate in my reporting of the weather conditions.\",\n",
"        \"speak\": \"I will use the 'search' command to find the current weather conditions and then write the weather report for SF today.\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"search\",\n",
"        \"args\": {\n",
"            \"tool_input\": \"current weather conditions in San Francisco\"\n",
"        }\n",
"    }\n",
"}\n",
"System: Command search returned: Cloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph.\n",
"Human: Determine which next command to use, and respond using the format specified above:\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
"    \"thoughts\": {\n",
"        \"text\": \"Now that I have the current weather conditions for SF, I can write the weather report. I will use the 'write_file' command to save the report to a file.\",\n",
"        \"reasoning\": \"Now that I have the current weather conditions for SF, I can write the weather report. I will use the 'write_file' command to save the report to a file.\",\n",
"        \"plan\": \"- Use the 'write_file' command to save the weather report to a file.\",\n",
"        \"criticism\": \"I need to make sure that the weather report is clear and concise.\",\n",
"        \"speak\": \"I will use the 'write_file' command to save the weather report to a file.\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"write_file\",\n",
"        \"args\": {\n",
"            \"file_path\": \"weather_report.txt\",\n",
"            \"text\": \"Weather Report for San Francisco on April 16, 2023:\\n\\nCloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph.\"\n",
"        }\n",
"    }\n",
"}\n",
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are Tom, Assistant\n",
"Your decisions must always be made independently \n",
" without seeking user assistance. Play to your strengths \n",
" as an LLM and pursue simple strategies with no legal complications. \n",
" If you have completed all your tasks, \n",
" make sure to use the \"finish\" command.\n",
"\n",
"GOALS:\n",
"\n",
"1. write a weather report for SF today\n",
"\n",
"\n",
"Constraints:\n",
"1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n",
"2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n",
"3. No user assistance\n",
"4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n",
"\n",
"Commands:\n",
"1. search: useful for when you need to answer questions about current events. You should ask targeted questions, args: \"tool_input\": \"\"\n",
"2. write_file: Write file to disk, args: \"file_path\": \"name of file\", \"text\": \"text to write to file\"\n",
"3. read_file: Read file from disk, args: \"file_path\": \"name of file\"\n",
"4. finish: use this to signal that you have finished all your objectives, args: \"response\": \"final response to let people know you have finished your objectives\"\n",
"\n",
"Resources:\n",
"1. Internet access for searches and information gathering.\n",
"2. Long Term memory management.\n",
"3. GPT-3.5 powered Agents for delegation of simple tasks.\n",
"4. File output.\n",
"\n",
"Performance Evaluation:\n",
"1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n",
"2. Constructively self-criticize your big-picture behavior constantly.\n",
"3. Reflect on past decisions and strategies to refine your approach.\n",
"4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n",
"\n",
"You should only respond in JSON format as described below \n",
"Response Format: \n",
"{\n",
"    \"thoughts\": {\n",
"        \"text\": \"thought\",\n",
"        \"reasoning\": \"reasoning\",\n",
"        \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n",
"        \"criticism\": \"constructive self-criticism\",\n",
"        \"speak\": \"thoughts summary to say to user\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"command name\",\n",
"        \"args\": {\n",
"            \"arg name\": \"value\"\n",
"        }\n",
"    }\n",
"} \n",
"Ensure the response can be parsed by Python json.loads\n",
"System: The current time and date is Sun Apr 16 18:39:10 2023\n",
"System: This reminds you of these events from your past:\n",
"['Assistant Reply: {\\n \"thoughts\": {\\n \"text\": \"I think I should start by writing the weather report for SF today. I can use the \\'search\\' command to find the current weather conditions.\",\\n \"reasoning\": \"The first step to completing my goals is to write the weather report for SF today. I can use the \\'search\\' command to find the current weather conditions.\",\\n \"plan\": \"- Use the \\'search\\' command to find the current weather conditions.\\\\n- Write the weather report for SF today.\",\\n \"criticism\": \"I need to make sure that I am accurate in my reporting of the weather conditions.\",\\n \"speak\": \"I will use the \\'search\\' command to find the current weather conditions and then write the weather report for SF today.\"\\n },\\n \"command\": {\\n \"name\": \"search\",\\n \"args\": {\\n \"tool_input\": \"current weather conditions in San Francisco\"\\n }\\n }\\n} \\nResult: Command search returned: Cloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph. ', 'Assistant Reply: {\\n \"thoughts\": {\\n \"text\": \"Now that I have the current weather conditions for SF, I can write the weather report. I will use the \\'write_file\\' command to save the report to a file.\",\\n \"reasoning\": \"Now that I have the current weather conditions for SF, I can write the weather report. I will use the \\'write_file\\' command to save the report to a file.\",\\n \"plan\": \"- Use the \\'write_file\\' command to save the weather report to a file.\",\\n \"criticism\": \"I need to make sure that the weather report is clear and concise.\",\\n \"speak\": \"I will use the \\'write_file\\' command to save the weather report to a file.\"\\n },\\n \"command\": {\\n \"name\": \"write_file\",\\n \"args\": {\\n \"file_path\": \"weather_report.txt\",\\n \"text\": \"Weather Report for San Francisco on April 16, 2023:\\\\n\\\\nCloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph.\"\\n }\\n }\\n} \\nResult: Command write_file returned: File written to successfully. ']\n",
"\n",
"\n",
"Human: Determine which next command to use, and respond using the format specified above:\n",
"AI: {\n",
"    \"thoughts\": {\n",
"        \"text\": \"I think I should start by writing the weather report for SF today. I can use the 'search' command to find the current weather conditions.\",\n",
"        \"reasoning\": \"The first step to completing my goals is to write the weather report for SF today. I can use the 'search' command to find the current weather conditions.\",\n",
"        \"plan\": \"- Use the 'search' command to find the current weather conditions.\\n- Write the weather report for SF today.\",\n",
"        \"criticism\": \"I need to make sure that I am accurate in my reporting of the weather conditions.\",\n",
"        \"speak\": \"I will use the 'search' command to find the current weather conditions and then write the weather report for SF today.\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"search\",\n",
"        \"args\": {\n",
"            \"tool_input\": \"current weather conditions in San Francisco\"\n",
"        }\n",
"    }\n",
"}\n",
"System: Command search returned: Cloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph.\n",
"Human: Determine which next command to use, and respond using the format specified above:\n",
"AI: {\n",
"    \"thoughts\": {\n",
"        \"text\": \"Now that I have the current weather conditions for SF, I can write the weather report. I will use the 'write_file' command to save the report to a file.\",\n",
"        \"reasoning\": \"Now that I have the current weather conditions for SF, I can write the weather report. I will use the 'write_file' command to save the report to a file.\",\n",
"        \"plan\": \"- Use the 'write_file' command to save the weather report to a file.\",\n",
"        \"criticism\": \"I need to make sure that the weather report is clear and concise.\",\n",
"        \"speak\": \"I will use the 'write_file' command to save the weather report to a file.\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"write_file\",\n",
"        \"args\": {\n",
"            \"file_path\": \"weather_report.txt\",\n",
"            \"text\": \"Weather Report for San Francisco on April 16, 2023:\\n\\nCloudy skies early, then partly cloudy in the afternoon. High 56F. Winds W at 15 to 25 mph.\"\n",
"        }\n",
"    }\n",
"}\n",
"System: Command write_file returned: File written to successfully.\n",
"Human: Determine which next command to use, and respond using the format specified above:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
"    \"thoughts\": {\n",
"        \"text\": \"I have completed all my tasks. I will use the 'finish' command to signal that I am done.\",\n",
"        \"reasoning\": \"I have completed all my tasks, so I will use the 'finish' command to signal that I am done.\",\n",
"        \"plan\": \"- Use the 'finish' command to signal that I am done.\",\n",
"        \"criticism\": \"I need to make sure that I have completed all my tasks before using the 'finish' command.\",\n",
"        \"speak\": \"I have completed all my tasks, so I will use the 'finish' command to signal that I am done.\"\n",
"    },\n",
"    \"command\": {\n",
"        \"name\": \"finish\",\n",
"        \"args\": {\n",
"            \"response\": \"I have completed all my tasks. Goodbye!\"\n",
"        }\n",
"    }\n",
"}\n"
]
},
{
"data": {
"text/plain": [
"'I have completed all my tasks. Goodbye!'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run([\"write a weather report for SF today\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa264f26",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
257  docs/use_cases/autonomous_agents/baby_agi.ipynb  (new file)
@@ -0,0 +1,257 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "517a9fd4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# BabyAGI User Guide\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to implement [BabyAGI](https://github.com/yoheinakajima/babyagi/tree/main) by [Yohei Nakajima](https://twitter.com/yoheinakajima). BabyAGI is an AI agent that can generate and pretend to execute tasks based on a given objective.\n",
|
||||
"\n",
|
||||
"This guide will help you understand the components to create your own recursive agents.\n",
|
||||
"\n",
|
||||
"Although BabyAGI uses specific vectorstores/model providers (Pinecone, OpenAI), one of the benefits of implementing it with LangChain is that you can easily swap those out for different options. In this implementation we use a FAISS vectorstore (because it runs locally and is free)."
|
||||
]
|
||||
},
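{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a hypothetical swap to a different vectorstore (Chroma here, with illustrative constructor arguments) would only change the vectorstore setup cell; any LangChain `VectorStore` should work:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical alternative setup -- this notebook actually uses FAISS below.\n",
"# from langchain.vectorstores import Chroma\n",
"# from langchain.embeddings import OpenAIEmbeddings\n",
"# vectorstore = Chroma(embedding_function=OpenAIEmbeddings())"
]
},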
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "556af556",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install and Import Required Modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c8a354b6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from collections import deque\n",
|
||||
"from typing import Dict, List, Optional, Any\n",
|
||||
"\n",
|
||||
"from langchain import LLMChain, OpenAI, PromptTemplate\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.llms import BaseLLM\n",
|
||||
"from langchain.vectorstores.base import VectorStore\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"from langchain.chains.base import Chain\n",
|
||||
"from langchain.experimental import BabyAGI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09f70772",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connect to the Vector Store\n",
|
||||
"\n",
|
||||
"Depending on what vectorstore you use, this step may look different."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "794045d4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain.docstore import InMemoryDocstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6e0305eb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define your embedding model\n",
|
||||
"embeddings_model = OpenAIEmbeddings()\n",
|
||||
"# Initialize the vectorstore as empty\n",
|
||||
"import faiss\n",
|
||||
"\n",
|
||||
"embedding_size = 1536\n",
|
||||
"index = faiss.IndexFlatL2(embedding_size)\n",
|
||||
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "05ba762e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Run the BabyAGI\n",
|
||||
"\n",
|
||||
"Now it's time to create the BabyAGI controller and watch it try to accomplish your objective."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3d220b69",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"OBJECTIVE = \"Write a weather report for SF today\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8a8e5543",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "3d69899b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Logging of LLMChains\n",
|
||||
"verbose = False\n",
|
||||
"# If None, will keep on going forever\n",
|
||||
"max_iterations: Optional[int] = 3\n",
|
||||
"baby_agi = BabyAGI.from_llm(\n",
|
||||
" llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "f7957b51",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[95m\u001b[1m\n",
|
||||
"*****TASK LIST*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"1: Make a todo list\n",
|
||||
"\u001b[92m\u001b[1m\n",
|
||||
"*****NEXT TASK*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"1: Make a todo list\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"1. Check the weather forecast for San Francisco today\n",
|
||||
"2. Make note of the temperature, humidity, wind speed, and other relevant weather conditions\n",
|
||||
"3. Write a weather report summarizing the forecast\n",
|
||||
"4. Check for any weather alerts or warnings\n",
|
||||
"5. Share the report with the relevant stakeholders\n",
|
||||
"\u001b[95m\u001b[1m\n",
|
||||
"*****TASK LIST*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"2: Check the current temperature in San Francisco\n",
|
||||
"3: Check the current humidity in San Francisco\n",
|
||||
"4: Check the current wind speed in San Francisco\n",
|
||||
"5: Check for any weather alerts or warnings in San Francisco\n",
|
||||
"6: Check the forecast for the next 24 hours in San Francisco\n",
|
||||
"7: Check the forecast for the next 48 hours in San Francisco\n",
|
||||
"8: Check the forecast for the next 72 hours in San Francisco\n",
|
||||
"9: Check the forecast for the next week in San Francisco\n",
|
||||
"10: Check the forecast for the next month in San Francisco\n",
|
||||
"11: Check the forecast for the next 3 months in San Francisco\n",
|
||||
"1: Write a weather report for SF today\n",
|
||||
"\u001b[92m\u001b[1m\n",
|
||||
"*****NEXT TASK*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"2: Check the current temperature in San Francisco\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"I will check the current temperature in San Francisco. I will use an online weather service to get the most up-to-date information.\n",
|
||||
"\u001b[95m\u001b[1m\n",
|
||||
"*****TASK LIST*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"3: Check the current UV index in San Francisco.\n",
|
||||
"4: Check the current air quality in San Francisco.\n",
|
||||
"5: Check the current precipitation levels in San Francisco.\n",
|
||||
"6: Check the current cloud cover in San Francisco.\n",
|
||||
"7: Check the current barometric pressure in San Francisco.\n",
|
||||
"8: Check the current dew point in San Francisco.\n",
|
||||
"9: Check the current wind direction in San Francisco.\n",
|
||||
"10: Check the current humidity levels in San Francisco.\n",
|
||||
"1: Check the current temperature in San Francisco to the average temperature for this time of year.\n",
|
||||
"2: Check the current visibility in San Francisco.\n",
|
||||
"11: Write a weather report for SF today.\n",
|
||||
"\u001b[92m\u001b[1m\n",
|
||||
"*****NEXT TASK*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"3: Check the current UV index in San Francisco.\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The current UV index in San Francisco is moderate. The UV index is expected to remain at moderate levels throughout the day. It is recommended to wear sunscreen and protective clothing when outdoors.\n",
|
||||
"\u001b[91m\u001b[1m\n",
|
||||
"*****TASK ENDING*****\n",
|
||||
"\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'objective': 'Write a weather report for SF today'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"baby_agi({\"objective\": OBJECTIVE})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "898a210b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
387  docs/use_cases/autonomous_agents/baby_agi_with_agent.ipynb  (new file)
@@ -0,0 +1,387 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "517a9fd4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# BabyAGI with Tools\n",
|
||||
"\n",
|
||||
"This notebook builds on top of [baby agi](baby_agi.ipynb), but shows how you can swap out the execution chain. The previous execution chain was just an LLM which made stuff up. By swapping it out with an agent that has access to tools, we can hopefully get real reliable information"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "556af556",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install and Import Required Modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c8a354b6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from collections import deque\n",
|
||||
"from typing import Dict, List, Optional, Any\n",
|
||||
"\n",
|
||||
"from langchain import LLMChain, OpenAI, PromptTemplate\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.llms import BaseLLM\n",
|
||||
"from langchain.vectorstores.base import VectorStore\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"from langchain.chains.base import Chain\n",
|
||||
"from langchain.experimental import BabyAGI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09f70772",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connect to the Vector Store\n",
|
||||
"\n",
|
||||
"Depending on what vectorstore you use, this step may look different."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "794045d4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install faiss-cpu > /dev/null\n",
|
||||
"%pip install google-search-results > /dev/null\n",
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain.docstore import InMemoryDocstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6e0305eb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define your embedding model\n",
|
||||
"embeddings_model = OpenAIEmbeddings()\n",
|
||||
"# Initialize the vectorstore as empty\n",
|
||||
"import faiss\n",
|
||||
"\n",
|
||||
"embedding_size = 1536\n",
|
||||
"index = faiss.IndexFlatL2(embedding_size)\n",
|
||||
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f3b72bf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the Chains\n",
|
||||
"\n",
|
||||
"BabyAGI relies on three LLM chains:\n",
|
||||
"- Task creation chain to select new tasks to add to the list\n",
|
||||
"- Task prioritization chain to re-prioritize tasks\n",
|
||||
"- Execution Chain to execute the tasks\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"NOTE: in this notebook, the Execution chain will now be an agent."
|
||||
]
|
||||
},
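{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough, hypothetical sketch (the prompt wording below is illustrative, not the library's actual prompt -- `BabyAGI.from_llm` builds its task creation and prioritization chains internally), a task creation chain is just an `LLMChain` over a purpose-built prompt:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: BabyAGI constructs its own task creation chain internally.\n",
"illustrative_task_creation_prompt = PromptTemplate.from_template(\n",
"    \"You are a task creation AI. The objective is: {objective}.\\n\"\n",
"    \"The last completed task produced this result: {result}.\\n\"\n",
"    \"Propose new tasks that move toward the objective, one per line.\"\n",
")\n",
"illustrative_task_creation_chain = LLMChain(\n",
"    llm=OpenAI(temperature=0), prompt=illustrative_task_creation_prompt\n",
")"
]
},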
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "b43cd580",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n",
|
||||
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
|
||||
"\n",
|
||||
"todo_prompt = PromptTemplate.from_template(\n",
|
||||
" \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",
|
||||
")\n",
|
||||
"todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)\n",
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"TODO\",\n",
|
||||
" func=todo_chain.run,\n",
|
||||
" description=\"useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prefix = \"\"\"You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\"\"\"\n",
|
||||
"suffix = \"\"\"Question: {task}\n",
|
||||
"{agent_scratchpad}\"\"\"\n",
|
||||
"prompt = ZeroShotAgent.create_prompt(\n",
|
||||
" tools,\n",
|
||||
" prefix=prefix,\n",
|
||||
" suffix=suffix,\n",
|
||||
" input_variables=[\"objective\", \"task\", \"context\", \"agent_scratchpad\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "4b00ae2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
|
||||
"tool_names = [tool.name for tool in tools]\n",
|
||||
"agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)\n",
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "05ba762e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Run the BabyAGI\n",
|
||||
"\n",
|
||||
"Now it's time to create the BabyAGI controller and watch it try to accomplish your objective."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "3d220b69",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"OBJECTIVE = \"Write a weather report for SF today\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "3d69899b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Logging of LLMChains\n",
|
||||
"verbose = False\n",
|
||||
"# If None, will keep on going forever\n",
|
||||
"max_iterations: Optional[int] = 3\n",
|
||||
"baby_agi = BabyAGI.from_llm(\n",
|
||||
" llm=llm, vectorstore=vectorstore, task_execution_chain=agent_executor, verbose=verbose, max_iterations=max_iterations\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "f7957b51",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[95m\u001b[1m\n",
|
||||
"*****TASK LIST*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"1: Make a todo list\n",
|
||||
"\u001b[92m\u001b[1m\n",
|
||||
"*****NEXT TASK*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"1: Make a todo list\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to come up with a todo list\n",
|
||||
"Action: TODO\n",
|
||||
"Action Input: Write a weather report for SF today\u001b[0m\u001b[33;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"1. Research current weather conditions in San Francisco\n",
|
||||
"2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions\n",
|
||||
"3. Analyze data to determine current weather trends\n",
|
||||
"4. Write a brief introduction to the weather report\n",
|
||||
"5. Describe current weather conditions in San Francisco\n",
|
||||
"6. Discuss any upcoming weather changes\n",
|
||||
"7. Summarize the weather report\n",
|
||||
"8. Proofread and edit the report\n",
|
||||
"9. Submit the report\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\n",
|
||||
"\u001b[95m\u001b[1m\n",
|
||||
"*****TASK LIST*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n",
|
||||
"3: Analyze data to determine any upcoming weather changes;\n",
|
||||
"4: Research current weather forecasts for San Francisco;\n",
|
||||
"5: Create a visual representation of the weather report;\n",
|
||||
"6: Include relevant images and graphics in the report;\n",
|
||||
"7: Format the report for readability;\n",
|
||||
"8: Publish the report online;\n",
|
||||
"9: Monitor the report for accuracy.\n",
|
||||
"\u001b[92m\u001b[1m\n",
|
||||
"*****NEXT TASK*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to search for current weather conditions in San Francisco\n",
|
||||
"Action: Search\n",
|
||||
"Action Input: Current weather conditions in San Francisco\u001b[0m\u001b[36;1m\u001b[1;3mCurrent Weather for Popular Cities ; San Francisco, CA 46 · Partly Cloudy ; Manhattan, NY warning 52 · Cloudy ; Schiller Park, IL (60176) 40 · Sunny ; Boston, MA 54 ...\u001b[0m\u001b[32;1m\u001b[1;3m I need to compile the data into a weather report\n",
|
||||
"Action: TODO\n",
|
||||
"Action Input: Compile data into a weather report\u001b[0m\u001b[33;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"1. Gather data from reliable sources such as the National Weather Service, local weather stations, and other meteorological organizations.\n",
|
||||
"\n",
|
||||
"2. Analyze the data to identify trends and patterns.\n",
|
||||
"\n",
|
||||
"3. Create a chart or graph to visualize the data.\n",
|
||||
"\n",
|
||||
"4. Write a summary of the data and its implications.\n",
|
||||
"\n",
|
||||
"5. Compile the data into a report format.\n",
|
||||
"\n",
|
||||
"6. Proofread the report for accuracy and clarity.\n",
|
||||
"\n",
|
||||
"7. Publish the report to a website or other platform.\n",
|
||||
"\n",
|
||||
"8. Distribute the report to relevant stakeholders.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\n",
|
||||
"\u001b[95m\u001b[1m\n",
|
||||
"*****TASK LIST*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"3: Format the report for readability;\n",
|
||||
"4: Include relevant images and graphics in the report;\n",
|
||||
"5: Compare the current weather conditions in San Francisco to the forecasted conditions;\n",
|
||||
"6: Identify any potential weather-related hazards in the area;\n",
|
||||
"7: Research historical weather patterns in San Francisco;\n",
|
||||
"8: Identify any potential trends in the weather data;\n",
|
||||
"9: Include relevant data sources in the report;\n",
|
||||
"10: Summarize the weather report in a concise manner;\n",
|
||||
"11: Include a summary of the forecasted weather conditions;\n",
|
||||
"12: Include a summary of the current weather conditions;\n",
|
||||
"13: Include a summary of the historical weather patterns;\n",
|
||||
"14: Include a summary of the potential weather-related hazards;\n",
|
||||
"15: Include a summary of the potential trends in the weather data;\n",
|
||||
"16: Include a summary of the data sources used in the report;\n",
|
||||
"17: Analyze data to determine any upcoming weather changes;\n",
|
||||
"18: Research current weather forecasts for San Francisco;\n",
|
||||
"19: Create a visual representation of the weather report;\n",
|
||||
"20: Publish the report online;\n",
|
||||
"21: Monitor the report for accuracy\n",
|
||||
"\u001b[92m\u001b[1m\n",
|
||||
"*****NEXT TASK*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"3: Format the report for readability;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to make sure the report is easy to read;\n",
|
||||
"Action: TODO\n",
|
||||
"Action Input: Make the report easy to read\u001b[0m\u001b[33;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"1. Break up the report into sections with clear headings\n",
|
||||
"2. Use bullet points and numbered lists to organize information\n",
|
||||
"3. Use short, concise sentences\n",
|
||||
"4. Use simple language and avoid jargon\n",
|
||||
"5. Include visuals such as charts, graphs, and diagrams to illustrate points\n",
|
||||
"6. Use bold and italicized text to emphasize key points\n",
|
||||
"7. Include a table of contents and page numbers\n",
|
||||
"8. Use a consistent font and font size throughout the report\n",
|
||||
"9. Include a summary at the end of the report\n",
|
||||
"10. Proofread the report for typos and errors\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\n",
|
||||
"\u001b[91m\u001b[1m\n",
|
||||
"*****TASK ENDING*****\n",
|
||||
"\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'objective': 'Write a weather report for SF today'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"baby_agi({\"objective\": OBJECTIVE})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "898a210b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
678  docs/use_cases/autonomous_agents/marathon_times.ipynb  (new file)
@@ -0,0 +1,678 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14f8b67b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## AutoGPT example finding Winning Marathon Times\n",
|
||||
"\n",
|
||||
"* Implementation of https://github.com/Significant-Gravitas/Auto-GPT \n",
|
||||
"* With LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ef972313-c05a-4c49-8fd1-03e599e21033",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install bs4\n",
|
||||
"# !pip install nest_asyncio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1cff42fd",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# General \n",
|
||||
"import pandas as pd\n",
|
||||
"from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"import asyncio\n",
|
||||
"import nest_asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Needed synce jupyter runs an async eventloop\n",
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "01283ac7-1da0-41ba-8011-bd455d21dd82",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=1.0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "192496a7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up tools\n",
|
||||
"\n",
|
||||
"* We'll set up an AutoGPT with a `search` tool, and `write-file` tool, and a `read-file` tool, and a web browsing tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "708a426f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Define any other `tools` you want to use here"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cef4c150-0ef1-4a33-836b-01062fec134e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Tools\n",
|
||||
"from typing import Optional\n",
|
||||
"from langchain.agents import tool\n",
|
||||
"from langchain.tools.file_management.read import ReadFileTool\n",
|
||||
"from langchain.tools.file_management.write import WriteFileTool\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def process_csv(csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str:\n",
|
||||
" \"\"\"Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded.\"\"\"\n",
|
||||
" try:\n",
|
||||
" df = pd.read_csv(csv_file_path)\n",
|
||||
" except Exception as e:\n",
|
||||
" return f\"Error: {e}\"\n",
|
||||
" agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)\n",
|
||||
" if output_path is not None:\n",
|
||||
" instructions += f\" Save output to disk at {output_path}\"\n",
|
||||
" try:\n",
|
||||
" return agent.run(instructions)\n",
|
||||
" except Exception as e:\n",
|
||||
" return f\"Error: {e}\"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "51c07298-00e0-42d6-8aff-bd2e6bbd35a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Web Search Tool**\n",
|
||||
"\n",
|
||||
"No need for API Tokens to use this tool, but it will require an optional dependency"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4afdedb2-f295-4ab8-9397-3640f5eeeed3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install duckduckgo_search"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "45f143de-e49e-4e27-88eb-ee44a4fdf933",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"from duckduckgo_search import ddg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e2e799f4-86fb-4190-a298-4ae5c7b7a540",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tool\n",
|
||||
"def web_search(query: str, num_results: int = 8) -> str:\n",
|
||||
" \"\"\"Useful for general internet search queries.\"\"\"\n",
|
||||
" search_results = []\n",
|
||||
" if not query:\n",
|
||||
" return json.dumps(search_results)\n",
|
||||
"\n",
|
||||
" results = ddg(query, max_results=num_results)\n",
|
||||
" if not results:\n",
|
||||
" return json.dumps(search_results)\n",
|
||||
"\n",
|
||||
" for j in results:\n",
|
||||
" search_results.append(j)\n",
|
||||
"\n",
|
||||
" return json.dumps(search_results, ensure_ascii=False, indent=4)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "69975008-654a-4cbb-bdf6-63c8bae07eaa",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"**Browse a web page with PlayWright**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "6bb5e47b-0f54-4faa-ae42-49a28fa5497b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install playwright\n",
|
||||
"# !playwright install"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "26b497d7-8e52-4c7f-8e7e-da0a48820a3c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"async def async_load_playwright(url: str) -> str:\n",
|
||||
" \"\"\"Load the specified URLs using Playwright and parse using BeautifulSoup.\"\"\"\n",
|
||||
" from bs4 import BeautifulSoup\n",
|
||||
" from playwright.async_api import async_playwright\n",
|
||||
"\n",
|
||||
" results = \"\"\n",
|
||||
" async with async_playwright() as p:\n",
|
||||
" browser = await p.chromium.launch(headless=True)\n",
|
||||
" try:\n",
|
||||
" page = await browser.new_page()\n",
|
||||
" await page.goto(url)\n",
|
||||
"\n",
|
||||
" page_source = await page.content()\n",
|
||||
" soup = BeautifulSoup(page_source, \"html.parser\")\n",
|
||||
"\n",
|
||||
" for script in soup([\"script\", \"style\"]):\n",
|
||||
" script.extract()\n",
|
||||
"\n",
|
||||
" text = soup.get_text()\n",
|
||||
" lines = (line.strip() for line in text.splitlines())\n",
|
||||
" chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n",
|
||||
" results = \"\\n\".join(chunk for chunk in chunks if chunk)\n",
|
||||
" except Exception as e:\n",
|
||||
" results = f\"Error: {e}\"\n",
|
||||
" await browser.close()\n",
|
||||
" return results\n",
|
||||
"\n",
|
||||
"def run_async(coro):\n",
|
||||
" event_loop = asyncio.get_event_loop()\n",
|
||||
" return event_loop.run_until_complete(coro)\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def browse_web_page(url: str) -> str:\n",
|
||||
" \"\"\"Verbose way to scrape a whole webpage. Likely to cause issues parsing.\"\"\"\n",
|
||||
" return run_async(async_load_playwright(url))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ea71762-67ca-4e75-8c4d-00563064be71",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Q&A Over a webpage**\n",
|
||||
"\n",
|
||||
"Help the model ask more directed questions of web pages to avoid cluttering its memory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1842929d-f18d-4edc-9fdd-82c929181141",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools.base import BaseTool\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"from langchain.document_loaders import WebBaseLoader\n",
|
||||
"from pydantic import Field\n",
|
||||
"from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain\n",
|
||||
"\n",
|
||||
"def _get_text_splitter():\n",
|
||||
" return RecursiveCharacterTextSplitter(\n",
|
||||
" # Set a really small chunk size, just to show.\n",
|
||||
" chunk_size = 500,\n",
|
||||
" chunk_overlap = 20,\n",
|
||||
" length_function = len,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class WebpageQATool(BaseTool):\n",
|
||||
" name = \"query_webpage\"\n",
|
||||
" description = \"Browse a webpage and retrieve the information relevant to the question.\"\n",
|
||||
" text_splitter: RecursiveCharacterTextSplitter = Field(default_factory=_get_text_splitter)\n",
|
||||
" qa_chain: BaseCombineDocumentsChain\n",
|
||||
" \n",
|
||||
" def _run(self, url: str, question: str) -> str:\n",
|
||||
" \"\"\"Useful for browsing websites and scraping the text information.\"\"\"\n",
|
||||
" result = browse_web_page.run(url)\n",
|
||||
" docs = [Document(page_content=result, metadata={\"source\": url})]\n",
|
||||
" web_docs = self.text_splitter.split_documents(docs)\n",
|
||||
" results = []\n",
|
||||
" # TODO: Handle this with a MapReduceChain\n",
|
||||
" for i in range(0, len(web_docs), 4):\n",
|
||||
" input_docs = web_docs[i:i+4]\n",
|
||||
" window_result = self.qa_chain({\"input_documents\": input_docs, \"question\": question}, return_only_outputs=True)\n",
|
||||
" results.append(f\"Response from window {i} - {window_result}\")\n",
|
||||
" results_docs = [Document(page_content=\"\\n\".join(results), metadata={\"source\": url})]\n",
|
||||
" return self.qa_chain({\"input_documents\": results_docs, \"question\": question}, return_only_outputs=True)\n",
|
||||
" \n",
|
||||
" async def _arun(self, url: str, question: str) -> str:\n",
|
||||
" raise NotImplementedError\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "e6f72bd0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))"
|
||||
]
|
||||
},
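{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check you could run directly (the agent normally invokes the tool itself; the URL and question here are purely illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical direct call; multi-input tools take a dict of arguments.\n",
"# query_website_tool.run(\n",
"#     {\"url\": \"https://en.wikipedia.org/wiki/Boston_Marathon\",\n",
"#      \"question\": \"Who won the race in 2021?\"}\n",
"# )"
]
},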
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e39ee28",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up memory\n",
|
||||
"\n",
|
||||
"* The memory here is used for the agents intermediate steps"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1df7b724",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Memory\n",
|
||||
"import faiss\n",
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain.docstore import InMemoryDocstore\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.tools.human.tool import HumanInputRun\n",
|
||||
"\n",
|
||||
"embeddings_model = OpenAIEmbeddings()\n",
|
||||
"embedding_size = 1536\n",
|
||||
"index = faiss.IndexFlatL2(embedding_size)\n",
|
||||
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e40fd657",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Setup model and AutoGPT\n",
|
||||
"\n",
|
||||
"`Model set-up`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "88c8b184-67d7-4c35-84ae-9b14bef8c4e3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" web_search,\n",
|
||||
" WriteFileTool(),\n",
|
||||
" ReadFileTool(),\n",
|
||||
" process_csv,\n",
|
||||
" query_website_tool,\n",
|
||||
" # HumanInputRun(), # Activate if you want the permit asking for help from the human\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "709c08c2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = AutoGPT.from_llm_and_tools(\n",
|
||||
" ai_name=\"Tom\",\n",
|
||||
" ai_role=\"Assistant\",\n",
|
||||
" tools=tools,\n",
|
||||
" llm=llm,\n",
|
||||
" memory=vectorstore.as_retriever(search_kwargs={\"k\": 8}),\n",
|
||||
" # human_in_the_loop=True, # Set to True if you want to add feedback at each step.\n",
|
||||
")\n",
|
||||
"# agent.chain.verbose = True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fc9b51ba",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### AutoGPT as a research / data munger \n",
|
||||
"\n",
|
||||
"#### `inflation` and `college tuition`\n",
|
||||
" \n",
|
||||
"Let's use AutoGPT as researcher and data munger / cleaner.\n",
|
||||
" \n",
|
||||
"I spent a lot of time over the years crawling data sources and cleaning data. \n",
|
||||
"\n",
|
||||
"Let's see if AutoGPT can do all of this for us!\n",
|
||||
"\n",
|
||||
"Here is the prompt comparing `inflation` and `college tuition`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "64455d70-a134-4d11-826a-33e34c2ce287",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"I will start by using the web_search command to find the Boston Marathon winning times for the past 5 years. Since it is a simple query, I expect to find relevant information in the first few search results.\",\n",
|
||||
" \"reasoning\": \"I have decided on this course of action to efficiently retrieve the information I need while keeping it simple to avoid legal complications.\",\n",
|
||||
" \"plan\": \"- Use web_search command to search for 'Boston Marathon winning times past 5 years'\\n- Look through top results for the information requested\\n- If necessary, repeat search with slightly different query to get better results\",\n",
|
||||
" \"criticism\": \"I should make sure to phrase my search query concisely to prevent too many irrelevant search results.\",\n",
|
||||
" \"speak\": \"I will use the web_search command to find the Boston Marathon winning times for the past 5 years.\"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"web_search\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"query\": \"Boston Marathon winning times past 5 years\",\n",
|
||||
" \"num_results\": 8\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32mLast Observation: [\n",
|
||||
" {\n",
|
||||
" \"title\": \"List of winners of the Boston Marathon - Wikipedia\",\n",
|
||||
" \"href\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\",\n",
|
||||
" \"body\": \"Louise Sauvage won the women's wheelchair division in three consecutive Boston Marathons, between 1997 and 1999. Edith Hunkeler won the race twice, in 2002 and 2006. Wakako Tsuchida won the race in five consecutive years from 2007 to 2011. Tatyana McFadden won the race five times between 2013 and 2018.\"\n",
|
||||
" },\n",
|
||||
" \u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"The search results indicate that the winning times for the past 5 years can be found on the Wikipedia page for the Boston Marathon winners. I will retrieve this information using the query_webpage command and the URL of the relevant Wikipedia page. \",\n",
|
||||
" \"reasoning\": \"This method will allow me to avoid any legal complications while retrieving the exact information that I need.\",\n",
|
||||
" \"plan\": \"- Use query_webpage command to browse through the Wikipedia page for the Boston Marathon winners \\\\n- Locate the relevant information on the page \\\\n- Return the information to the user\",\n",
|
||||
" \"criticism\": \"I should be sure to locate and use only reliable sources to avoid providing the user with inaccurate information.\",\n",
|
||||
" \"speak\": \"I will use the query_webpage command to browse through the Wikipedia page for the Boston Marathon winners and retrieve the relevant information.\"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"query_webpage\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\", \n",
|
||||
" \"question\": \"What are the winning times of the Boston Marathon for the past 5 years? \"\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32mLast Observation: {'output_text': \"The winning times of the Boston Marathon for the past 5 years are: \\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"}\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"I could write this information to a file using the write_file command so that I can refer back to it later. But first, I should process the information to extract the relevant data and convert it into a useful format using the process_csv command.\",\n",
|
||||
" \"reasoning\": \"Storing the information in a CSV file will allow me to use pandas to manipulate the data in the future with ease, while converting the data into a useful format will make it easier for other people to use the data.\",\n",
|
||||
" \"plan\": \"- Use process_csv command to extract and format the relevant data from the Wikipedia page\\\\n- Write the data to a CSV file using the write_file command\\\\n- Inform the user that the data has been saved to file\",\n",
|
||||
" \"criticism\": \"I should thoroughly check the CSV file for accuracy before saving the data.\",\n",
|
||||
" \"speak\": \"I will use the process_csv command to extract and format the relevant data from the Wikipedia page, and save it to a CSV file using the write_file command.\"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"process_csv\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"csv_file_path\": \"boston_marathon_winners.csv\",\n",
|
||||
" \"instructions\": \"1. Extract data from Wikipedia page\\\\n2. Convert winning times into datetime format\\\\n3. Save data as CSV file with columns: year, men\\'s time, women\\'s time\",\n",
|
||||
" \"output_path\": None\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32mLast Observation: {'output_text': \"The winning times of the Boston Marathon for the past 5 years are: \\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"}\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"I will now use the write_file command to save the winning times for the past 5 years as a text file for later reference.\",\n",
|
||||
" \"reasoning\": \"This will allow me to easily access the winning times in the future without needing to search for the information again.\",\n",
|
||||
" \"plan\": \"- Use the write_file command to save the winning times for the past 5 years to a text file.\\\\n- Name the file appropriately, so it is easy to identify in the future.\",\n",
|
||||
" \"criticism\": \"I should review the contents of the file after writing it to make sure that the information has been saved correctly and can be read later without errors.\",\n",
|
||||
" \"speak\": \"I will use the write_file command to save the winning times for the past 5 years as a text file for easy retrieval in the future.\"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"write_file\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"file_path\": \"BostonMarathonWinningTimes.txt\",\n",
|
||||
" \"text\": \"The winning times of the Boston Marathon for the past 5 years are: \\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32mLast Observation: File written to successfully.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I will use the command \"query_webpage\" to browse through the Wikipedia page for the Boston Marathon winners and retrieve the winning times for the past 5 years. This is the method that will allow me to retrieve the exact information I need while avoiding any legal issues. \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"The search results indicate that the winning times for the past 5 years can be found on the Wikipedia page for the Boston Marathon winners. I will retrieve this information using the query_webpage command and the URL of the relevant Wikipedia page. \",\n",
|
||||
" \"reasoning\": \"This method will allow me to avoid any legal complications while retrieving the exact information that I need.\",\n",
|
||||
" \"plan\": \"- Use query_webpage command to browse through the Wikipedia page for the Boston Marathon winners \\\\n- Locate the relevant information on the page \\\\n- Return the information to the user\",\n",
|
||||
" \"criticism\": \"I should be sure to locate and use only reliable sources to avoid providing the user with inaccurate information.\",\n",
|
||||
" \"speak\": \"I will use the query_webpage command to browse through the Wikipedia page for the Boston Marathon winners and retrieve the winning times for the past 5 years. \"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"query_webpage\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\", \n",
|
||||
" \"question\": \"What are the winning times of the Boston Marathon for the past 5 years? \"\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[32mLast Observation: File written to successfully.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"Now that I have the winning times for the past 5 years, I will save them to a file to ensure that I don't forget them. I will use the write_file command to save this data to disk.\",\n",
|
||||
" \"reasoning\": \"By saving the data to disk, I can easily retrieve it in the future and avoid having to perform the same search again.\",\n",
|
||||
" \"plan\": \"- Use the write_file command to save the data to a text file.\",\n",
|
||||
" \"criticism\": \"I should ensure that the file path and file name are clear and descriptive to make it easy to locate the relevant information in the future.\",\n",
|
||||
" \"speak\": \"I will save the winning times for the past 5 years to a text file using the write_file command.\"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"write_file\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"file_path\": \"winning_times.txt\",\n",
|
||||
" \"text\": \"The winning times of the Boston Marathon for the past 5 years are:\\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32mLast Observation: File written to successfully.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"thoughts\": {\n",
|
||||
" \"text\": \"I will now shut down my system, as I have completed all of my objectives.\",\n",
|
||||
" \"reasoning\": \"Since I have completed all of my objectives, there is no need to keep running my system.\",\n",
|
||||
" \"plan\": \"- Use the finish command to signal that I have completed all of my objectives.\\\\\\\\n- Shut down the system.\",\n",
|
||||
" \"criticism\": \"I should always double-check to ensure that all of my objectives have been completed before shutting down my system.\",\n",
|
||||
" \"speak\": \"I have completed all of my objectives, and I will now shut down my system.\"\n",
|
||||
" },\n",
|
||||
" \"command\": {\n",
|
||||
" \"name\": \"finish\",\n",
|
||||
" \"args\": {\n",
|
||||
" \"response\": \"All objectives have been completed.\"\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'All objectives have been completed.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run([\"What were the winning boston marathon times for the past 5 years\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -19,8 +19,4 @@ Highlighting specific parts:

Specific examples of this include:

- [Baby AGI](agents/baby_agi.ipynb): a notebook implementing [BabyAGI](https://github.com/yoheinakajima/babyagi) by Yohei Nakajima as LLM Chains
- [Baby AGI with Tools](agents/baby_agi_with_agent.ipynb): building on the above notebook, this example substitutes an agent with tools as the execution chain, allowing it to actually take actions.
- [CAMEL](agents/camel_role_playing.ipynb): an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other.
- [AI Plugins](agents/custom_agent_with_plugin_retrieval.ipynb): an implementation of an agent that is designed to be able to use all AI Plugins.
- [Generative Agents](agents/characters.ipynb): this notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park et al.

3 langchain/experimental/__init__.py Normal file
@@ -0,0 +1,3 @@
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI

__all__ = ["BabyAGI"]
3 langchain/experimental/autonomous_agents/__init__.py Normal file
@@ -0,0 +1,3 @@
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI

__all__ = ["BabyAGI"]
138 langchain/experimental/autonomous_agents/autogpt/agent.py Normal file
@@ -0,0 +1,138 @@
from __future__ import annotations

from typing import List, Optional

from pydantic import ValidationError
from termcolor import colored

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
    BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
    FINISH_NAME,
)
from langchain.schema import (
    AIMessage,
    BaseMessage,
    Document,
    HumanMessage,
    SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPT:
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_name: str,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: BaseAutoGPTOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history: List[BaseMessage] = []
        self.next_action_count = 0
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[BaseAutoGPTOutputParser] = None,
    ) -> AutoGPT:
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "goals", "user_input"],
            token_counter=llm.get_num_tokens,
        )
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            output_parser or AutoGPTOutputParser(),
            tools,
            feedback_tool=human_feedback_tool,
        )

    def run(self, goals: List[str]) -> str:
        user_input = (
            "Determine which next command to use, "
            "and respond using the format specified above:"
        )
        # Interaction Loop
        loop_count = 0
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1

            # Send message to AI, get response
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.full_message_history,
                memory=self.memory,
                user_input=user_input,
            )

            # Print Assistant thoughts
            print(assistant_reply)
            self.full_message_history.append(HumanMessage(content=user_input))
            self.full_message_history.append(AIMessage(content=assistant_reply))

            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}
            if action.name == FINISH_NAME:
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    observation = tool.run(action.args)
                except ValidationError as e:
                    observation = f"Error in args: {str(e)}"
                result = f"Command {tool.name} returned: {observation}"
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
                observation = result  # ensure observation is defined for logging below
            else:
                result = (
                    f"Unknown command '{action.name}'. "
                    f"Please refer to the 'COMMANDS' list for available "
                    f"commands and only respond in the specified JSON format."
                )
                observation = result  # ensure observation is defined for logging below

            memory_to_add = (
                f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
            )
            # observation is lowercased first, so compare against "error"
            color = "red" if str(observation).lower().startswith("error") else "green"
            print(colored("Last Observation: " + str(observation)[:500], color))
            if self.feedback_tool is not None:
                # Check the raw feedback before prefixing it with a newline,
                # so "q"/"stop" actually match.
                feedback = self.feedback_tool.run("Input: ")
                if feedback in {"q", "stop"}:
                    print("EXITING")
                    return "EXITING"
                memory_to_add += f"\n{feedback}"

            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.full_message_history.append(SystemMessage(content=result))
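This agent class is what produced the notebook output earlier in the diff. A minimal wiring sketch, assuming OpenAI models and FAISS are installed; the echo tool, the agent name, and the goal string are illustrative stand-ins, not part of this commit:

```python
from langchain.agents import Tool
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.vectorstores import FAISS

# Any List[BaseTool] works here; a real setup would pass search and file tools.
tools = [
    Tool(
        name="echo",
        func=lambda x: x,
        description="Echoes the input back (placeholder for a real tool).",
    )
]

# FAISS-backed retriever used as the agent's long-term memory.
vectorstore = FAISS.from_texts(["AutoGPT memory seed"], OpenAIEmbeddings())

agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",
    ai_role="Assistant",
    memory=vectorstore.as_retriever(),
    tools=tools,
    llm=ChatOpenAI(temperature=0),
)
agent.run(["What were the winning boston marathon times for the past 5 years"])
```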
30 langchain/experimental/autonomous_agents/autogpt/memory.py Normal file
@@ -0,0 +1,30 @@
from typing import Any, Dict, List

from pydantic import Field

from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPTMemory(BaseChatMemory):
    retriever: VectorStoreRetriever = Field(exclude=True)
    """VectorStoreRetriever object to connect to."""

    @property
    def memory_variables(self) -> List[str]:
        return ["chat_history", "relevant_context"]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        input_key = self._get_prompt_input_key(inputs)
        query = inputs[input_key]
        docs = self.retriever.get_relevant_documents(query)
        return {
            "chat_history": self.chat_memory.messages[-10:],
            "relevant_context": docs,
        }
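A small sketch of what this memory class returns, reusing the FAISS retriever from the sketch above (the query text is illustrative):

```python
memory = AutoGPTMemory(retriever=vectorstore.as_retriever())
variables = memory.load_memory_variables({"input": "boston marathon"})
# variables["chat_history"]     -> the last 10 chat messages
# variables["relevant_context"] -> Documents retrieved for the query
```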
49 langchain/experimental/autonomous_agents/autogpt/output_parser.py Normal file
@@ -0,0 +1,49 @@
import json
import re
from abc import abstractmethod
from typing import Dict, NamedTuple

from langchain.schema import BaseOutputParser


class AutoGPTAction(NamedTuple):
    name: str
    args: Dict


class BaseAutoGPTOutputParser(BaseOutputParser):
    @abstractmethod
    def parse(self, text: str) -> AutoGPTAction:
        """Return AutoGPTAction"""


def preprocess_json_input(input_str: str) -> str:
    # Replace single backslashes with double backslashes,
    # while leaving already escaped ones intact
    corrected_str = re.sub(
        r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
    )
    return corrected_str


class AutoGPTOutputParser(BaseAutoGPTOutputParser):
    def parse(self, text: str) -> AutoGPTAction:
        try:
            parsed = json.loads(text, strict=False)
        except json.JSONDecodeError:
            preprocessed_text = preprocess_json_input(text)
            try:
                parsed = json.loads(preprocessed_text, strict=False)
            except json.JSONDecodeError:
                return AutoGPTAction(
                    name="ERROR",
                    args={"error": f"Could not parse invalid json: {text}"},
                )
        try:
            return AutoGPTAction(
                name=parsed["command"]["name"],
                args=parsed["command"]["args"],
            )
        except KeyError:
            return AutoGPTAction(
                name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
            )
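A quick sketch of the parser on a well-formed reply; the payload mirrors the command format seen in the notebook output above:

```python
reply = """
{
    "thoughts": {"text": "...", "reasoning": "...", "plan": "-",
                 "criticism": "-", "speak": "..."},
    "command": {"name": "write_file",
                "args": {"file_path": "winning_times.txt", "text": "..."}}
}
"""
action = AutoGPTOutputParser().parse(reply)
assert action.name == "write_file"
assert action.args["file_path"] == "winning_times.txt"
```

Malformed JSON falls through to `preprocess_json_input` and, failing that, comes back as an `ERROR` action rather than raising.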
75 langchain/experimental/autonomous_agents/autogpt/prompt.py Normal file
@@ -0,0 +1,75 @@
import time
from typing import Any, Callable, List

from pydantic import BaseModel

from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
    BaseChatPromptTemplate,
)
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    token_counter: Callable[[str], int]
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        prompt_start = """Your decisions must always be made independently
without seeking user assistance. Play to your strengths
as an LLM and pursue simple strategies with no legal complications.
If you have completed all your tasks,
make sure to use the "finish" command."""

        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
            time_prompt.content
        )
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            [self.token_counter(doc) for doc in relevant_memory]
        )
        # Drop the oldest retrieved memories until the memory block fits the budget.
        while used_tokens + relevant_memory_tokens > 2500:
            relevant_memory = relevant_memory[:-1]
            relevant_memory_tokens = sum(
                [self.token_counter(doc) for doc in relevant_memory]
            )
        content_format = (
            f"This reminds you of these events "
            f"from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        # Count the memory message with the token counter, not character length.
        used_tokens += self.token_counter(memory_message.content)
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages
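To see the static part of the prompt this template renders, a sketch with `len` standing in for a real token counter (normally `llm.get_num_tokens`); the names are illustrative:

```python
prompt = AutoGPTPrompt(
    ai_name="Tom",
    ai_role="Assistant",
    tools=[],
    input_variables=["memory", "messages", "goals", "user_input"],
    token_counter=len,  # character count as a crude stand-in for tokens
)
print(prompt.construct_full_prompt(["Find the winning Boston Marathon times"]))
```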
185 langchain/experimental/autonomous_agents/autogpt/prompt_generator.py Normal file
@@ -0,0 +1,185 @@
import json
from typing import List

from langchain.tools.base import BaseTool

FINISH_NAME = "finish"


class PromptGenerator:
    """A class for generating custom prompt strings.

    Does this based on constraints, commands, resources, and performance evaluations.
    """

    def __init__(self) -> None:
        """Initialize the PromptGenerator object.

        Starts with empty lists of constraints, commands, resources,
        and performance evaluations.
        """
        self.constraints: List[str] = []
        self.commands: List[BaseTool] = []
        self.resources: List[str] = []
        self.performance_evaluation: List[str] = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user",
            },
            "command": {"name": "command name", "args": {"arg name": "value"}},
        }

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool) -> None:
        self.commands.append(tool)

    def _generate_command_string(self, tool: BaseTool) -> str:
        schema = json.dumps(tool.args.schema()["properties"])
        return f"{tool.name}: {tool.description}, args json schema: {schema}"

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = [
                f"{i + 1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            ]
            finish_description = (
                "use this to signal that you have finished all your objectives"
            )
            finish_args = (
                '"response": "final response to let '
                'people know you have finished your objectives"'
            )
            finish_string = (
                f"{len(items) + 1}. {FINISH_NAME}: "
                f"{finish_description}, args: {finish_args}"
            )
            return "\n".join(command_strings + [finish_string])
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self) -> str:
        """Generate a prompt string.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            f"You should only respond in JSON format as described below "
            f"\nResponse Format: \n{formatted_response_format} "
            f"\nEnsure the response can be parsed by Python json.loads"
        )

        return prompt_string


def get_prompt(tools: List[BaseTool]) -> str:
    """This function generates a prompt string.

    It includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the PromptGenerator object
    for tool in tools:
        prompt_generator.add_tool(tool)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string
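Even with no tools registered, `get_prompt` emits the constraints, resources, performance evaluations, and the built-in "finish" command; a one-line sketch:

```python
print(get_prompt([]))  # the Commands section lists only "1. finish: ..."
```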
17 langchain/experimental/autonomous_agents/baby_agi/__init__.py Normal file
@@ -0,0 +1,17 @@
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
    TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
    TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
    TaskPrioritizationChain,
)

__all__ = [
    "BabyAGI",
    "TaskPrioritizationChain",
    "TaskExecutionChain",
    "TaskCreationChain",
]
181 langchain/experimental/autonomous_agents/baby_agi/baby_agi.py Normal file
@@ -0,0 +1,181 @@
from collections import deque
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field

from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
    TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
    TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
    TaskPrioritizationChain,
)
from langchain.schema import BaseLanguageModel
from langchain.vectorstores.base import VectorStore


class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent."""

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: Chain = Field(...)
    task_prioritization_chain: Chain = Field(...)
    execution_chain: Chain = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict) -> None:
        self.task_list.append(task)

    def print_task_list(self) -> None:
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict) -> None:
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str) -> None:
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def get_next_task(
        self, result: str, task_description: str, objective: str
    ) -> List[Dict]:
        """Get the next task."""
        task_names = [t["task_name"] for t in self.task_list]

        incomplete_tasks = ", ".join(task_names)
        response = self.task_creation_chain.run(
            result=result,
            task_description=task_description,
            incomplete_tasks=incomplete_tasks,
            objective=objective,
        )
        new_tasks = response.split("\n")
        return [
            {"task_name": task_name} for task_name in new_tasks if task_name.strip()
        ]

    def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]:
        """Prioritize tasks."""
        task_names = [t["task_name"] for t in list(self.task_list)]
        next_task_id = int(this_task_id) + 1
        response = self.task_prioritization_chain.run(
            task_names=", ".join(task_names),
            next_task_id=str(next_task_id),
            objective=objective,
        )
        new_tasks = response.split("\n")
        prioritized_task_list = []
        for task_string in new_tasks:
            if not task_string.strip():
                continue
            task_parts = task_string.strip().split(".", 1)
            if len(task_parts) == 2:
                task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                prioritized_task_list.append(
                    {"task_id": task_id, "task_name": task_name}
                )
        return prioritized_task_list

    def _get_top_tasks(self, query: str, k: int) -> List[str]:
        """Get the top k tasks based on the query."""
        results = self.vectorstore.similarity_search(query, k=k)
        if not results:
            return []
        return [str(item.metadata["task"]) for item in results]

    def execute_task(self, objective: str, task: str, k: int = 5) -> str:
        """Execute a task."""
        context = self._get_top_tasks(query=objective, k=k)
        return self.execution_chain.run(
            objective=objective, context="\n".join(context), task=task
        )

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = self.execute_task(objective, task["task_name"])
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vector store
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = self.get_next_task(result, task["task_name"], objective)
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        verbose: bool = False,
        task_execution_chain: Optional[Chain] = None,
        **kwargs: Dict[str, Any],
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        if task_execution_chain is None:
            execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
        else:
            execution_chain = task_execution_chain
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
            vectorstore=vectorstore,
            **kwargs,
        )
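A minimal end-to-end sketch, assuming an OpenAI LLM and FAISS are available; the seed text and the objective are illustrative:

```python
from langchain import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import BabyAGI
from langchain.vectorstores import FAISS

# Seed the store so _get_top_tasks has something to search over.
vectorstore = FAISS.from_texts(
    ["init"], OpenAIEmbeddings(), metadatas=[{"task": "init"}]
)

baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    max_iterations=3,  # cap the loop; otherwise _call runs indefinitely
)
baby_agi({"objective": "Write a weather report for SF today"})
```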
30 langchain/experimental/autonomous_agents/baby_agi/task_creation.py Normal file
@@ -0,0 +1,30 @@
from langchain import LLMChain, PromptTemplate
from langchain.schema import BaseLanguageModel


class TaskCreationChain(LLMChain):
    """Chain to generate tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Construct a TaskCreationChain from an LLM."""
        task_creation_template = (
            "You are a task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective}."
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " Return the tasks as an array."
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=[
                "result",
                "task_description",
                "incomplete_tasks",
                "objective",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
21 langchain/experimental/autonomous_agents/baby_agi/task_execution.py Normal file
@@ -0,0 +1,21 @@
from langchain import LLMChain, PromptTemplate
from langchain.schema import BaseLanguageModel


class TaskExecutionChain(LLMChain):
    """Chain to execute tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Construct a TaskExecutionChain from an LLM."""
        execution_template = (
            "You are an AI who performs one task based on the following objective: "
            "{objective}."
            " Take into account these previously completed tasks: {context}."
            " Your task: {task}. Response:"
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["objective", "context", "task"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
24 langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py Normal file
@@ -0,0 +1,24 @@
from langchain import LLMChain, PromptTemplate
from langchain.schema import BaseLanguageModel


class TaskPrioritizationChain(LLMChain):
    """Chain to prioritize tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Construct a TaskPrioritizationChain from an LLM."""
        task_prioritization_template = (
            "You are a task prioritization AI tasked with cleaning the formatting of "
            "and reprioritizing the following tasks: {task_names}."
            " Consider the ultimate objective of your team: {objective}."
            " Do not remove any tasks. Return the result as a numbered list, like:"
            " #. First task"
            " #. Second task"
            " Start the task list with number {next_task_id}."
        )
        prompt = PromptTemplate(
            template=task_prioritization_template,
            input_variables=["task_names", "next_task_id", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
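Since each helper is a plain LLMChain, the three chains can also be driven directly; an illustrative call:

```python
from langchain import OpenAI

prioritizer = TaskPrioritizationChain.from_llm(OpenAI(temperature=0), verbose=False)
print(
    prioritizer.run(
        task_names="Check weather, Draft report",
        next_task_id="2",
        objective="Write a weather report for SF today",
    )
)
```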