This commit is contained in:
Harrison Chase
2023-04-14 15:31:26 -07:00
parent 30573b2e30
commit 9860c09fa2
8 changed files with 457 additions and 0 deletions

View File

@@ -0,0 +1,142 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "7c2c9b54",
"metadata": {},
"outputs": [],
"source": [
"from langchain.auto_agents.autogpt.agent import Agent\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import Tool\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
" ),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "72bc204d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import FAISS\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.embeddings import OpenAIEmbeddings"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "1df7b724",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "709c08c2",
"metadata": {},
"outputs": [],
"source": [
"agent = Agent.from_llm_and_tools(\n",
" ai_name=\"Tom\",\n",
" ai_role=\"Assistant\",\n",
" tools=tools,\n",
" llm=ChatOpenAI(temperature=0),\n",
" memory=vectorstore.as_retriever()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "c032b182",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I should start by reviewing my current businesses and their performance. This will help me identify areas that need improvement and opportunities for growth.\",\n",
" \"reasoning\": \"Before I can make any decisions about how to increase my net worth or grow my Twitter account, I need to have a clear understanding of my current situation. By reviewing my businesses, I can identify areas that need improvement and opportunities for growth.\",\n",
" \"plan\": \"- Review each of my businesses and their financial performance\\n- Identify areas that need improvement and opportunities for growth\\n- Develop a plan to address these areas and capitalize on opportunities\",\n",
" \"criticism\": \"I need to make sure that I am not spending too much time on this review and that I am focusing on actionable insights. I also need to make sure that I am not getting bogged down in details that are not relevant to my goals.\",\n",
" \"speak\": \"I am going to review my current businesses and their performance to identify areas that need improvement and opportunities for growth.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"file output\",\n",
" \"input\": \"Review of current businesses and their performance\"\n",
" }\n",
"}\n"
]
},
{
"ename": "KeyError",
"evalue": "'file output'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[5], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/workplace/langchain/langchain/auto_agents/autogpt/agent.py:75\u001b[0m, in \u001b[0;36mAgent.run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[38;5;66;03m# Get command name and arguments\u001b[39;00m\n\u001b[1;32m 74\u001b[0m action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_parser\u001b[38;5;241m.\u001b[39mparse(assistant_reply)\n\u001b[0;32m---> 75\u001b[0m tool \u001b[38;5;241m=\u001b[39m \u001b[43m{\u001b[49m\u001b[43mt\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mt\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mt\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtools\u001b[49m\u001b[43m}\u001b[49m\u001b[43m[\u001b[49m\u001b[43maction\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtool\u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 76\u001b[0m \u001b[38;5;66;03m# Execute command\u001b[39;00m\n\u001b[1;32m 77\u001b[0m observation \u001b[38;5;241m=\u001b[39m tool\u001b[38;5;241m.\u001b[39mrun(action\u001b[38;5;241m.\u001b[39mtool_input)\n",
"\u001b[0;31mKeyError\u001b[0m: 'file output'"
]
}
],
"source": [
"agent.run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32710d40",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

View File

@@ -0,0 +1,86 @@
from langchain.auto_agents.autogpt.prompt import AutoGPTPrompt
from langchain.chat_models.base import BaseChatModel
from langchain.tools.base import BaseTool
from langchain.chains.llm import LLMChain
from langchain.agents.agent import AgentOutputParser
from typing import List, Optional
from langchain.schema import SystemMessage, Document
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.auto_agents.autogpt.output_parser import AutoGPTOutputParser
class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object (vectorstore retriever) to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        user_input: The fixed instruction sent to the model every turn.
        chain: The LLMChain used to query the model.
        output_parser: Parses the model reply into an AgentAction.
        tools: The tools the agent may invoke by name.
    """

    def __init__(
        self,
        ai_name,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: AgentOutputParser,
        tools: List[BaseTool],
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history = []
        self.next_action_count = 0
        self.user_input = "Determine which next command to use, and respond using the format specified above:"
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        output_parser: Optional[AgentOutputParser] = None,
    ):
        """Construct an Agent from an LLM and a list of tools."""
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "user_input"],
        )
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            output_parser or AutoGPTOutputParser(),
            tools,
        )

    def run(self):
        """Run the interaction loop.

        Each iteration sends the message history to the model, parses the
        reply into a command, executes the matching tool, and records the
        result in both the vectorstore memory and the message history.

        NOTE(review): the loop has no termination condition; loop_count is
        tracked but no continuous-run limit is enforced yet.
        """
        # Interaction Loop
        loop_count = 0
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            # Send message to AI, get response
            assistant_reply = self.chain.run(
                user_input=self.user_input,
                messages=self.full_message_history,
                memory=self.memory,
            )
            # Print Assistant thoughts
            print(assistant_reply)
            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools_by_name = {t.name: t for t in self.tools}
            tool = tools_by_name.get(action.tool)
            if tool is None:
                # Bug fix: the original raw dict lookup raised KeyError when
                # the model invented a command name (e.g. 'file output'),
                # crashing the loop. Feed the error back to the model instead
                # so it can self-correct on the next turn.
                result = (
                    f"Unknown command {action.tool}. "
                    f"Please choose one of the available commands: "
                    f"{', '.join(tools_by_name)}."
                )
            else:
                # Execute command
                observation = tool.run(action.tool_input)
                result = f"Command {tool.name} returned: {observation}"
            memory_to_add = (f"Assistant Reply: {assistant_reply} "
                             f"\nResult: {result} "
                             f"\nHuman Feedback: {self.user_input} "
                             )
            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.full_message_history.append(SystemMessage(content=result))

View File

@@ -0,0 +1,29 @@
from typing import Dict, Any, List
from pydantic import Field
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
class AutoGPTMemory(BaseChatMemory):
    """Chat memory that pairs recent conversation history with documents
    retrieved from a vectorstore for the current input."""

    # VectorStoreRetriever object to connect to.
    retriever: VectorStoreRetriever = Field(exclude=True)

    @property
    def memory_variables(self) -> List[str]:
        """Names of the variables this memory injects into the prompt."""
        return ["chat_history", "relevant_context"]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is not None:
            return self.input_key
        return get_prompt_input_key(inputs, self.memory_variables)

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return the last ten messages plus documents relevant to the query."""
        key = self._get_prompt_input_key(inputs)
        relevant_docs = self.retriever.get_relevant_documents(inputs[key])
        recent_messages = self.chat_memory.messages[-10:]
        return {
            "chat_history": recent_messages,
            "relevant_context": relevant_docs,
        }

View File

@@ -0,0 +1,11 @@
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
import json
class AutoGPTOutputParser(AgentOutputParser):
    """Parse the model's JSON reply into an AgentAction.

    The reply is expected to be a JSON object containing a "command" entry
    with "name" and "input" fields (the response format declared by
    PromptGenerator elsewhere in this package).
    """

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Return the command encoded in *text* as an AgentAction.

        Robustness fix: the original called ``json.loads(text)`` and indexed
        the result unchecked, so any malformed reply (invalid JSON, or a
        missing "command" section) raised and crashed the agent loop.
        Such replies are now surfaced as an "ERROR" action so the caller can
        feed the problem back to the model.
        """
        try:
            parsed = json.loads(text)
            command = parsed["command"]
            return AgentAction(
                tool=command["name"],
                tool_input=command["input"],
                log=text,
            )
        except (json.JSONDecodeError, KeyError, TypeError) as err:
            return AgentAction(
                tool="ERROR",
                tool_input=f"Could not parse reply: {err}",
                log=text,
            )

View File

@@ -0,0 +1,54 @@
from typing import List, Any
from pydantic import BaseModel, Field
from langchain.prompts.chat import BaseChatPromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ChatMessagePromptTemplate
from langchain.auto_agents.autogpt.prompt_generator import get_prompt
from langchain.schema import BaseMessage, SystemMessage, HumanMessage, ChatMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
import time
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
    """Chat prompt template for the Auto-GPT agent."""

    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    ai_goals: List[str] = Field(default=["Increase net worth", "Grow Twitter Account",
                                         "Develop and manage multiple businesses autonomously"])

    def construct_full_prompt(self) -> str:
        """
        Returns a prompt to the user with the class information in an organized fashion.

        Parameters:
            None

        Returns:
            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
        """
        prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
        numbered_goals = "".join(
            f"{idx + 1}. {goal}\n" for idx, goal in enumerate(self.ai_goals)
        )
        return (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
            + numbered_goals
            + f"\n\n{get_prompt(self.tools)}"
        )

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Assemble the message list sent to the chat model each turn:
        system prompt, current time, relevant past events, the last two
        messages of history, and the standing user instruction."""
        memory: VectorStoreRetriever = kwargs["memory"]
        recent = kwargs["messages"][-2:]
        relevant_memory = memory.get_relevant_documents(str(recent))
        assembled = [
            SystemMessage(content=self.construct_full_prompt()),
            SystemMessage(content=f"The current time and date is {time.strftime('%c')}"),
            SystemMessage(content=f"This reminds you of these events from your past:\n{relevant_memory}\n\n"),
        ]
        assembled.extend(recent)
        assembled.append(HumanMessage(content=kwargs["user_input"]))
        return assembled

View File

@@ -0,0 +1,135 @@
import json
from langchain.tools.base import BaseTool
class PromptGenerator:
    """
    A class for generating custom prompt strings based on constraints, commands, resources, and performance evaluations.
    """

    def __init__(self):
        """
        Initialize the PromptGenerator object with empty lists of constraints, commands, resources, and performance evaluations.
        """
        self.constraints = []
        self.commands = []
        self.resources = []
        self.performance_evaluation = []
        # Template dict shown to the model; rendered verbatim via json.dumps.
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user"
            },
            "command": {
                "name": "tool name",
                "input": "input to the tool"
            }
        }

    def add_constraint(self, constraint):
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool):
        """Register a tool as an invocable command."""
        self.commands.append(tool)

    def _generate_command_string(self, tool):
        """Render a tool as 'name: description'."""
        return f'{tool.name}: {tool.description}'

    def add_resource(self, resource):
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation):
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items, item_type='list'):
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list. Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == 'command':
            rendered = (
                f"{num}. {self._generate_command_string(item)}"
                for num, item in enumerate(items, start=1)
            )
        else:
            rendered = (
                f"{num}. {item}" for num, item in enumerate(items, start=1)
            )
        return "\n".join(rendered)

    def generate_prompt_string(self):
        """
        Generate a prompt string based on the constraints, commands, resources, and performance evaluations.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        sections = [
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n",
            f"Commands:\n{self._generate_numbered_list(self.commands, item_type='command')}\n\n",
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n",
            f"Performance Evaluation:\n{self._generate_numbered_list(self.performance_evaluation)}\n\n",
            f"You should only respond in JSON format as described below \nResponse Format: \n{formatted_response_format} \nEnsure the response can be parsed by Python json.loads",
        ]
        return "".join(sections)
def get_prompt(tools):
    """
    This function generates a prompt string that includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """
    # Initialize the PromptGenerator object
    generator = PromptGenerator()

    # Constraints shown to the model, in display order.
    constraints = (
        "~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.",
        "If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.",
        "No user assistance",
        'Exclusively use the commands listed in double quotes e.g. "command name"',
    )
    for constraint in constraints:
        generator.add_constraint(constraint)

    # Every supplied tool becomes an invocable command.
    for tool in tools:
        generator.add_tool(tool)

    # Resources the model is told it has access to.
    resources = (
        "Internet access for searches and information gathering.",
        "Long Term memory management.",
        "GPT-3.5 powered Agents for delegation of simple tasks.",
        "File output.",
    )
    for resource in resources:
        generator.add_resource(resource)

    # Self-evaluation guidance, in display order.
    evaluations = (
        "Continuously review and analyze your actions to ensure you are performing to the best of your abilities.",
        "Constructively self-criticize your big-picture behavior constantly.",
        "Reflect on past decisions and strategies to refine your approach.",
        "Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.",
    )
    for evaluation in evaluations:
        generator.add_performance_evaluation(evaluation)

    # Generate the prompt string
    return generator.generate_prompt_string()