beef up agent docs (#10866)
@@ -1,130 +0,0 @@
The `chat-conversational-react-description` agent type lets us create a conversational agent using a chat model instead of an LLM.
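
The snippet that follows assumes a `tools` list and an `OPENAI_API_KEY` are already defined. A minimal sketch of one possible `tools` setup, using the single "Current Search" tool that appears in the transcripts below (this is an assumption for illustration; the `SerpAPIWrapper` additionally needs a SerpAPI key):

```python
from langchain.agents import Tool
from langchain.utilities import SerpAPIWrapper

# Hypothetical setup: one search tool, matching the "Current Search" tool used in the runs below.
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Current Search",
        func=search.run,
        description="useful for when you need to answer questions about current events or the current state of the world",
    ),
]
```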

```python
from langchain.agents import initialize_agent, AgentType
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0)
agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)
```

```python
agent_chain.run(input="hi, i am bob")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...
{
    "action": "Final Answer",
    "action_input": "Hello Bob! How can I assist you today?"
}

> Finished chain.


'Hello Bob! How can I assist you today?'
```

</CodeOutputBlock>


```python
agent_chain.run(input="what's my name?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...
{
    "action": "Final Answer",
    "action_input": "Your name is Bob."
}

> Finished chain.


'Your name is Bob.'
```

</CodeOutputBlock>


```python
agent_chain.run("what are some good dinners to make this week, if i like thai food?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...
{
    "action": "Current Search",
    "action_input": "Thai food dinner recipes"
}
Observation: 64 easy Thai recipes for any night of the week · Thai curry noodle soup · Thai yellow cauliflower, snake bean and tofu curry · Thai-spiced chicken hand pies · Thai ...
Thought:{
    "action": "Final Answer",
    "action_input": "Here are some Thai food dinner recipes you can try this week: Thai curry noodle soup, Thai yellow cauliflower, snake bean and tofu curry, Thai-spiced chicken hand pies, and many more. You can find the full list of recipes at the source I found earlier."
}

> Finished chain.


'Here are some Thai food dinner recipes you can try this week: Thai curry noodle soup, Thai yellow cauliflower, snake bean and tofu curry, Thai-spiced chicken hand pies, and many more. You can find the full list of recipes at the source I found earlier.'
```

</CodeOutputBlock>


```python
agent_chain.run(input="tell me the last letter in my name, and also tell me who won the world cup in 1978?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...
{
    "action": "Final Answer",
    "action_input": "The last letter in your name is 'b'. Argentina won the World Cup in 1978."
}

> Finished chain.


"The last letter in your name is 'b'. Argentina won the World Cup in 1978."
```

</CodeOutputBlock>


```python
agent_chain.run(input="whats the weather like in pomfret?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...
{
    "action": "Current Search",
    "action_input": "weather in pomfret"
}
Observation: Cloudy with showers. Low around 55F. Winds S at 5 to 10 mph. Chance of rain 60%. Humidity76%.
Thought:{
    "action": "Final Answer",
    "action_input": "Cloudy with showers. Low around 55F. Winds S at 5 to 10 mph. Chance of rain 60%. Humidity76%."
}

> Finished chain.


'Cloudy with showers. Low around 55F. Winds S at 5 to 10 mph. Chance of rain 60%. Humidity76%.'
```

</CodeOutputBlock>
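
Since the agent was initialized with `ConversationBufferMemory`, the accumulated chat history can also be inspected directly; a small sketch using the standard memory API (not part of the original walkthrough):

```python
# Returns the stored conversation under the "chat_history" key.
print(memory.load_memory_variables({}))
```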
@@ -1,150 +0,0 @@

This is accomplished with a specific type of agent (`conversational-react-description`) which expects to be used with a memory component.


```python
from langchain.agents import Tool
from langchain.agents import AgentType
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
from langchain.utilities import SerpAPIWrapper
from langchain.agents import initialize_agent
```


```python
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Current Search",
        func=search.run,
        description="useful for when you need to answer questions about current events or the current state of the world"
    ),
]
```


```python
memory = ConversationBufferMemory(memory_key="chat_history")
```


```python
llm = OpenAI(temperature=0)
agent_chain = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)
```


```python
agent_chain.run(input="hi, i am bob")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...

Thought: Do I need to use a tool? No
AI: Hi Bob, nice to meet you! How can I help you today?

> Finished chain.


'Hi Bob, nice to meet you! How can I help you today?'
```

</CodeOutputBlock>


```python
agent_chain.run(input="what's my name?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...

Thought: Do I need to use a tool? No
AI: Your name is Bob!

> Finished chain.


'Your name is Bob!'
```

</CodeOutputBlock>


```python
agent_chain.run("what are some good dinners to make this week, if i like thai food?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...

Thought: Do I need to use a tool? Yes
Action: Current Search
Action Input: Thai food dinner recipes
Observation: 59 easy Thai recipes for any night of the week · Marion Grasby's Thai spicy chilli and basil fried rice · Thai curry noodle soup · Marion Grasby's Thai Spicy ...
Thought: Do I need to use a tool? No
AI: Here are some great Thai dinner recipes you can try this week: Marion Grasby's Thai Spicy Chilli and Basil Fried Rice, Thai Curry Noodle Soup, Thai Green Curry with Coconut Rice, Thai Red Curry with Vegetables, and Thai Coconut Soup. I hope you enjoy them!

> Finished chain.


"Here are some great Thai dinner recipes you can try this week: Marion Grasby's Thai Spicy Chilli and Basil Fried Rice, Thai Curry Noodle Soup, Thai Green Curry with Coconut Rice, Thai Red Curry with Vegetables, and Thai Coconut Soup. I hope you enjoy them!"
```

</CodeOutputBlock>


```python
agent_chain.run(input="tell me the last letter in my name, and also tell me who won the world cup in 1978?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...

Thought: Do I need to use a tool? Yes
Action: Current Search
Action Input: Who won the World Cup in 1978
Observation: Argentina national football team
Thought: Do I need to use a tool? No
AI: The last letter in your name is "b" and the winner of the 1978 World Cup was the Argentina national football team.

> Finished chain.


'The last letter in your name is "b" and the winner of the 1978 World Cup was the Argentina national football team.'
```

</CodeOutputBlock>


```python
agent_chain.run(input="whats the current temperature in pomfret?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...

Thought: Do I need to use a tool? Yes
Action: Current Search
Action Input: Current temperature in Pomfret
Observation: Partly cloudy skies. High around 70F. Winds W at 5 to 10 mph. Humidity41%.
Thought: Do I need to use a tool? No
AI: The current temperature in Pomfret is around 70F with partly cloudy skies and winds W at 5 to 10 mph. The humidity is 41%.

> Finished chain.


'The current temperature in Pomfret is around 70F with partly cloudy skies and winds W at 5 to 10 mph. The humidity is 41%.'
```

</CodeOutputBlock>
@@ -1,80 +0,0 @@

Install the `openai` and `google-search-results` packages, which are required because the LangChain packages call them internally.

```bash
pip install openai google-search-results
```
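
Both the OpenAI models and the SerpAPI search wrapper read their API keys from environment variables. A minimal sketch (the key values are placeholders you would supply yourself):

```python
import os

# Placeholder credentials -- replace with your own keys.
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
```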

```python
from langchain.agents import initialize_agent, AgentType, Tool
from langchain.chains import LLMMathChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.utilities import SerpAPIWrapper, SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
```


```python
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
db = SQLDatabase.from_uri("sqlite:///../../../../../notebooks/Chinook.db")
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events. You should ask targeted questions"
    ),
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for when you need to answer questions about math"
    ),
    Tool(
        name="FooBar-DB",
        func=db_chain.run,
        description="useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context"
    )
]
```


```python
agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
```
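
The `FooBar-DB` tool is defined above but not exercised in the run below; a hypothetical follow-up query that would route to it could look like this (the question is illustrative, not from the original doc):

```python
agent.run("How many employees are there in the FooBar database?")
```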

```python
agent.run("Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?")
```

<CodeOutputBlock lang="python">

```
> Entering new chain...

Invoking: `Search` with `{'query': 'Leo DiCaprio girlfriend'}`


Amidst his casual romance with Gigi, Leo allegedly entered a relationship with 19-year old model, Eden Polani, in February 2023.
Invoking: `Calculator` with `{'expression': '19^0.43'}`


> Entering new chain...
19^0.43```text
19**0.43
```
...numexpr.evaluate("19**0.43")...

Answer: 3.547023357958959
> Finished chain.
Answer: 3.547023357958959Leo DiCaprio's girlfriend is reportedly Eden Polani. Her current age raised to the power of 0.43 is approximately 3.55.

> Finished chain.


"Leo DiCaprio's girlfriend is reportedly Eden Polani. Her current age raised to the power of 0.43 is approximately 3.55."
```

</CodeOutputBlock>
@@ -1,228 +0,0 @@

## Imports


```python
from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
from langchain.llms import OpenAI
from langchain.utilities import SerpAPIWrapper
from langchain.agents.tools import Tool
from langchain.chains import LLMMathChain
```

## Tools


```python
search = SerpAPIWrapper()
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events"
    ),
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for when you need to answer questions about math"
    ),
]
```

## Planner, Executor, and Agent


```python
model = ChatOpenAI(temperature=0)
```


```python
planner = load_chat_planner(model)
```


```python
executor = load_agent_executor(model, tools, verbose=True)
```


```python
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
```

## Run example


```python
agent.run("Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?")
```

<CodeOutputBlock lang="python">

```
> Entering new PlanAndExecute chain...
steps=[Step(value="Search for Leo DiCaprio's girlfriend on the internet."), Step(value='Find her current age.'), Step(value='Raise her current age to the 0.43 power using a calculator or programming language.'), Step(value='Output the result.'), Step(value="Given the above steps taken, respond to the user's original question.\n\n")]

> Entering new AgentExecutor chain...
Action:
```
{
  "action": "Search",
  "action_input": "Who is Leo DiCaprio's girlfriend?"
}
```

Observation: DiCaprio broke up with girlfriend Camila Morrone, 25, in the summer of 2022, after dating for four years. He's since been linked to another famous supermodel – Gigi Hadid. The power couple were first supposedly an item in September after being spotted getting cozy during a party at New York Fashion Week.
Thought:Based on the previous observation, I can provide the answer to the current objective.
Action:
```
{
  "action": "Final Answer",
  "action_input": "Leo DiCaprio is currently linked to Gigi Hadid."
}
```

> Finished chain.
*****

Step: Search for Leo DiCaprio's girlfriend on the internet.

Response: Leo DiCaprio is currently linked to Gigi Hadid.

> Entering new AgentExecutor chain...
Action:
```
{
  "action": "Search",
  "action_input": "What is Gigi Hadid's current age?"
}
```

Observation: 28 years
Thought:Previous steps: steps=[(Step(value="Search for Leo DiCaprio's girlfriend on the internet."), StepResponse(response='Leo DiCaprio is currently linked to Gigi Hadid.'))]

Current objective: value='Find her current age.'

Action:
```
{
  "action": "Search",
  "action_input": "What is Gigi Hadid's current age?"
}
```

Observation: 28 years
Thought:Previous steps: steps=[(Step(value="Search for Leo DiCaprio's girlfriend on the internet."), StepResponse(response='Leo DiCaprio is currently linked to Gigi Hadid.')), (Step(value='Find her current age.'), StepResponse(response='28 years'))]

Current objective: None

Action:
```
{
  "action": "Final Answer",
  "action_input": "Gigi Hadid's current age is 28 years."
}
```

> Finished chain.
*****

Step: Find her current age.

Response: Gigi Hadid's current age is 28 years.

> Entering new AgentExecutor chain...
Action:
```
{
  "action": "Calculator",
  "action_input": "28 ** 0.43"
}
```

> Entering new LLMMathChain chain...
28 ** 0.43
```text
28 ** 0.43
```
...numexpr.evaluate("28 ** 0.43")...

Answer: 4.1906168361987195
> Finished chain.

Observation: Answer: 4.1906168361987195
Thought:The next step is to provide the answer to the user's question.

Action:
```
{
  "action": "Final Answer",
  "action_input": "Gigi Hadid's current age raised to the 0.43 power is approximately 4.19."
}
```

> Finished chain.
*****

Step: Raise her current age to the 0.43 power using a calculator or programming language.

Response: Gigi Hadid's current age raised to the 0.43 power is approximately 4.19.

> Entering new AgentExecutor chain...
Action:
```
{
  "action": "Final Answer",
  "action_input": "The result is approximately 4.19."
}
```

> Finished chain.
*****

Step: Output the result.

Response: The result is approximately 4.19.

> Entering new AgentExecutor chain...
Action:
```
{
  "action": "Final Answer",
  "action_input": "Gigi Hadid's current age raised to the 0.43 power is approximately 4.19."
}
```

> Finished chain.
*****

Step: Given the above steps taken, respond to the user's original question.

Response: Gigi Hadid's current age raised to the 0.43 power is approximately 4.19.
> Finished chain.


"Gigi Hadid's current age raised to the 0.43 power is approximately 4.19."
```

</CodeOutputBlock>
@@ -1,62 +0,0 @@

```python
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
```

First, let's load the language model we're going to use to control the agent.

```python
llm = OpenAI(temperature=0)
```

Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.

```python
tools = load_tools(["serpapi", "llm-math"], llm=llm)
```

Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.

```python
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
```

Now let's test it out!

```python
agent.run("Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?")
```

<CodeOutputBlock lang="python">

```
> Entering new AgentExecutor chain...
 I need to find out who Leo DiCaprio's girlfriend is and then calculate her age raised to the 0.43 power.
Action: Search
Action Input: "Leo DiCaprio girlfriend"
Observation: Camila Morrone
Thought: I need to find out Camila Morrone's age
Action: Search
Action Input: "Camila Morrone age"
Observation: 25 years
Thought: I need to calculate 25 raised to the 0.43 power
Action: Calculator
Action Input: 25^0.43
Observation: Answer: 3.991298452658078

Thought: I now know the final answer
Final Answer: Camila Morrone is Leo DiCaprio's girlfriend and her current age raised to the 0.43 power is 3.991298452658078.

> Finished chain.


"Camila Morrone is Leo DiCaprio's girlfriend and her current age raised to the 0.43 power is 3.991298452658078."
```

</CodeOutputBlock>
@@ -1,7 +0,0 @@

```python
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI

# Assumes the `tools` list from the previous example (e.g. loaded via load_tools(["serpapi", "llm-math"], llm=llm)).
chat_model = ChatOpenAI(temperature=0)
agent = initialize_agent(tools, chat_model, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run("Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?")
```
@@ -1,13 +1,15 @@

This will go over how to get started building an agent.
We will use a LangChain agent class, but show how to customize it to give it specific context.
We will then define custom tools, and then run it all in the standard LangChain `AgentExecutor`.
We will create this agent from scratch, using LangChain Expression Language.
We will then define custom tools, and then run it in a custom loop (we will also show how to use the standard LangChain `AgentExecutor`).

### Set up the agent

We will use the `OpenAIFunctionsAgent`.
This is the easiest and best agent to get started with.
It does, however, require the use of `ChatOpenAI` models.
If you want to use a different language model, we would recommend using the [ReAct](/docs/modules/agents/agent_types/react) agent.
We first need to create our agent.
This is the chain responsible for determining what action to take next.

In this example, we will use OpenAI Function Calling to create this agent.
This is generally the most reliable way to create agents.
In this example we will show what it is like to construct this agent from scratch, using LangChain Expression Language.

For this guide, we will construct a custom agent that has access to a custom tool.
We are choosing this example because we think for most use cases you will NEED to customize either the agent or the tools.
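
The hunk below picks up after the tool definitions (its context line shows `tools = [get_word_length]`). For completeness, a minimal sketch of the setup this guide appears to assume — a chat model and a single custom word-length tool; the names follow that context line, the rest is an assumption:

```python
from langchain.chat_models import ChatOpenAI
from langchain.agents import tool

llm = ChatOpenAI(temperature=0)

@tool
def get_word_length(word: str) -> int:
    """Returns the length of a word."""
    return len(word)

tools = [get_word_length]
```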
@@ -39,23 +41,94 @@ tools = [get_word_length]

Now let us create the prompt.
We can use the `OpenAIFunctionsAgent.create_prompt` helper function to create a prompt automatically.
This allows for a few different ways to customize, including passing in a custom `SystemMessage`, which we will do.
Because OpenAI Function Calling is fine-tuned for tool usage, we hardly need any instructions on how to reason or how to format output.
We will just have two input variables: `input` (for the user question) and `agent_scratchpad` (for any previous steps taken).

```python
from langchain.schema import SystemMessage
from langchain.agents import OpenAIFunctionsAgent
system_message = SystemMessage(content="You are very powerful assistant, but bad at calculating lengths of words.")
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are very powerful assistant, but bad at calculating lengths of words."),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
```

How does the agent know what tools it can use?
Those are passed in as a separate argument, so we can bind them as keyword arguments to the LLM.

```python
from langchain.tools.render import format_tool_to_openai_function
llm_with_tools = llm.bind(
    functions=[format_tool_to_openai_function(t) for t in tools]
)
```

Putting those pieces together, we can now create the agent.
We will import two final utility functions: a component for formatting intermediate steps into messages, and a component for converting the output message into an agent action/agent finish.


```python
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_to_openai_functions(x['intermediate_steps'])
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
```

Finally, we create the `AgentExecutor` - the runtime for our agent.
Now that we have our agent, let's play around with it!
Let's pass in a simple question and empty intermediate steps and see what it returns:

```python
agent.invoke({
    "input": "how many letters in the word educa?",
    "intermediate_steps": []
})
```

We can see that it responds with an `AgentAction` to take (it's actually an `AgentActionMessageLog` - a subclass of `AgentAction` which also tracks the full message log).
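
A quick sketch of inspecting that return value (the attribute names come from `AgentAction`; the example values in the comments match the run shown further below):

```python
action = agent.invoke({
    "input": "how many letters in the word educa?",
    "intermediate_steps": []
})
print(action.tool)        # e.g. get_word_length
print(action.tool_input)  # e.g. {'word': 'educa'}
```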

So this is just the first step - now we need to write a runtime for this.
The simplest one is just one that continuously loops, calling the agent, then taking the action, and repeating until an `AgentFinish` is returned.
Let's code that up below:

```python
from langchain.schema.agent import AgentFinish

intermediate_steps = []
while True:
    output = agent.invoke({
        "input": "how many letters in the word educa?",
        "intermediate_steps": intermediate_steps
    })
    if isinstance(output, AgentFinish):
        final_result = output.return_values["output"]
        break
    else:
        print(output.tool, output.tool_input)
        tool = {
            "get_word_length": get_word_length
        }[output.tool]
        observation = tool.run(output.tool_input)
        intermediate_steps.append((output, observation))
print(final_result)
```

We can see this prints out the following:

<CodeOutputBlock lang="python">

```
get_word_length {'word': 'educa'}
There are 5 letters in the word "educa".
```

</CodeOutputBlock>

Woo! It's working.

To simplify this a bit, we can import and use the `AgentExecutor` class.
This bundles up all of the above and adds in error handling, early stopping, tracing, and other quality-of-life improvements that reduce the number of safeguards you need to write yourself.


```python
from langchain.agents import AgentExecutor
```
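
The diff cuts this code block off after the import; the construction it presumably continues with (an assumption, inferred from the `agent_executor` used below) would look like:

```python
# Hypothetical completion of the truncated block above.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
```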
@@ -66,7 +139,7 @@ Now let's test it out!

```python
agent_executor.run("how many letters in the word educa?")
agent_executor.invoke({"input": "how many letters in the word educa?"})
```

<CodeOutputBlock lang="python">
@@ -101,15 +174,18 @@ In order to do this, we need to do two things:

First, let's add a place for memory in the prompt.
We do this by adding a placeholder for messages with the key `"chat_history"`.
Notice that we put this ABOVE the new user input (to follow the conversation flow).

```python
from langchain.prompts import MessagesPlaceholder

MEMORY_KEY = "chat_history"
prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=MEMORY_KEY)]
)
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are very powerful assistant, but bad at calculating lengths of words."),
    MessagesPlaceholder(variable_name=MEMORY_KEY),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
```

Next, let's create a memory object.
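
As the context line in the next hunk shows, the memory object is a `ConversationBufferMemory` keyed on `MEMORY_KEY` and returning messages; a sketch:

```python
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key=MEMORY_KEY, return_messages=True)
```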
@@ -125,7 +201,11 @@ memory = ConversationBufferMemory(memory_key=MEMORY_KEY, return_messages=True)

We can then put it all together!

```python
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_to_openai_functions(x['intermediate_steps']),
    "chat_history": lambda x: x["chat_history"]
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
agent_executor.run("how many letters in the word educa?")
agent_executor.run("is that a real word?")
```