Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-09 23:12:38 +00:00)
update agent docs (#10894)
@@ -74,7 +74,8 @@
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import ReActSingleInputOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain import hub"
]
},
{
@@ -84,7 +85,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-chat\")"
]
},
@@ -125,6 +125,16 @@
"} | prompt | llm_with_stop | ReActSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b354cfe",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 23,
@@ -132,7 +142,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)"
]
@@ -276,6 +285,17 @@
"We can also use a chat model here. The main difference here is in the prompts used."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5a705b2",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain import hub"
]
},
{
"cell_type": "code",
"execution_count": 5,
@@ -283,19 +303,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "e93c0832",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")\n",
"chat_model = ChatOpenAI(temperature=0, model='gpt-4')"
]
},
@@ -322,6 +330,17 @@
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f50a5ea8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_messages"
]
},
{
"cell_type": "code",
"execution_count": 26,
@@ -329,9 +348,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_messages\n",
"\n",
"# We need some extra steering, or the chat model forgets how to respond sometimes\n",
"TEMPLATE_TOOL_RESPONSE = \"\"\"TOOL RESPONSE: \n",
"---------------------\n",
@@ -349,6 +365,16 @@
"} | prompt | chat_model_with_stop | JSONAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6cc033fc",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 27,
@@ -356,7 +382,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)"
]
@@ -491,13 +516,21 @@
{
"cell_type": "code",
"execution_count": null,
"id": "734d1b21",
"id": "141f2469",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "734d1b21",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
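
Taken together, the hunks above give each import in the conversational-agent notebook its own cell and move `from langchain import hub` up with the other imports. Pieced end to end, the LCEL path those cells build looks roughly like the sketch below; the `llm`, the example tool, the `prompt.partial(...)` step, and the `chat_history` entry in the runnable map do not appear in these hunks, so treat them as assumptions rather than the notebook's exact code.

# Sketch of the conversational ReAct agent the cells above assemble (LLM variant).
# Assumed pieces are marked; everything else mirrors lines visible in the hunks.
from langchain import hub
from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.tools.render import render_text_description

@tool
def word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)

tools = [word_length]        # assumption: stand-in for the notebook's real tools
llm = OpenAI(temperature=0)  # assumption: defined earlier in the notebook

prompt = hub.pull("hwchase17/react-chat")
prompt = prompt.partial(     # assumption: fills the prompt's tool placeholders
    tools=render_text_description(tools),
    tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["\nObservation"])  # stop string taken from the chat-model cell

agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    "chat_history": lambda x: x["chat_history"],  # assumption: supplied by the executor's memory
} | prompt | llm_with_stop | ReActSingleInputOutputParser()

memory = ConversationBufferMemory(memory_key="chat_history")
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)
# agent_executor.invoke({"input": "hi, i am bob"})
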
@@ -90,6 +90,16 @@
"We will first use LangChain Expression Language to create this agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eac103f1",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder"
]
},
{
"cell_type": "code",
"execution_count": 3,
@@ -97,7 +107,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"prompt = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"You are a helpful assistant\"),\n",
" (\"user\", \"{input}\"),\n",
@@ -105,6 +114,16 @@
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50f40df4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import format_tool_to_openai_function"
]
},
{
"cell_type": "code",
"execution_count": 4,
@@ -112,12 +131,22 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import format_tool_to_openai_function\n",
"llm_with_tools = llm.bind(\n",
" functions=[format_tool_to_openai_function(t) for t in tools]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3cafa0a3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 5,
@@ -125,14 +154,22 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_to_openai_functions(x['intermediate_steps'])\n",
"} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5125573e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 6,
@@ -140,7 +177,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
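
This file documents the OpenAI functions agent. The cells shown bind the tools to the model as functions and pipe a runnable map into the prompt, the bound model, and `OpenAIFunctionsAgentOutputParser`. A minimal end-to-end sketch, where the `ChatOpenAI` model, the stand-in tool, and the `MessagesPlaceholder` line (cut off in the hunk) are assumptions:

# Sketch of the OpenAI functions agent; assumed pieces are marked.
from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools.render import format_tool_to_openai_function

@tool
def word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)

tools = [word_length]            # assumption: stand-in for the notebook's real tools
llm = ChatOpenAI(temperature=0)  # assumption: defined earlier in the notebook

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),  # assumption: cut off in the hunk
])

llm_with_tools = llm.bind(
    functions=[format_tool_to_openai_function(t) for t in tools]
)

agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_to_openai_functions(x["intermediate_steps"]),
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# agent_executor.invoke({"input": "how many letters are in the word 'diff'?"})
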
@@ -78,7 +78,8 @@
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import ReActSingleInputOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain import hub"
]
},
{
@@ -88,7 +89,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react\")\n",
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
@@ -119,6 +119,16 @@
"} | prompt | llm_with_stop | ReActSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0a57769",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 8,
@@ -126,7 +136,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
@@ -250,6 +259,16 @@
"The main difference here is a different prompt. We will use JSON to encode the agent's actions (chat models are a bit tougher to steet, so using JSON helps to enforce the output format)."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6eeb1693",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 29,
@@ -257,8 +276,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"chat_model = ChatOpenAI(temperature=0)"
]
},
@@ -286,6 +303,16 @@
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "deaeb1f6",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 31,
@@ -293,7 +320,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps'])\n",
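
This file is the ReAct agent notebook, which builds both a plain-LLM agent and a chat-model variant that emits JSON actions (parsed by `ReActJsonSingleInputOutputParser`). A sketch of the plain-LLM path in the order the cells now run; the `llm`, the example tool, the `tool_names` partial, and the stop sequence are assumptions (only the chat model's `bind(stop=["\nObservation"])` is visible in these hunks):

# Sketch of the base ReAct agent (plain-LLM path); assumed pieces are marked.
from langchain import hub
from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.llms import OpenAI
from langchain.tools.render import render_text_description

@tool
def word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)

tools = [word_length]        # assumption: stand-in for the notebook's real tools
llm = OpenAI(temperature=0)  # assumption: defined earlier in the notebook

prompt = hub.pull("hwchase17/react")
prompt = prompt.partial(
    tools=render_text_description(tools),
    tool_names=", ".join([t.name for t in tools]),  # assumption: cut off in the hunk
)
llm_with_stop = llm.bind(stop=["\nObservation"])  # assumption: mirrors the chat-model cell

agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
} | prompt | llm_with_stop | ReActSingleInputOutputParser()

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# agent_executor.invoke({"input": "how many letters are in the word 'agent'?"})
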
@@ -51,7 +51,8 @@
"outputs": [],
"source": [
"from langchain.agents.output_parsers import SelfAskOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain import hub"
]
},
{
@@ -61,7 +62,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/self-ask-with-search\")"
]
},
@@ -93,6 +93,16 @@
"} | prompt | llm_with_stop | SelfAskOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "643c3bfa",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 14,
@@ -100,7 +110,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
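
This file is the self-ask-with-search notebook. Only the imports, the `hwchase17/self-ask-with-search` pull, the final pipe into `SelfAskOutputParser`, and the `AgentExecutor` wiring are confirmed by these hunks; the search tool (which must be named "Intermediate Answer", the tool name the parser routes follow-up questions to), the `llm`, the runnable-map keys, and the stop sequence below are assumptions:

# Sketch of the self-ask-with-search agent; assumed pieces are marked.
from langchain import hub
from langchain.agents import AgentExecutor, Tool
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import SelfAskOutputParser
from langchain.llms import OpenAI

def search(query: str) -> str:
    """Assumption: stand-in for the notebook's real search tool (e.g. a search API wrapper)."""
    return f"Placeholder result for: {query}"

tools = [Tool(name="Intermediate Answer", func=search, description="Search for an answer")]
llm = OpenAI(temperature=0)  # assumption: defined earlier in the notebook

prompt = hub.pull("hwchase17/self-ask-with-search")
llm_with_stop = llm.bind(stop=["\nIntermediate answer:"])  # assumption: self-ask stop sequence

agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
} | prompt | llm_with_stop | SelfAskOutputParser()

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# agent_executor.invoke({"input": "What is the hometown of the reigning men's U.S. Open champion?"})
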
@@ -86,6 +86,16 @@
"We can first construct this agent using LangChain Expression Language"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf35a623",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub"
]
},
{
"cell_type": "code",
"execution_count": 19,
@@ -93,10 +103,19 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-multi-input-json\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38c6496f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description_and_args"
]
},
{
"cell_type": "code",
"execution_count": 20,
@@ -104,7 +123,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description_and_args\n",
"prompt = prompt.partial(\n",
" tools=render_text_description_and_args(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
@@ -122,6 +140,17 @@
"llm_with_stop = llm.bind(stop=[\"Observation\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ceceadb",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
]
},
{
"cell_type": "code",
"execution_count": 22,
@@ -129,14 +158,22 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps']),\n",
"} | prompt | llm_with_stop | JSONAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "470b0859",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 23,
@@ -144,7 +181,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
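
This file is the structured (multi-input) JSON agent notebook, and almost the entire pipeline is visible in the hunks: the `hwchase17/react-multi-input-json` prompt, `render_text_description_and_args`, the `Observation` stop token, and `JSONAgentOutputParser`. A sketch with a `ChatOpenAI` model and a small multi-argument tool standing in for the notebook's own (both assumptions):

# Sketch of the multi-input JSON (structured tools) agent; assumed pieces are marked.
from langchain import hub
from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import JSONAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import render_text_description_and_args

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

tools = [multiply]               # assumption: stand-in for the notebook's real tools
llm = ChatOpenAI(temperature=0)  # assumption: defined earlier in the notebook

prompt = hub.pull("hwchase17/react-multi-input-json")
prompt = prompt.partial(
    tools=render_text_description_and_args(tools),
    tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["Observation"])

agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
} | prompt | llm_with_stop | JSONAgentOutputParser()

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# agent_executor.invoke({"input": "what is 3 times 7?"})
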
@@ -75,7 +75,8 @@
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import XMLAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_xml"
"from langchain.agents.format_scratchpad import format_xml\n",
"from langchain import hub"
]
},
{
@@ -85,7 +86,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/xml-agent\")"
]
},
@@ -125,6 +125,16 @@
"} | prompt | llm_with_stop | XMLAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2bb03e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 16,
@@ -132,7 +142,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
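
Finally, the XML agent notebook. The hunks confirm the imports, the `hwchase17/xml-agent` pull, and the final pipe into `XMLAgentOutputParser`; the chat model, the stand-in tool, the stop sequence, and the runnable-map keys below are assumptions (the input key in particular must match whatever variable the pulled prompt declares):

# Sketch of the XML agent assembly; assumed pieces are marked.
from langchain import hub
from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_xml
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import render_text_description

@tool
def word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)

tools = [word_length]            # assumption: stand-in for the notebook's real tools
llm = ChatOpenAI(temperature=0)  # assumption: any chat model can stand in here

prompt = hub.pull("hwchase17/xml-agent")
prompt = prompt.partial(tools=render_text_description(tools))  # assumption: partial step not in the hunks
llm_with_stop = llm.bind(stop=["</tool_input>"])  # assumption: stop at the closing tool-input tag

agent = {
    # assumption: check prompt.input_variables for the exact input key the prompt expects
    "question": lambda x: x["question"],
    "agent_scratchpad": lambda x: format_xml(x["intermediate_steps"]),
} | prompt | llm_with_stop | XMLAgentOutputParser()

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# agent_executor.invoke({"question": "how many letters are in the word 'xml'?"})
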