beef up agent docs (#10866)

This commit is contained in:
Harrison Chase
2023-09-20 23:09:58 -07:00
committed by GitHub
parent 4b558c9e17
commit 808caca607
28 changed files with 1973 additions and 628 deletions

View File

@@ -0,0 +1,528 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "69014601",
"metadata": {},
"source": [
"# Conversational\n",
"\n",
"This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\n",
"\n",
"If we compare it to the standard ReAct agent, the main difference is the prompt.\n",
"We want it to be much more conversational."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cc3fad9e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.llms import OpenAI\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import initialize_agent"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2d84b9bc",
"metadata": {},
"outputs": [],
"source": [
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Current Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events or the current state of the world\"\n",
" ),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "799a31bf",
"metadata": {},
"outputs": [],
"source": [
"llm=OpenAI(temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "f9d11cb6",
"metadata": {},
"source": [
"## Using LCEL\n",
"\n",
"We will first show how to create this agent using LCEL"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "03c09ef9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import ReActSingleInputOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "6bd84102",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-chat\")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "7ccc785d",
"metadata": {},
"outputs": [],
"source": [
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d7aac2b0",
"metadata": {},
"outputs": [],
"source": [
"llm_with_stop = llm.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "a028bca6",
"metadata": {},
"outputs": [],
"source": [
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps']),\n",
" \"chat_history\": lambda x: x[\"chat_history\"]\n",
"} | prompt | llm_with_stop | ReActSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "9b044ae9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "adcdd0c7",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Thought: Do I need to use a tool? No\n",
"Final Answer: Hi Bob, nice to meet you! How can I help you today?\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Hi Bob, nice to meet you! How can I help you today?'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"hi, i am bob\"})['output']"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "c5846cd1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Thought: Do I need to use a tool? No\n",
"Final Answer: Your name is Bob.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Your name is Bob.'"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"whats my name?\"})['output']"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "95a1192a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Thought: Do I need to use a tool? Yes\n",
"Action: Current Search\n",
"Action Input: Movies showing 9/21/2023\u001b[0m\u001b[36;1m\u001b[1;3m['September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...']\u001b[0m\u001b[32;1m\u001b[1;3m Do I need to use a tool? No\n",
"Final Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.'"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})['output']"
]
},
{
"cell_type": "markdown",
"id": "c0b2d86d",
"metadata": {},
"source": [
"## Use the off-the-shelf agent\n",
"\n",
"We can also create this agent using the off-the-shelf agent class"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "53e43064",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
]
},
{
"cell_type": "markdown",
"id": "68e45a24",
"metadata": {},
"source": [
"## Use a chat model\n",
"\n",
"We can also use a chat model. The main difference here is in the prompts used."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "16b17ca8",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "e93c0832",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"chat_model = ChatOpenAI(temperature=0, model='gpt-4')"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "c8a94b0b",
"metadata": {},
"outputs": [],
"source": [
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "c5d710f2",
"metadata": {},
"outputs": [],
"source": [
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "2c845796",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_messages\n",
"\n",
"# We need some extra steering, or the chat model forgets how to respond sometimes\n",
"TEMPLATE_TOOL_RESPONSE = \"\"\"TOOL RESPONSE: \n",
"---------------------\n",
"{observation}\n",
"\n",
"USER'S INPUT\n",
"--------------------\n",
"\n",
"Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user. Do NOT respond with anything except a JSON snippet no matter what!\"\"\"\n",
"\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_messages(x['intermediate_steps'], template_tool_response=TEMPLATE_TOOL_RESPONSE),\n",
" \"chat_history\": lambda x: x[\"chat_history\"],\n",
"} | prompt | chat_model_with_stop | JSONAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "332ba2ff",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "139717b4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m```json\n",
"{\n",
" \"action\": \"Final Answer\",\n",
" \"action_input\": \"Hello Bob, how can I assist you today?\"\n",
"}\n",
"```\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Hello Bob, how can I assist you today?'"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"hi, i am bob\"})['output']"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "7e7cf6d3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m```json\n",
"{\n",
" \"action\": \"Final Answer\",\n",
" \"action_input\": \"Your name is Bob.\"\n",
"}\n",
"```\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Your name is Bob.'"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"whats my name?\"})['output']"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "3fc00073",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m```json\n",
"{\n",
" \"action\": \"Current Search\",\n",
" \"action_input\": \"movies showing on 9/21/2023\"\n",
"}\n",
"```\u001b[0m\u001b[36;1m\u001b[1;3m['September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...']\u001b[0m\u001b[32;1m\u001b[1;3m```json\n",
"{\n",
" \"action\": \"Final Answer\",\n",
" \"action_input\": \"Some movies that are showing on 9/21/2023 include 'The Creator', 'Dumb Money', 'Expend4bles', 'The Kill Room', 'The Inventor', 'The Equalizer 3', and 'PAW Patrol: The Mighty Movie'.\"\n",
"}\n",
"```\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"Some movies that are showing on 9/21/2023 include 'The Creator', 'Dumb Money', 'Expend4bles', 'The Kill Room', 'The Inventor', 'The Equalizer 3', and 'PAW Patrol: The Mighty Movie'.\""
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})['output']"
]
},
{
"cell_type": "markdown",
"id": "8d464ead",
"metadata": {},
"source": [
"We can also initialize the agent executor with a predefined agent type"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "734d1b21",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,259 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e10aa932",
"metadata": {},
"source": [
"# OpenAI functions\n",
"\n",
"Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been fine-tuned to detect when a function should be called and respond with the inputs that should be passed to the function. In an API call, you can describe functions and have the model intelligently choose to output a JSON object containing arguments to call those functions. The goal of the OpenAI Function APIs is to more reliably return valid and useful function calls than a generic text completion or chat API.\n",
"\n",
"The OpenAI Functions Agent is designed to work with these models.\n",
"\n",
"Install the `openai` and `google-search-results` packages, which are required because the LangChain packages call them internally."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ec89be68",
"metadata": {},
"outputs": [],
"source": [
"! pip install openai google-search-results"
]
},
{
"cell_type": "markdown",
"id": "82787d8d",
"metadata": {},
"source": [
"## Initialize tools\n",
"\n",
"We will first create some tools we can use"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b812b982",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import initialize_agent, AgentType, Tool\n",
"from langchain.chains import LLMMathChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.utilities import SerpAPIWrapper, SQLDatabase\n",
"from langchain_experimental.sql import SQLDatabaseChain"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "23fc0aa6",
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n",
"search = SerpAPIWrapper()\n",
"llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)\n",
"db = SQLDatabase.from_uri(\"sqlite:///../../../../../notebooks/Chinook.db\")\n",
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
" ),\n",
" Tool(\n",
" name=\"Calculator\",\n",
" func=llm_math_chain.run,\n",
" description=\"useful for when you need to answer questions about math\"\n",
" ),\n",
" Tool(\n",
" name=\"FooBar-DB\",\n",
" func=db_chain.run,\n",
" description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\"\n",
" )\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "39c3ba21",
"metadata": {},
"source": [
"## Using LCEL\n",
"\n",
"We will first use LangChain Expression Language to create this agent"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "55292bed",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"prompt = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"You are a helpful assistant\"),\n",
" (\"user\", \"{input}\"),\n",
" MessagesPlaceholder(variable_name=\"agent_scratchpad\"),\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "552421b3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import format_tool_to_openai_function\n",
"llm_with_tools = llm.bind(\n",
" functions=[format_tool_to_openai_function(t) for t in tools]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "bf514eb4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_to_openai_functions(x['intermediate_steps'])\n",
"} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "bdc7e506",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "2cd65218",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `Search` with `Leo DiCaprio's girlfriend`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m['Blake Lively and DiCaprio are believed to have enjoyed a whirlwind five-month romance in 2011. The pair were seen on a yacht together in Cannes, ...']\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `Calculator` with `0.43`\n",
"\n",
"\n",
"\u001b[0m\n",
"\n",
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
"0.43\u001b[32;1m\u001b[1;3m```text\n",
"0.43\n",
"```\n",
"...numexpr.evaluate(\"0.43\")...\n",
"\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3m0.43\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[33;1m\u001b[1;3mAnswer: 0.43\u001b[0m\u001b[32;1m\u001b[1;3mI'm sorry, but I couldn't find any information about Leo DiCaprio's current girlfriend. As for raising her age to the power of 0.43, I'm not sure what her current age is, so I can't provide an answer for that.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\",\n",
" 'output': \"I'm sorry, but I couldn't find any information about Leo DiCaprio's current girlfriend. As for raising her age to the power of 0.43, I'm not sure what her current age is, so I can't provide an answer for that.\"}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
]
},
{
"cell_type": "markdown",
"id": "8e91393f",
"metadata": {},
"source": [
"## Using OpenAIFunctionsAgent\n",
"\n",
"We can now use `OpenAIFunctionsAgent`, which creates this agent under the hood"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "9ed07c8f",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d9fb674",
"metadata": {},
"outputs": [],
"source": [
"agent_executor.invoke({\"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2bc581dc",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -444,9 +444,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "venv"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -458,7 +458,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.1"
}
},
"nbformat": 4,

View File

@@ -0,0 +1,365 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d82e62ec",
"metadata": {},
"source": [
"# ReAct\n",
"\n",
"This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "102b0e52",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import load_tools\n",
"from langchain.agents import initialize_agent\n",
"from langchain.agents import AgentType\n",
"from langchain.llms import OpenAI"
]
},
{
"cell_type": "markdown",
"id": "e0c9c056",
"metadata": {},
"source": [
"First, let's load the language model we're going to use to control the agent."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "184f0682",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "2e67a000",
"metadata": {},
"source": [
"Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "256408d5",
"metadata": {},
"outputs": [],
"source": [
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)"
]
},
{
"cell_type": "markdown",
"id": "b7d04f53",
"metadata": {},
"source": [
"## Using LCEL\n",
"\n",
"We will first show how to create the agent using LCEL"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "bb0813a3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import ReActSingleInputOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "d3ae5fcd",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react\")\n",
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "bf47a3c7",
"metadata": {},
"outputs": [],
"source": [
"llm_with_stop = llm.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b3d3958b",
"metadata": {},
"outputs": [],
"source": [
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps'])\n",
"} | prompt | llm_with_stop | ReActSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "026de6cd",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "57780ce1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who Leo DiCaprio's girlfriend is and then calculate her age raised to the 0.43 power.\n",
"Action: Search\n",
"Action Input: \"Leo DiCaprio girlfriend\"\u001b[0m\u001b[36;1m\u001b[1;3mmodel Vittoria Ceretti\u001b[0m\u001b[32;1m\u001b[1;3m I need to find out Vittoria Ceretti's age\n",
"Action: Search\n",
"Action Input: \"Vittoria Ceretti age\"\u001b[0m\u001b[36;1m\u001b[1;3m25 years\u001b[0m\u001b[32;1m\u001b[1;3m I need to calculate 25 raised to the 0.43 power\n",
"Action: Calculator\n",
"Action Input: 25^0.43\u001b[0m\u001b[33;1m\u001b[1;3mAnswer: 3.991298452658078\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Leo DiCaprio's girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\",\n",
" 'output': \"Leo DiCaprio's girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
]
},
{
"cell_type": "markdown",
"id": "b4a33ea8",
"metadata": {},
"source": [
"## Using ZeroShotReactAgent\n",
"\n",
"We will now show how to use the agent with an off-the-shelf agent implementation"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "9752e90e",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "04c5bcf6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to find out who Leo DiCaprio's girlfriend is and then calculate her age raised to the 0.43 power.\n",
"Action: Search\n",
"Action Input: \"Leo DiCaprio girlfriend\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mmodel Vittoria Ceretti\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out Vittoria Ceretti's age\n",
"Action: Search\n",
"Action Input: \"Vittoria Ceretti age\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m25 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 25 raised to the 0.43 power\n",
"Action: Calculator\n",
"Action Input: 25^0.43\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mAnswer: 3.991298452658078\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Leo DiCaprio's girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\",\n",
" 'output': \"Leo DiCaprio's girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
]
},
{
"cell_type": "markdown",
"id": "7f3e8fc8",
"metadata": {},
"source": [
"## Using chat models\n",
"\n",
"You can also create ReAct agents that use chat models instead of LLMs as the agent driver.\n",
"\n",
"The main difference here is a different prompt. We will use JSON to encode the agent's actions (chat models are a bit tougher to steer, so using JSON helps to enforce the output format)."
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "fe846c48",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"chat_model = ChatOpenAI(temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "0843590d",
"metadata": {},
"outputs": [],
"source": [
"prompt = hub.pull(\"hwchase17/react-json\")\n",
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "a863b763",
"metadata": {},
"outputs": [],
"source": [
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "6336a378",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps'])\n",
"} | prompt | chat_model_with_stop | ReActJsonSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "13ad514e",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3a3394a4",
"metadata": {},
"outputs": [],
"source": [
"agent_executor.invoke({\"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
]
},
{
"cell_type": "markdown",
"id": "ffc28e29",
"metadata": {},
"source": [
"We can also use an off-the-shelf agent class"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6c41464c",
"metadata": {},
"outputs": [],
"source": [
"\n",
"agent = initialize_agent(tools, chat_model, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -13,6 +13,145 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "2018da2d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name=\"Intermediate Answer\",\n",
" func=search.run,\n",
" description=\"useful for when you need to ask with search\",\n",
" )\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "769c5940",
"metadata": {},
"source": [
"## Using LangChain Expression Language\n",
"\n",
"First we will show how to construct this agent from components using LangChain Expression Language"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "6be0e94d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import SelfAskOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "933ca47b",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/self-ask-with-search\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "d1437a27",
"metadata": {},
"outputs": [],
"source": [
"llm_with_stop = llm.bind(stop=[\"\\nIntermediate answer:\"])"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "d793401e",
"metadata": {},
"outputs": [],
"source": [
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" # Use some custom observation_prefix/llm_prefix for formatting\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(\n",
" x['intermediate_steps'], \n",
" observation_prefix=\"\\nIntermediate answer: \",\n",
" llm_prefix=\"\",\n",
" ),\n",
"} | prompt | llm_with_stop | SelfAskOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "a1bb513c",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "5181f35f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\u001b[36;1m\u001b[1;3mMen's US Open Tennis Champions Novak Djokovic earned his 24th major singles title against 2021 US Open champion Daniil Medvedev, 6-3, 7-6 (7-5), 6-3. The victory ties the Serbian player with the legendary Margaret Court for the most Grand Slam wins across both men's and women's singles.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Follow up: Where is Novak Djokovic from?\u001b[0m\u001b[36;1m\u001b[1;3mBelgrade, Serbia\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"So the final answer is: Belgrade, Serbia\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': \"What is the hometown of the reigning men's U.S. Open champion?\",\n",
" 'output': 'Belgrade, Serbia'}"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"What is the hometown of the reigning men's U.S. Open champion?\"})"
]
},
{
"cell_type": "markdown",
"id": "6556f348",
"metadata": {},
"source": [
"## Use off-the-shelf agent"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "7e3b513e",
"metadata": {},
"outputs": [
@@ -25,10 +164,11 @@
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mCarlos Alcaraz Garfia\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz Garfia from?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mEl Palmar, Spain\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mSo the final answer is: El Palmar, Spain\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mMen's US Open Tennis Champions Novak Djokovic earned his 24th major singles title against 2021 US Open champion Daniil Medvedev, 6-3, 7-6 (7-5), 6-3. The victory ties the Serbian player with the legendary Margaret Court for the most Grand Slam wins across both men's and women's singles.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Follow up: Where is Novak Djokovic from?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mBelgrade, Serbia\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mSo the final answer is: Belgrade, Serbia\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -36,29 +176,15 @@
{
"data": {
"text/plain": [
"'El Palmar, Spain'"
"'Belgrade, Serbia'"
]
},
"execution_count": 1,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name=\"Intermediate Answer\",\n",
" func=search.run,\n",
" description=\"useful for when you need to ask with search\",\n",
" )\n",
"]\n",
"\n",
"self_ask_with_search = initialize_agent(\n",
" tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n",
")\n",
@@ -92,7 +218,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.1"
},
"vscode": {
"interpreter": {

View File

@@ -0,0 +1,294 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "2ac2115b",
"metadata": {},
"source": [
"# Structured tool chat\n",
"\n",
"The structured tool chat agent is capable of using multi-input tools.\n",
"\n",
"Older agents are configured to specify an action input as a single string, but this agent can use the provided tools' `args_schema` to populate the action input.\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "68d58093",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentType\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.agents import initialize_agent"
]
},
{
"cell_type": "markdown",
"id": "9414475b",
"metadata": {},
"source": [
"## Initialize Tools\n",
"\n",
"We will test the agent using a web browser"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a990cea8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit\n",
"from langchain.tools.playwright.utils import (\n",
" create_async_playwright_browser,\n",
" create_sync_playwright_browser, # A synchronous browser is available, though it isn't compatible with jupyter.\n",
")\n",
"\n",
"# This import is required only for jupyter notebooks, since they have their own event loop\n",
"import nest_asyncio\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "536fa92a",
"metadata": {},
"outputs": [],
"source": [
"!pip install playwright\n",
"\n",
"!playwright install"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "daa3d594",
"metadata": {},
"outputs": [],
"source": [
"async_browser = create_async_playwright_browser()\n",
"browser_toolkit = PlayWrightBrowserToolkit.from_browser(async_browser=async_browser)\n",
"tools = browser_toolkit.get_tools()"
]
},
{
"cell_type": "markdown",
"id": "e3089aa8",
"metadata": {},
"source": [
"## Use LCEL\n",
"\n",
"We can first construct this agent using LangChain Expression Language"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "319e6c40",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-multi-input-json\")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "d25b216f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description_and_args\n",
"prompt = prompt.partial(\n",
" tools=render_text_description_and_args(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "fffcad76",
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(temperature=0)\n",
"llm_with_stop = llm.bind(stop=[\"Observation\"])"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "d410855f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps']),\n",
"} | prompt | llm_with_stop | JSONAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "b62702b4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "97c15ef5",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mAction:\n",
"```\n",
"{\n",
" \"action\": \"navigate_browser\",\n",
" \"action_input\": {\n",
" \"url\": \"https://blog.langchain.dev\"\n",
" }\n",
"}\n",
"```\n",
"\u001b[0m\u001b[33;1m\u001b[1;3mNavigating to https://blog.langchain.dev returned status code 200\u001b[0m\u001b[32;1m\u001b[1;3mAction:\n",
"```\n",
"{\n",
" \"action\": \"extract_text\",\n",
" \"action_input\": {}\n",
"}\n",
"```\n",
"\n",
"\u001b[0m\u001b[31;1m\u001b[1;3mLangChain LangChain Home GitHub Docs By LangChain Release Notes Write with Us Sign in Subscribe The official LangChain blog. Subscribe now Login Featured Posts Announcing LangChain Hub Using LangSmith to Support Fine-tuning Announcing LangSmith, a unified platform for debugging, testing, evaluating, and monitoring your LLM applications Sep 20 Peering Into the Soul of AI Decision-Making with LangSmith 10 min read Sep 20 LangChain + Docugami Webinar: Lessons from Deploying LLMs with LangSmith 3 min read Sep 18 TED AI Hackathon Kickoff (and projects wed love to see) 2 min read Sep 12 How to Safely Query Enterprise Data with LangChain Agents + SQL + OpenAI + Gretel 6 min read Sep 12 OpaquePrompts x LangChain: Enhance the privacy of your LangChain application with just one code change 4 min read Load more LangChain © 2023 Sign up Powered by Ghost\u001b[0m\u001b[32;1m\u001b[1;3mAction:\n",
"```\n",
"{\n",
" \"action\": \"Final Answer\",\n",
" \"action_input\": \"The LangChain blog features posts on topics such as using LangSmith for fine-tuning, AI decision-making with LangSmith, deploying LLMs with LangSmith, and more. It also includes information on LangChain Hub and upcoming webinars. LangChain is a platform for debugging, testing, evaluating, and monitoring LLM applications.\"\n",
"}\n",
"```\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"The LangChain blog features posts on topics such as using LangSmith for fine-tuning, AI decision-making with LangSmith, deploying LLMs with LangSmith, and more. It also includes information on LangChain Hub and upcoming webinars. LangChain is a platform for debugging, testing, evaluating, and monitoring LLM applications.\n"
]
}
],
"source": [
"response = await agent_executor.ainvoke({\"input\": \"Browse to blog.langchain.dev and summarize the text, please.\"})\n",
"print(response['output'])"
]
},
{
"cell_type": "markdown",
"id": "62fc1fdf",
"metadata": {},
"source": [
"## Use off-the-shelf agent"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4b585225",
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(temperature=0) # Also works well with Anthropic models\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c2a9e29c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mAction:\n",
"```\n",
"{\n",
" \"action\": \"navigate_browser\",\n",
" \"action_input\": {\n",
" \"url\": \"https://blog.langchain.dev\"\n",
" }\n",
"}\n",
"```\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mNavigating to https://blog.langchain.dev returned status code 200\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI have successfully navigated to the blog.langchain.dev website. Now I need to extract the text from the webpage to summarize it.\n",
"Action:\n",
"```\n",
"{\n",
" \"action\": \"extract_text\",\n",
" \"action_input\": {}\n",
"}\n",
"```\u001b[0m\n",
"Observation: \u001b[31;1m\u001b[1;3mLangChain LangChain Home GitHub Docs By LangChain Release Notes Write with Us Sign in Subscribe The official LangChain blog. Subscribe now Login Featured Posts Announcing LangChain Hub Using LangSmith to Support Fine-tuning Announcing LangSmith, a unified platform for debugging, testing, evaluating, and monitoring your LLM applications Sep 20 Peering Into the Soul of AI Decision-Making with LangSmith 10 min read Sep 20 LangChain + Docugami Webinar: Lessons from Deploying LLMs with LangSmith 3 min read Sep 18 TED AI Hackathon Kickoff (and projects wed love to see) 2 min read Sep 12 How to Safely Query Enterprise Data with LangChain Agents + SQL + OpenAI + Gretel 6 min read Sep 12 OpaquePrompts x LangChain: Enhance the privacy of your LangChain application with just one code change 4 min read Load more LangChain © 2023 Sign up Powered by Ghost\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI have successfully navigated to the blog.langchain.dev website. The text on the webpage includes featured posts such as \"Announcing LangChain Hub,\" \"Using LangSmith to Support Fine-tuning,\" \"Peering Into the Soul of AI Decision-Making with LangSmith,\" \"LangChain + Docugami Webinar: Lessons from Deploying LLMs with LangSmith,\" \"TED AI Hackathon Kickoff (and projects wed love to see),\" \"How to Safely Query Enterprise Data with LangChain Agents + SQL + OpenAI + Gretel,\" and \"OpaquePrompts x LangChain: Enhance the privacy of your LangChain application with just one code change.\" There are also links to other pages on the website.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"I have successfully navigated to the blog.langchain.dev website. The text on the webpage includes featured posts such as \"Announcing LangChain Hub,\" \"Using LangSmith to Support Fine-tuning,\" \"Peering Into the Soul of AI Decision-Making with LangSmith,\" \"LangChain + Docugami Webinar: Lessons from Deploying LLMs with LangSmith,\" \"TED AI Hackathon Kickoff (and projects wed love to see),\" \"How to Safely Query Enterprise Data with LangChain Agents + SQL + OpenAI + Gretel,\" and \"OpaquePrompts x LangChain: Enhance the privacy of your LangChain application with just one code change.\" There are also links to other pages on the website.\n"
]
}
],
"source": [
"response = await agent_chain.ainvoke({\"input\": \"Browse to blog.langchain.dev and summarize the text, please.\"})\n",
"print(response['output'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fc3ce811",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -11,34 +11,24 @@
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f9d2ead2",
"cell_type": "markdown",
"id": "fe972808",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import XMLAgent, tool, AgentExecutor\n",
"from langchain.chat_models import ChatAnthropic\n",
"from langchain.chains import LLMChain"
"## Initialize the tools\n",
"\n",
"We will initialize some fake tools for demo purposes"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ebadf04f",
"metadata": {},
"outputs": [],
"source": [
"model = ChatAnthropic(model=\"claude-2\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6ce9f9a5",
"execution_count": 1,
"id": "ba547497",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import tool\n",
"\n",
"@tool\n",
"def search(query: str) -> str:\n",
" \"\"\"Search things about current events.\"\"\"\n",
@@ -47,17 +37,165 @@
},
{
"cell_type": "code",
"execution_count": 10,
"id": "c589944e",
"execution_count": 6,
"id": "e30e99e2",
"metadata": {},
"outputs": [],
"source": [
"tool_list = [search]"
"tools = [search]"
]
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 2,
"id": "401db6ce",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatAnthropic\n",
"model = ChatAnthropic(model=\"claude-2\")"
]
},
{
"cell_type": "markdown",
"id": "90f83099",
"metadata": {},
"source": [
"## Use LangChain Expression Language\n",
"\n",
"We will first show how to create this agent using LangChain Expression Language"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "78937679",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import XMLAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_xml"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "54fc5a22",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/xml-agent\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b1802fcc",
"metadata": {},
"outputs": [],
"source": [
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "f9d2ead2",
"metadata": {},
"outputs": [],
"source": [
"llm_with_stop = model.bind(stop=[\"</tool_input>\"])"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "ebadf04f",
"metadata": {},
"outputs": [],
"source": [
"agent = {\n",
" \"question\": lambda x: x[\"question\"],\n",
" \"agent_scratchpad\": lambda x: format_xml(x['intermediate_steps']),\n",
"} | prompt | llm_with_stop | XMLAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "6ce9f9a5",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "e14affef",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
"<tool_input>weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
"<tool_input>weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m <final_answer>\n",
"The weather in New York is 32 degrees.\n",
"</final_answer>\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'question': 'whats the weather in New york?',\n",
" 'output': '\\nThe weather in New York is 32 degrees.\\n'}"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"question\": \"whats the weather in New york?\"})"
]
},
{
"cell_type": "markdown",
"id": "42ff473d",
"metadata": {},
"source": [
"## Use off-the-shelf agent"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "7e5e73e3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.agents import XMLAgent"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "2d8454be",
"metadata": {},
"outputs": [],
@@ -67,22 +205,22 @@
" prompt=XMLAgent.get_default_prompt(),\n",
" output_parser=XMLAgent.get_default_output_parser()\n",
")\n",
"agent = XMLAgent(tools=tool_list, llm_chain=chain)"
"agent = XMLAgent(tools=tools, llm_chain=chain)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 25,
"id": "bca6096f",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 28,
"id": "71b872b1",
"metadata": {},
"outputs": [
@@ -94,7 +232,7 @@
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
"<tool_input>weather in New York\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"<tool_input>weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"\n",
"<final_answer>The weather in New York is 32 degrees\u001b[0m\n",
"\n",
@@ -104,16 +242,17 @@
{
"data": {
"text/plain": [
"'The weather in New York is 32 degrees'"
"{'input': 'whats the weather in New york?',\n",
" 'output': 'The weather in New York is 32 degrees'}"
]
},
"execution_count": 13,
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.run(\"whats the weather in New york?\")"
"agent_executor.invoke({\"input\": \"whats the weather in New york?\"})"
]
},
{