diff --git a/docs/extras/modules/agents/agent_types/anthropic_agent.ipynb b/docs/extras/modules/agents/agent_types/anthropic_agent.ipynb
deleted file mode 100644
index ab5575c247f..00000000000
--- a/docs/extras/modules/agents/agent_types/anthropic_agent.ipynb
+++ /dev/null
@@ -1,274 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "9926203f",
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
- "os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.smith.langchain.com\"\n",
- "os.environ[\"LANGCHAIN_API_KEY\"] = \"\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "45bc4149",
- "metadata": {},
- "outputs": [],
- "source": [
- "agent_instructions = \"\"\"You are a helpful assistant. Help the user answer any questions.\n",
- "\n",
- "You have access to the following tools:\n",
- "\n",
- "{tools}\n",
- "\n",
- "In order to use a tool, you can use and tags. \\\n",
- "You will then get back a response in the form \n",
- "For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n",
- "\n",
- "searchweather in SF\n",
- "64 degrees\n",
- "\n",
- "When you are done, respond with a final answer between . For example:\n",
- "\n",
- "The weather in SF is 64 degrees\n",
- "\n",
- "Begin!\n",
- "\n",
- "Question: {question}\"\"\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "4da4c0d2",
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.14) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
- " warnings.warn(\n"
- ]
- }
- ],
- "source": [
- "from langchain.chat_models import ChatAnthropic\n",
- "from langchain.prompts import ChatPromptTemplate, AIMessagePromptTemplate\n",
- "from langchain.agents import tool"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "b81e9120",
- "metadata": {},
- "outputs": [],
- "source": [
- "model = ChatAnthropic(model=\"claude-2\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "5271f612",
- "metadata": {},
- "outputs": [],
- "source": [
- "prompt_template = ChatPromptTemplate.from_template(agent_instructions) + AIMessagePromptTemplate.from_template(\"{intermediate_steps}\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "83780d81",
- "metadata": {},
- "outputs": [],
- "source": [
- "chain = prompt_template | model.bind(stop=[\"\", \"\"])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "c091d0e1",
- "metadata": {},
- "outputs": [],
- "source": [
- "@tool\n",
- "def search(query: str) -> str:\n",
- " \"\"\"Search things about current events.\"\"\"\n",
- " return \"32 degrees\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "1e81b05d",
- "metadata": {},
- "outputs": [],
- "source": [
- "tool_list = [search]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "5f0d986f",
- "metadata": {},
- "outputs": [],
- "source": [
- "from langchain.agents import Tool, AgentExecutor, BaseSingleActionAgent\n",
- "from typing import List, Tuple, Any, Union\n",
- "from langchain.schema import AgentAction, AgentFinish\n",
- "\n",
- "\n",
- "class AnthropicAgent(BaseSingleActionAgent):\n",
- " \n",
- " tools: List[Tool]\n",
- " chain: Any\n",
- "\n",
- " @property\n",
- " def input_keys(self):\n",
- " return [\"input\"]\n",
- "\n",
- " def plan(\n",
- " self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
- " ) -> Union[AgentAction, AgentFinish]:\n",
- " \"\"\"Given input, decided what to do.\n",
- "\n",
- " Args:\n",
- " intermediate_steps: Steps the LLM has taken to date,\n",
- " along with observations\n",
- " **kwargs: User inputs.\n",
- "\n",
- " Returns:\n",
- " Action specifying what tool to use.\n",
- " \"\"\"\n",
- " log = \"\"\n",
- " for action, observation in intermediate_steps:\n",
- " log += f\"{action.tool}{action.tool_input}{observation}\"\n",
- " tools = \"\"\n",
- " for tool in self.tools:\n",
- " tools += f\"{tool.name}: {tool.description}\\n\"\n",
- " response = self.chain.invoke({\"intermediate_steps\": log, \"tools\": tools, \"question\": kwargs[\"input\"]})\n",
- " if \"\" in response.content:\n",
- " t, ti = response.content.split(\"\")\n",
- " _t = t.split(\"\")[1]\n",
- " _ti = ti.split(\"\")[1]\n",
- " return AgentAction(tool=_t, tool_input=_ti, log=response.content)\n",
- " elif \"\" in response.content:\n",
- " t, ti = response.content.split(\"\")\n",
- " return AgentFinish(return_values={\"output\": ti}, log=response.content)\n",
- " else:\n",
- " raise ValueError\n",
- "\n",
- " async def aplan(\n",
- " self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
- " ) -> Union[AgentAction, AgentFinish]:\n",
- " \"\"\"Given input, decided what to do.\n",
- "\n",
- " Args:\n",
- " intermediate_steps: Steps the LLM has taken to date,\n",
- " along with observations\n",
- " **kwargs: User inputs.\n",
- "\n",
- " Returns:\n",
- " Action specifying what tool to use.\n",
- " \"\"\"\n",
- " raise ValueError"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "315361c5",
- "metadata": {},
- "outputs": [],
- "source": [
- "agent = AnthropicAgent(tools=tool_list, chain=chain)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "bca6096f",
- "metadata": {},
- "outputs": [],
- "source": [
- "agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "id": "71b872b1",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- "\n",
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m search\n",
- "weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n",
- "\n",
- "The weather in New York is 32 degrees\u001b[0m\n",
- "\n",
- "\u001b[1m> Finished chain.\u001b[0m\n"
- ]
- },
- {
- "data": {
- "text/plain": [
- "'The weather in New York is 32 degrees'"
- ]
- },
- "execution_count": 12,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "agent_executor.run(\"whats the weather in New york?\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "cca87246",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/docs/extras/modules/agents/agent_types/xml_agent.ipynb b/docs/extras/modules/agents/agent_types/xml_agent.ipynb
new file mode 100644
index 00000000000..ed183d04678
--- /dev/null
+++ b/docs/extras/modules/agents/agent_types/xml_agent.ipynb
@@ -0,0 +1,149 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "3c284df8",
+ "metadata": {},
+ "source": [
+ "# XML Agent\n",
+ "\n",
+ "Some language models (like Anthropic's Claude) are particularly good at reasoning/writing XML. This goes over how to use an agent that uses XML when prompting. "
+ ]
+ },
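+ {
+ "cell_type": "markdown",
+ "id": "1b7a9c3e",
+ "metadata": {},
+ "source": [
+ "The agent prompts the model to request a tool with `<tool></tool>` and `<tool_input></tool_input>` tags, feeds tool results back inside `<observation></observation>` tags, and looks for the answer between `<final_answer></final_answer>` tags. The next cell is a minimal, framework-free sketch of that parsing step (`parse_xml_step` is just an illustrative helper, not a LangChain API); the real parsing is handled by `XMLAgent.get_default_output_parser()` below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2c8d0f4a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sketch only (not the library's parser): turn an XML-style\n",
+ "# model response into either a tool call or a final answer.\n",
+ "def parse_xml_step(text: str) -> dict:\n",
+ "    if \"</tool>\" in text:\n",
+ "        tool_part, input_part = text.split(\"</tool>\")\n",
+ "        return {\n",
+ "            \"tool\": tool_part.split(\"<tool>\")[1],\n",
+ "            \"tool_input\": input_part.split(\"<tool_input>\")[1].split(\"</tool_input>\")[0],\n",
+ "        }\n",
+ "    if \"<final_answer>\" in text:\n",
+ "        return {\"output\": text.split(\"<final_answer>\")[1].split(\"</final_answer>\")[0]}\n",
+ "    raise ValueError(\"Expected <tool> or <final_answer> tags\")\n",
+ "\n",
+ "\n",
+ "parse_xml_step(\"<tool>search</tool><tool_input>weather in SF\")"
+ ]
+ },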
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "f9d2ead2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.agents import XMLAgent, tool, AgentExecutor\n",
+ "from langchain.chat_models import ChatAnthropic\n",
+ "from langchain.chains import LLMChain"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "ebadf04f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = ChatAnthropic(model=\"claude-2\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "6ce9f9a5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@tool\n",
+ "def search(query: str) -> str:\n",
+ " \"\"\"Search things about current events.\"\"\"\n",
+ " return \"32 degrees\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "c589944e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tool_list = [search]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "2d8454be",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chain = LLMChain(\n",
+ " llm=model,\n",
+ " prompt=XMLAgent.get_default_prompt(),\n",
+ " output_parser=XMLAgent.get_default_output_parser()\n",
+ ")\n",
+ "agent = XMLAgent(tools=tool_list, llm_chain=chain)"
+ ]
+ },
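+ {
+ "cell_type": "markdown",
+ "id": "3e9f1b6d",
+ "metadata": {},
+ "source": [
+ "To see the exact instructions the model receives, you can inspect the default prompt (the cell below is not executed here, so no output is shown)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4a5b6c7d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The prompt template the agent above was built with.\n",
+ "XMLAgent.get_default_prompt()"
+ ]
+ },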
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "bca6096f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "71b872b1",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
+ "\u001b[32;1m\u001b[1;3m search\n",
+ "weather in New York\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n",
+ "\n",
+ "The weather in New York is 32 degrees\u001b[0m\n",
+ "\n",
+ "\u001b[1m> Finished chain.\u001b[0m\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'The weather in New York is 32 degrees'"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "agent_executor.run(\"whats the weather in New york?\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cca87246",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}