From b89d9fc17798628c41601e484815cf04b07a9a81 Mon Sep 17 00:00:00 2001 From: mackong Date: Mon, 4 Mar 2024 06:25:12 +0800 Subject: [PATCH] langchain[patch]: add tools renderer for various non-openai agents (#18307) - **Description:** add tools_renderer for various non-openai agents, so that tools can be rendered in different ways for your LLM. - **Issue:** N/A - **Dependencies:** N/A --------- Co-authored-by: Harrison Chase --- libs/langchain/langchain/agents/json_chat/base.py | 8 ++++++-- libs/langchain/langchain/agents/mrkl/base.py | 3 ++- libs/langchain/langchain/agents/react/agent.py | 7 +++++-- .../langchain/agents/structured_chat/base.py | 11 ++++++++--- libs/langchain/langchain/agents/xml/base.py | 11 ++++++++--- libs/langchain/langchain/tools/render.py | 6 +++++- 6 files changed, 34 insertions(+), 12 deletions(-) diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py index 24e1310e2af..7a70c0bf4c6 100644 --- a/libs/langchain/langchain/agents/json_chat/base.py +++ b/libs/langchain/langchain/agents/json_chat/base.py @@ -8,7 +8,7 @@ from langchain_core.tools import BaseTool from langchain.agents.format_scratchpad import format_log_to_messages from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE from langchain.agents.output_parsers import JSONAgentOutputParser -from langchain.tools.render import render_text_description +from langchain.tools.render import ToolsRenderer, render_text_description def create_json_chat_agent( @@ -16,6 +16,7 @@ def create_json_chat_agent( tools: Sequence[BaseTool], prompt: ChatPromptTemplate, stop_sequence: bool = True, + tools_renderer: ToolsRenderer = render_text_description, ) -> Runnable: """Create an agent that uses JSON to format its logic, build for Chat Models. @@ -26,6 +27,9 @@ def create_json_chat_agent( stop_sequence: Adds a stop token of "Observation:" to avoid hallucinates. Default is True. 
You may to set this to False if the LLM you are using does not support stop sequences. + tools_renderer: This controls how the tools are converted into a string and + then passed into the LLM. Default is `render_text_description`. + Returns: A Runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an @@ -150,7 +154,7 @@ def create_json_chat_agent( raise ValueError(f"Prompt missing required variables: {missing_vars}") prompt = prompt.partial( - tools=render_text_description(list(tools)), + tools=tools_renderer(list(tools)), tool_names=", ".join([t.name for t in tools]), ) if stop_sequence: diff --git a/libs/langchain/langchain/agents/mrkl/base.py b/libs/langchain/langchain/agents/mrkl/base.py index 976ecf66aae..7bf86f0cabf 100644 --- a/libs/langchain/langchain/agents/mrkl/base.py +++ b/libs/langchain/langchain/agents/mrkl/base.py @@ -17,6 +17,7 @@ from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input from langchain.chains import LLMChain +from langchain.tools.render import render_text_description class ChainConfig(NamedTuple): @@ -79,7 +80,7 @@ class ZeroShotAgent(Agent): Returns: A PromptTemplate with the template assembled from the pieces here. 
""" - tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) + tool_strings = render_text_description(list(tools)) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) diff --git a/libs/langchain/langchain/agents/react/agent.py b/libs/langchain/langchain/agents/react/agent.py index f1aa769dc40..7c0f70bbd4a 100644 --- a/libs/langchain/langchain/agents/react/agent.py +++ b/libs/langchain/langchain/agents/react/agent.py @@ -10,7 +10,7 @@ from langchain_core.tools import BaseTool from langchain.agents import AgentOutputParser from langchain.agents.format_scratchpad import format_log_to_str from langchain.agents.output_parsers import ReActSingleInputOutputParser -from langchain.tools.render import render_text_description +from langchain.tools.render import ToolsRenderer, render_text_description def create_react_agent( @@ -18,6 +18,7 @@ def create_react_agent( tools: Sequence[BaseTool], prompt: BasePromptTemplate, output_parser: Optional[AgentOutputParser] = None, + tools_renderer: ToolsRenderer = render_text_description, ) -> Runnable: """Create an agent that uses ReAct prompting. @@ -26,6 +27,8 @@ def create_react_agent( tools: Tools this agent has access to. prompt: The prompt to use. See Prompt section below for more. output_parser: AgentOutputParser for parse the LLM output. + tools_renderer: This controls how the tools are converted into a string and + then passed into the LLM. Default is `render_text_description`. Returns: A Runnable sequence representing an agent. 
It takes as input all the same input @@ -102,7 +105,7 @@ def create_react_agent( raise ValueError(f"Prompt missing required variables: {missing_vars}") prompt = prompt.partial( - tools=render_text_description(list(tools)), + tools=tools_renderer(list(tools)), tool_names=", ".join([t.name for t in tools]), ) llm_with_stop = llm.bind(stop=["\nObservation"]) diff --git a/libs/langchain/langchain/agents/structured_chat/base.py b/libs/langchain/langchain/agents/structured_chat/base.py index 3dc29b754bd..9054ef3f05e 100644 --- a/libs/langchain/langchain/agents/structured_chat/base.py +++ b/libs/langchain/langchain/agents/structured_chat/base.py @@ -23,7 +23,7 @@ from langchain.agents.structured_chat.output_parser import ( ) from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.chains.llm import LLMChain -from langchain.tools.render import render_text_description_and_args +from langchain.tools.render import ToolsRenderer, render_text_description_and_args HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}" @@ -151,7 +151,10 @@ class StructuredChatAgent(Agent): def create_structured_chat_agent( - llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + prompt: ChatPromptTemplate, + tools_renderer: ToolsRenderer = render_text_description_and_args, ) -> Runnable: """Create an agent aimed at supporting tools with multiple inputs. @@ -159,6 +162,8 @@ llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use. See Prompt section below for more. + tools_renderer: This controls how the tools are converted into a string and + then passed into the LLM. Default is `render_text_description_and_args`. Returns: A Runnable sequence representing an agent. 
It takes as input all the same input @@ -265,7 +270,7 @@ def create_structured_chat_agent( raise ValueError(f"Prompt missing required variables: {missing_vars}") prompt = prompt.partial( - tools=render_text_description_and_args(list(tools)), + tools=tools_renderer(list(tools)), tool_names=", ".join([t.name for t in tools]), ) llm_with_stop = llm.bind(stop=["Observation"]) diff --git a/libs/langchain/langchain/agents/xml/base.py b/libs/langchain/langchain/agents/xml/base.py index fe3f4883ece..572b50a8ca7 100644 --- a/libs/langchain/langchain/agents/xml/base.py +++ b/libs/langchain/langchain/agents/xml/base.py @@ -14,7 +14,7 @@ from langchain.agents.format_scratchpad import format_xml from langchain.agents.output_parsers import XMLAgentOutputParser from langchain.agents.xml.prompt import agent_instructions from langchain.chains.llm import LLMChain -from langchain.tools.render import render_text_description +from langchain.tools.render import ToolsRenderer, render_text_description @deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0") @@ -108,7 +108,10 @@ class XMLAgent(BaseSingleActionAgent): def create_xml_agent( - llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + prompt: BasePromptTemplate, + tools_renderer: ToolsRenderer = render_text_description, ) -> Runnable: """Create an agent that uses XML to format its logic. @@ -118,6 +121,8 @@ def create_xml_agent( prompt: The prompt to use, must have input keys `tools`: contains descriptions for each tool. `agent_scratchpad`: contains previous agent actions and tool outputs. + tools_renderer: This controls how the tools are converted into a string and + then passed into the LLM. Default is `render_text_description`. Returns: A Runnable sequence representing an agent. 
It takes as input all the same input @@ -194,7 +199,7 @@ def create_xml_agent( raise ValueError(f"Prompt missing required variables: {missing_vars}") prompt = prompt.partial( - tools=render_text_description(list(tools)), + tools=tools_renderer(list(tools)), ) llm_with_stop = llm.bind(stop=[""]) diff --git a/libs/langchain/langchain/tools/render.py b/libs/langchain/langchain/tools/render.py index 11288baf3d3..cb6fde55ead 100644 --- a/libs/langchain/langchain/tools/render.py +++ b/libs/langchain/langchain/tools/render.py @@ -4,7 +4,7 @@ Depending on the LLM you are using and the prompting strategy you are using, you may want Tools to be rendered in a different way. This module contains various ways to render tools. """ -from typing import List +from typing import Callable, List # For backwards compatibility from langchain_core.tools import BaseTool @@ -14,6 +14,7 @@ from langchain_core.utils.function_calling import ( ) __all__ = [ + "ToolsRenderer", "render_text_description", "render_text_description_and_args", "format_tool_to_openai_tool", @@ -21,6 +22,9 @@ __all__ = [ ] +ToolsRenderer = Callable[[List[BaseTool]], str] + + def render_text_description(tools: List[BaseTool]) -> str: """Render the tool name and description in plain text.