langchain[patch]: add tools renderer for various non-openai agents (#18307)

- **Description:** add `tools_renderer` for various non-OpenAI agents,
  so tools can be rendered in different ways for your LLM (see the sketch below).
  - **Issue:** N/A
  - **Dependencies:** N/A
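
As a quick illustration (not part of the diff), here is a minimal sketch of passing a non-default renderer to `create_react_agent`; the tool, prompt, and `llm` below are hypothetical placeholders:

```python
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import Tool
from langchain.agents import create_react_agent
from langchain.tools.render import render_text_description_and_args

# A placeholder single-input tool.
search = Tool(
    name="search",
    description="Look up current information on the web.",
    func=lambda q: f"results for {q!r}",
)

# A minimal ReAct-style prompt exposing the variables the constructor fills in.
prompt = PromptTemplate.from_template(
    "Answer the question using these tools:\n\n{tools}\n\n"
    "Tool names: {tool_names}\n\nQuestion: {input}\n{agent_scratchpad}"
)

# `llm` is assumed to be any LangChain chat model or LLM instance.
agent = create_react_agent(
    llm=llm,
    tools=[search],
    prompt=prompt,
    tools_renderer=render_text_description_and_args,  # include arg schemas in the prompt
)
```

The same keyword is added to `create_json_chat_agent`, `create_structured_chat_agent`, and `create_xml_agent` in the diffs below.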

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
mackong authored on 2024-03-04 06:25:12 +08:00, committed by GitHub
parent 7ce2f32c64
commit b89d9fc177
6 changed files with 34 additions and 12 deletions

langchain/agents/json_chat/base.py

@@ -8,7 +8,7 @@ from langchain_core.tools import BaseTool
 from langchain.agents.format_scratchpad import format_log_to_messages
 from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE
 from langchain.agents.output_parsers import JSONAgentOutputParser
-from langchain.tools.render import render_text_description
+from langchain.tools.render import ToolsRenderer, render_text_description
 def create_json_chat_agent(
@@ -16,6 +16,7 @@ def create_json_chat_agent(
     tools: Sequence[BaseTool],
     prompt: ChatPromptTemplate,
     stop_sequence: bool = True,
+    tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses JSON to format its logic, build for Chat Models.
@@ -26,6 +27,9 @@
         stop_sequence: Adds a stop token of "Observation:" to avoid hallucinates.
            Default is True. You may to set this to False if the LLM you are using
            does not support stop sequences.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
         variables as the prompt passed in does. It returns as output either an
@@ -150,7 +154,7 @@ def create_json_chat_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")
     prompt = prompt.partial(
-        tools=render_text_description(list(tools)),
+        tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
     )
     if stop_sequence:

langchain/agents/mrkl/base.py

@@ -17,6 +17,7 @@ from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
 from langchain.agents.tools import Tool
 from langchain.agents.utils import validate_tools_single_input
 from langchain.chains import LLMChain
+from langchain.tools.render import render_text_description
 class ChainConfig(NamedTuple):
@@ -79,7 +80,7 @@ class ZeroShotAgent(Agent):
         Returns:
             A PromptTemplate with the template assembled from the pieces here.
         """
-        tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
+        tool_strings = render_text_description(list(tools))
         tool_names = ", ".join([tool.name for tool in tools])
         format_instructions = format_instructions.format(tool_names=tool_names)
         template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
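
For context (an aside, not part of the commit), the helper is expected to produce the same "name: description" lines the removed inline join built; a tiny sketch with a placeholder tool:

```python
from langchain_core.tools import Tool
from langchain.tools.render import render_text_description

wiki = Tool(
    name="wikipedia",
    description="Search Wikipedia for a topic.",
    func=lambda q: f"summary of {q!r}",
)

# Per the replaced line above, both forms should yield one "name: description"
# line per tool, so this refactor is intended to be behavior-preserving.
assert render_text_description([wiki]) == "\n".join(
    f"{t.name}: {t.description}" for t in [wiki]
)
```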

langchain/agents/react/agent.py

@@ -10,7 +10,7 @@ from langchain_core.tools import BaseTool
 from langchain.agents import AgentOutputParser
 from langchain.agents.format_scratchpad import format_log_to_str
 from langchain.agents.output_parsers import ReActSingleInputOutputParser
-from langchain.tools.render import render_text_description
+from langchain.tools.render import ToolsRenderer, render_text_description
 def create_react_agent(
@@ -18,6 +18,7 @@ def create_react_agent(
     tools: Sequence[BaseTool],
     prompt: BasePromptTemplate,
     output_parser: Optional[AgentOutputParser] = None,
+    tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses ReAct prompting.
@@ -26,6 +27,8 @@
         tools: Tools this agent has access to.
         prompt: The prompt to use. See Prompt section below for more.
         output_parser: AgentOutputParser for parse the LLM output.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -102,7 +105,7 @@ def create_react_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")
     prompt = prompt.partial(
-        tools=render_text_description(list(tools)),
+        tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
    )
     llm_with_stop = llm.bind(stop=["\nObservation"])

langchain/agents/structured_chat/base.py

@@ -23,7 +23,7 @@ from langchain.agents.structured_chat.output_parser import (
 )
 from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
 from langchain.chains.llm import LLMChain
-from langchain.tools.render import render_text_description_and_args
+from langchain.tools.render import ToolsRenderer, render_text_description_and_args
 HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"
@@ -151,7 +151,10 @@
 def create_structured_chat_agent(
-    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: ChatPromptTemplate,
+    tools_renderer: ToolsRenderer = render_text_description_and_args,
 ) -> Runnable:
     """Create an agent aimed at supporting tools with multiple inputs.
@@ -159,6 +162,8 @@ def create_structured_chat_agent(
         llm: LLM to use as the agent.
         tools: Tools this agent has access to.
         prompt: The prompt to use. See Prompt section below for more.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -265,7 +270,7 @@ def create_structured_chat_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")
     prompt = prompt.partial(
-        tools=render_text_description_and_args(list(tools)),
+        tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
     )
     llm_with_stop = llm.bind(stop=["Observation"])
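
One difference from the other constructors (an observation, not part of the diff): the structured-chat agent keeps `render_text_description_and_args` as its default, which also surfaces each tool's argument schema for multi-input tools. Roughly, with the exact formatting treated as an assumption:

```python
from langchain_core.tools import Tool
from langchain.tools.render import (
    render_text_description,
    render_text_description_and_args,
)

wiki = Tool(
    name="wikipedia",
    description="Search Wikipedia for a topic.",
    func=lambda q: f"summary of {q!r}",
)

print(render_text_description([wiki]))
# wikipedia: Search Wikipedia for a topic.

print(render_text_description_and_args([wiki]))
# wikipedia: Search Wikipedia for a topic., args: {...argument schema...}
```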

langchain/agents/xml/base.py

@@ -14,7 +14,7 @@ from langchain.agents.format_scratchpad import format_xml
 from langchain.agents.output_parsers import XMLAgentOutputParser
 from langchain.agents.xml.prompt import agent_instructions
 from langchain.chains.llm import LLMChain
-from langchain.tools.render import render_text_description
+from langchain.tools.render import ToolsRenderer, render_text_description
 @deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
@@ -108,7 +108,10 @@ class XMLAgent(BaseSingleActionAgent):
 def create_xml_agent(
-    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: BasePromptTemplate,
+    tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses XML to format its logic.
@@ -118,6 +121,8 @@ def create_xml_agent(
         prompt: The prompt to use, must have input keys
             `tools`: contains descriptions for each tool.
             `agent_scratchpad`: contains previous agent actions and tool outputs.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -194,7 +199,7 @@ def create_xml_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")
     prompt = prompt.partial(
-        tools=render_text_description(list(tools)),
+        tools=tools_renderer(list(tools)),
     )
     llm_with_stop = llm.bind(stop=["</tool_input>"])
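
This parameter is also what makes it easy to give XML-oriented models XML-flavored tool descriptions; a hypothetical custom renderer (names and formatting are illustrative, not from this commit):

```python
from typing import List
from langchain_core.tools import BaseTool

def render_xml_description(tools: List[BaseTool]) -> str:
    """Render each tool as a small XML fragment instead of plain text."""
    return "\n".join(
        f"<tool><name>{t.name}</name>"
        f"<description>{t.description}</description></tool>"
        for t in tools
    )

# Then, for example:
# agent = create_xml_agent(llm, tools, prompt, tools_renderer=render_xml_description)
```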

langchain/tools/render.py

@@ -4,7 +4,7 @@ Depending on the LLM you are using and the prompting strategy you are using,
 you may want Tools to be rendered in a different way.
 This module contains various ways to render tools.
 """
-from typing import List
+from typing import Callable, List
 # For backwards compatibility
 from langchain_core.tools import BaseTool
@@ -14,6 +14,7 @@ from langchain_core.utils.function_calling import (
 )
 __all__ = [
+    "ToolsRenderer",
     "render_text_description",
     "render_text_description_and_args",
     "format_tool_to_openai_tool",
@@ -21,6 +22,9 @@ __all__ = [
 ]
+ToolsRenderer = Callable[[List[BaseTool]], str]
 def render_text_description(tools: List[BaseTool]) -> str:
     """Render the tool name and description in plain text.