Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-30 18:33:40 +00:00)
langchain[patch]: add tools renderer for various non-openai agents (#18307)
- **Description:** Add `tools_renderer` for various non-OpenAI agents, so that tools can be rendered in different ways for your LLM.
- **Issue:** N/A
- **Dependencies:** N/A

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
This commit is contained in:
parent 7ce2f32c64
commit b89d9fc177
@@ -8,7 +8,7 @@ from langchain_core.tools import BaseTool
 from langchain.agents.format_scratchpad import format_log_to_messages
 from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE
 from langchain.agents.output_parsers import JSONAgentOutputParser
-from langchain.tools.render import render_text_description
+from langchain.tools.render import ToolsRenderer, render_text_description


 def create_json_chat_agent(
@@ -16,6 +16,7 @@ def create_json_chat_agent(
     tools: Sequence[BaseTool],
     prompt: ChatPromptTemplate,
     stop_sequence: bool = True,
+    tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses JSON to format its logic, build for Chat Models.

@@ -26,6 +27,9 @@ def create_json_chat_agent(
         stop_sequence: Adds a stop token of "Observation:" to avoid hallucinates.
             Default is True. You may to set this to False if the LLM you are using
             does not support stop sequences.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.

     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
         variables as the prompt passed in does. It returns as output either an
@@ -150,7 +154,7 @@ def create_json_chat_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")

     prompt = prompt.partial(
-        tools=render_text_description(list(tools)),
+        tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
     )
     if stop_sequence:
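With this change a caller can swap in its own renderer, e.g. for a model that works better with a numbered tool list. The sketch below is illustrative only and is not part of this patch; the `word_count` tool, `numbered_renderer`, and the `llm`/`prompt` placeholders are assumptions.

```python
# Illustrative sketch (not part of this patch): a custom renderer passed to the
# new tools_renderer parameter. The tool and renderer here are made up.
from typing import List

from langchain.tools.render import render_text_description
from langchain_core.tools import BaseTool, tool


@tool
def word_count(text: str) -> int:
    """Count the number of words in the given text."""
    return len(text.split())


def numbered_renderer(tools: List[BaseTool]) -> str:
    """Render tools as a numbered list instead of the default 'name: description' lines."""
    return "\n".join(f"{i}. {t.name} - {t.description}" for i, t in enumerate(tools, 1))


print(render_text_description([word_count]))  # default rendering
print(numbered_renderer([word_count]))        # custom rendering

# With this patch the custom renderer is passed straight through
# (llm and prompt are assumed to be configured elsewhere):
# agent = create_json_chat_agent(llm, [word_count], prompt, tools_renderer=numbered_renderer)
```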
@@ -17,6 +17,7 @@ from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
 from langchain.agents.tools import Tool
 from langchain.agents.utils import validate_tools_single_input
 from langchain.chains import LLMChain
+from langchain.tools.render import render_text_description


 class ChainConfig(NamedTuple):
@@ -79,7 +80,7 @@ class ZeroShotAgent(Agent):
         Returns:
             A PromptTemplate with the template assembled from the pieces here.
         """
-        tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
+        tool_strings = render_text_description(list(tools))
         tool_names = ", ".join([tool.name for tool in tools])
         format_instructions = format_instructions.format(tool_names=tool_names)
         template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
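The ZeroShotAgent change is behavior-preserving: `render_text_description` produces the same `name: description` lines the old hand-written join did. A quick sanity-check sketch, assuming the renderer's current implementation; the `shout` tool is a made-up example.

```python
# Sanity-check sketch (not from the patch): render_text_description should match
# the manual string the old ZeroShotAgent.create_prompt code built by hand.
from typing import List

from langchain.tools.render import render_text_description
from langchain_core.tools import BaseTool, tool


@tool
def shout(text: str) -> str:
    """Return the text in upper case."""
    return text.upper()


tools: List[BaseTool] = [shout]
manual = "\n".join([f"{t.name}: {t.description}" for t in tools])
assert manual == render_text_description(list(tools))
```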
@@ -10,7 +10,7 @@ from langchain_core.tools import BaseTool
 from langchain.agents import AgentOutputParser
 from langchain.agents.format_scratchpad import format_log_to_str
 from langchain.agents.output_parsers import ReActSingleInputOutputParser
-from langchain.tools.render import render_text_description
+from langchain.tools.render import ToolsRenderer, render_text_description


 def create_react_agent(
@@ -18,6 +18,7 @@ def create_react_agent(
     tools: Sequence[BaseTool],
     prompt: BasePromptTemplate,
     output_parser: Optional[AgentOutputParser] = None,
+    tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses ReAct prompting.

@@ -26,6 +27,8 @@ def create_react_agent(
         tools: Tools this agent has access to.
         prompt: The prompt to use. See Prompt section below for more.
         output_parser: AgentOutputParser for parse the LLM output.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.

     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -102,7 +105,7 @@ def create_react_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")

     prompt = prompt.partial(
-        tools=render_text_description(list(tools)),
+        tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
     )
     llm_with_stop = llm.bind(stop=["\nObservation"])
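Internally, the patched `create_react_agent` just feeds the renderer's output into the prompt's `{tools}` variable via `prompt.partial(...)`. The standalone sketch below mimics that wiring without building a full agent; the `add` tool and the prompt template are assumptions for illustration.

```python
# Standalone sketch of the prompt.partial(...) wiring shown in the hunk above.
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import tool

from langchain.tools.render import render_text_description


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


prompt = PromptTemplate.from_template(
    "Answer using these tools:\n{tools}\nTool names: {tool_names}\n\nQuestion: {input}"
)
tools = [add]
prompt = prompt.partial(
    tools=render_text_description(list(tools)),  # tools_renderer(list(tools)) after this patch
    tool_names=", ".join(t.name for t in tools),
)
print(prompt.format(input="What is 2 + 3?"))
```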
@@ -23,7 +23,7 @@ from langchain.agents.structured_chat.output_parser import (
 )
 from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
 from langchain.chains.llm import LLMChain
-from langchain.tools.render import render_text_description_and_args
+from langchain.tools.render import ToolsRenderer, render_text_description_and_args

 HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"

@@ -151,7 +151,10 @@ class StructuredChatAgent(Agent):


 def create_structured_chat_agent(
-    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: ChatPromptTemplate,
+    tools_renderer: ToolsRenderer = render_text_description_and_args,
 ) -> Runnable:
     """Create an agent aimed at supporting tools with multiple inputs.

@@ -159,6 +162,8 @@ def create_structured_chat_agent(
         llm: LLM to use as the agent.
         tools: Tools this agent has access to.
         prompt: The prompt to use. See Prompt section below for more.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.

     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -265,7 +270,7 @@ def create_structured_chat_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")

     prompt = prompt.partial(
-        tools=render_text_description_and_args(list(tools)),
+        tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
     )
     llm_with_stop = llm.bind(stop=["Observation"])
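Note that the structured chat agent defaults to `render_text_description_and_args`, which also includes each tool's argument schema, which is what multi-input tools need. A small illustration (the `move_file` tool is made up, not from the patch):

```python
# Illustration of the structured chat agent's default renderer:
# render_text_description_and_args includes each tool's argument schema.
from langchain.tools.render import (
    render_text_description,
    render_text_description_and_args,
)
from langchain_core.tools import tool


@tool
def move_file(source: str, destination: str) -> str:
    """Move a file from source to destination."""
    return f"moved {source} -> {destination}"


print(render_text_description([move_file]))           # name and description only
print(render_text_description_and_args([move_file]))  # also includes the args schema
```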
@@ -14,7 +14,7 @@ from langchain.agents.format_scratchpad import format_xml
 from langchain.agents.output_parsers import XMLAgentOutputParser
 from langchain.agents.xml.prompt import agent_instructions
 from langchain.chains.llm import LLMChain
-from langchain.tools.render import render_text_description
+from langchain.tools.render import ToolsRenderer, render_text_description


 @deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
@@ -108,7 +108,10 @@ class XMLAgent(BaseSingleActionAgent):


 def create_xml_agent(
-    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: BasePromptTemplate,
+    tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses XML to format its logic.

@@ -118,6 +121,8 @@ def create_xml_agent(
         prompt: The prompt to use, must have input keys
             `tools`: contains descriptions for each tool.
             `agent_scratchpad`: contains previous agent actions and tool outputs.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.

     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -194,7 +199,7 @@ def create_xml_agent(
         raise ValueError(f"Prompt missing required variables: {missing_vars}")

     prompt = prompt.partial(
-        tools=render_text_description(list(tools)),
+        tools=tools_renderer(list(tools)),
     )
     llm_with_stop = llm.bind(stop=["</tool_input>"])
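Because the renderer is now injectable, an XML agent can be given a renderer whose output matches its XML-flavoured prompt. A hypothetical example; the `render_as_xml` helper and `lookup` tool are not part of langchain or of this patch.

```python
# Hypothetical custom renderer (not part of langchain) emitting XML-style tool
# descriptions, to match the XML agent's prompting style.
from typing import List

from langchain_core.tools import BaseTool, tool


def render_as_xml(tools: List[BaseTool]) -> str:
    return "\n".join(
        f"<tool><name>{t.name}</name><description>{t.description}</description></tool>"
        for t in tools
    )


@tool
def lookup(term: str) -> str:
    """Look up a term in the documentation."""
    return f"docs for {term}"


print(render_as_xml([lookup]))
# After this patch the renderer can be injected, e.g.:
# agent = create_xml_agent(llm, [lookup], prompt, tools_renderer=render_as_xml)
```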
@@ -4,7 +4,7 @@ Depending on the LLM you are using and the prompting strategy you are using,
 you may want Tools to be rendered in a different way.
 This module contains various ways to render tools.
 """
-from typing import List
+from typing import Callable, List

 # For backwards compatibility
 from langchain_core.tools import BaseTool
@@ -14,6 +14,7 @@ from langchain_core.utils.function_calling import (
 )

 __all__ = [
+    "ToolsRenderer",
     "render_text_description",
     "render_text_description_and_args",
     "format_tool_to_openai_tool",
@@ -21,6 +22,9 @@ __all__ = [
 ]


+ToolsRenderer = Callable[[List[BaseTool]], str]
+
+
 def render_text_description(tools: List[BaseTool]) -> str:
     """Render the tool name and description in plain text.

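`ToolsRenderer` is just a callable taking a list of tools and returning a string, so any plain function with that shape satisfies it. A minimal sketch, assuming the patched `langchain.tools.render` module; the `greet` tool and `upper_case_renderer` are illustrative.

```python
# Minimal sketch: any Callable[[List[BaseTool]], str] satisfies the ToolsRenderer alias.
from typing import List

from langchain.tools.render import ToolsRenderer
from langchain_core.tools import BaseTool, tool


@tool
def greet(name: str) -> str:
    """Greet the given person by name."""
    return f"Hello, {name}!"


def upper_case_renderer(tools: List[BaseTool]) -> str:
    """Render tool names in upper case; purely illustrative."""
    return "\n".join(f"{t.name.upper()}: {t.description}" for t in tools)


renderer: ToolsRenderer = upper_case_renderer  # type-checks against the alias
print(renderer([greet]))
```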