Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-25 08:03:39 +00:00
langchain[patch]: add stop for various non-openai agents (#19333)
* Description: add stop for various non-openai agents.
* Issue: N/A
* Dependencies: N/A

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
commit d9396bdec1
parent 7d216ad1e1
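The change follows the same pattern in every constructor touched below: stop_sequence now accepts either a bool or an explicit list of stop strings, and the model is bound with a stop kwarg only when stopping is enabled. A minimal usage sketch, not part of the commit; the fake model, tool, and prompt are illustrative placeholders:

# Illustrative only: FakeListLLM stands in for a real model and ignores stop
# sequences; only the stop_sequence argument comes from this patch.
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.llms import FakeListLLM
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import tool

@tool
def word_count(text: str) -> int:
    """Count the words in a piece of text."""
    return len(text.split())

# The prompt must expose the variables the constructor validates against.
prompt = PromptTemplate.from_template(
    "Answer the question with these tools:\n{tools}\n"
    "Tool names: {tool_names}\n"
    "Question: {input}\n{agent_scratchpad}"
)

llm = FakeListLLM(responses=["Final Answer: three words"])

# New with this patch: a custom list of stop strings instead of a plain bool.
agent = create_react_agent(
    llm, [word_count], prompt, stop_sequence=["\nObservation:", "\nObservation"]
)
executor = AgentExecutor(agent=agent, tools=[word_count])
executor.invoke({"input": "How many words are in 'one two three'?"})

With the default stop_sequence=True this behaves exactly as before the patch ("\nObservation" is bound); stop_sequence=False skips the bind call entirely.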
@@ -1,4 +1,4 @@
-from typing import Sequence
+from typing import List, Sequence, Union
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts.chat import ChatPromptTemplate
@@ -15,7 +15,7 @@ def create_json_chat_agent(
     llm: BaseLanguageModel,
     tools: Sequence[BaseTool],
     prompt: ChatPromptTemplate,
-    stop_sequence: bool = True,
+    stop_sequence: Union[bool, List[str]] = True,
     tools_renderer: ToolsRenderer = render_text_description,
 ) -> Runnable:
     """Create an agent that uses JSON to format its logic, build for Chat Models.
@@ -24,7 +24,11 @@ def create_json_chat_agent(
         llm: LLM to use as the agent.
         tools: Tools this agent has access to.
         prompt: The prompt to use. See Prompt section below for more.
-        stop_sequence: Adds a stop token of "Observation:" to avoid hallucinates.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "Observation:" to avoid hallucinates.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
             Default is True. You may to set this to False if the LLM you are using
             does not support stop sequences.
         tools_renderer: This controls how the tools are converted into a string and
@@ -158,7 +162,8 @@ def create_json_chat_agent(
         tool_names=", ".join([t.name for t in tools]),
     )
     if stop_sequence:
-        llm_to_use = llm.bind(stop=["\nObservation"])
+        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
+        llm_to_use = llm.bind(stop=stop)
     else:
         llm_to_use = llm
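create_json_chat_agent already had a boolean stop_sequence, so the change above is the widened type plus the list handling. A hypothetical sketch of the stop_sequence=False path for a backend that rejects a stop kwarg; the prompt, fake model, and empty tool list are placeholders, not from the commit:

# Hypothetical sketch: skip the stop binding entirely (stop_sequence=False).
from langchain.agents import create_json_chat_agent
from langchain_community.llms import FakeListLLM
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You can use these tools:\n{tools}\nTool names: {tool_names}"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)
llm = FakeListLLM(responses=['{"action": "Final Answer", "action_input": "done"}'])

# stop_sequence=False -> llm is used as-is, with no stop kwarg bound.
agent = create_json_chat_agent(llm, [], prompt, stop_sequence=False)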
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Optional, Sequence
+from typing import List, Optional, Sequence, Union
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
@@ -19,6 +19,8 @@ def create_react_agent(
     prompt: BasePromptTemplate,
     output_parser: Optional[AgentOutputParser] = None,
     tools_renderer: ToolsRenderer = render_text_description,
+    *,
+    stop_sequence: Union[bool, List[str]] = True,
 ) -> Runnable:
     """Create an agent that uses ReAct prompting.
 
@@ -29,6 +31,13 @@ def create_react_agent(
         output_parser: AgentOutputParser for parse the LLM output.
         tools_renderer: This controls how the tools are converted into a string and
             then passed into the LLM. Default is `render_text_description`.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "Observation:" to avoid hallucinates.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
+            Default is True. You may to set this to False if the LLM you are using
+            does not support stop sequences.
 
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -108,7 +117,11 @@ def create_react_agent(
         tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
    )
-    llm_with_stop = llm.bind(stop=["\nObservation"])
+    if stop_sequence:
+        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
+        llm_with_stop = llm.bind(stop=stop)
+    else:
+        llm_with_stop = llm
     output_parser = output_parser or ReActSingleInputOutputParser()
     agent = (
         RunnablePassthrough.assign(
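create_react_agent previously bound stop=["\nObservation"] unconditionally; the hunk above makes that conditional on the new argument. A standalone sketch of how the argument resolves (resolve_stop is a hypothetical helper, not code from the commit):

# Hypothetical helper mirroring the branching added above.
def resolve_stop(stop_sequence):
    """Return the stop list to bind, or None when binding should be skipped."""
    if stop_sequence:
        return ["\nObservation"] if stop_sequence is True else stop_sequence
    return None

assert resolve_stop(True) == ["\nObservation"]      # unchanged default behavior
assert resolve_stop(["\nObs:", "\nDone"]) == ["\nObs:", "\nDone"]  # caller-provided list
assert resolve_stop(False) is None                  # no stop kwarg bound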
@@ -1,5 +1,5 @@
 import re
-from typing import Any, List, Optional, Sequence, Tuple
+from typing import Any, List, Optional, Sequence, Tuple, Union
 
 from langchain_core._api import deprecated
 from langchain_core.agents import AgentAction
@@ -155,6 +155,8 @@ def create_structured_chat_agent(
     tools: Sequence[BaseTool],
     prompt: ChatPromptTemplate,
     tools_renderer: ToolsRenderer = render_text_description_and_args,
+    *,
+    stop_sequence: Union[bool, List[str]] = True,
 ) -> Runnable:
     """Create an agent aimed at supporting tools with multiple inputs.
 
@@ -162,6 +164,13 @@ def create_structured_chat_agent(
         llm: LLM to use as the agent.
         tools: Tools this agent has access to.
         prompt: The prompt to use. See Prompt section below for more.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "Observation:" to avoid hallucinates.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
+            Default is True. You may to set this to False if the LLM you are using
+            does not support stop sequences.
         tools_renderer: This controls how the tools are converted into a string and
             then passed into the LLM. Default is `render_text_description`.
 
@@ -273,7 +282,11 @@ def create_structured_chat_agent(
         tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
    )
-    llm_with_stop = llm.bind(stop=["Observation"])
+    if stop_sequence:
+        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
+        llm_with_stop = llm.bind(stop=stop)
+    else:
+        llm_with_stop = llm
 
     agent = (
         RunnablePassthrough.assign(
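Two things change for the structured chat agent: the binding becomes conditional, and the default stop string moves from "Observation" to "\nObservation", in line with the other agents. The bind(stop=...) call pre-attaches keyword arguments that are sent with every model invocation; a small sketch of that mechanism, using FakeListLLM as a placeholder (it accepts but ignores stop, so this only shows the plumbing):

# Sketch of the binding mechanics only; a real model would truncate its output
# at the first occurrence of any bound stop string.
from langchain_community.llms import FakeListLLM

llm = FakeListLLM(responses=["Action: search\nObservation: (model-invented)"])
llm_with_stop = llm.bind(stop=["\nObservation"])  # same call the agent makes
print(llm_with_stop.invoke("any prompt"))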
@@ -112,6 +112,8 @@ def create_xml_agent(
     tools: Sequence[BaseTool],
     prompt: BasePromptTemplate,
     tools_renderer: ToolsRenderer = render_text_description,
+    *,
+    stop_sequence: Union[bool, List[str]] = True,
 ) -> Runnable:
     """Create an agent that uses XML to format its logic.
 
@@ -123,6 +125,13 @@ def create_xml_agent(
             `agent_scratchpad`: contains previous agent actions and tool outputs.
         tools_renderer: This controls how the tools are converted into a string and
             then passed into the LLM. Default is `render_text_description`.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "</tool_input>" to avoid hallucinates.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
+            Default is True. You may to set this to False if the LLM you are using
+            does not support stop sequences.
 
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -201,7 +210,12 @@ def create_xml_agent(
     prompt = prompt.partial(
         tools=tools_renderer(list(tools)),
    )
-    llm_with_stop = llm.bind(stop=["</tool_input>"])
+
+    if stop_sequence:
+        stop = ["</tool_input>"] if stop_sequence is True else stop_sequence
+        llm_with_stop = llm.bind(stop=stop)
+    else:
+        llm_with_stop = llm
 
     agent = (
         RunnablePassthrough.assign(
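For the XML agent the default stop string is "</tool_input>" rather than an "Observation" marker, and the same True/False/list semantics apply. A hypothetical usage sketch with a custom stop list; the prompt, fake model, and the extra "</final_answer>" entry are illustrative choices, not from the commit:

# Hypothetical sketch: pass a custom stop list to the XML agent.
from langchain.agents import create_xml_agent
from langchain_community.llms import FakeListLLM
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "You have access to these tools:\n{tools}\n"
    "Question: {input}\n{agent_scratchpad}"
)
llm = FakeListLLM(responses=["<final_answer>42</final_answer>"])

agent = create_xml_agent(
    llm,
    [],           # no tools needed for this sketch
    prompt,
    stop_sequence=["</tool_input>", "</final_answer>"],
)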