langchain: docstrings agents nested (#23598)

Added missing docstrings. Formatted docstrings into a consistent form.

---------

Co-authored-by: ccurme <chester.curme@gmail.com>
Leonid Ganeline 2024-06-27 12:49:41 -07:00 committed by GitHub
parent 70834cd741
commit b64c4b4750
22 changed files with 394 additions and 62 deletions

View File

@@ -13,6 +13,7 @@ whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
from pathlib import Path
from typing import TYPE_CHECKING, Any

View File

@@ -80,6 +80,23 @@ class ChatAgent(Agent):
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> BasePromptTemplate:
"""Create a prompt from a list of tools.
Args:
tools: A list of tools.
system_message_prefix: The system message prefix.
Default is SYSTEM_MESSAGE_PREFIX.
system_message_suffix: The system message suffix.
Default is SYSTEM_MESSAGE_SUFFIX.
human_message: The human message. Default is HUMAN_MESSAGE.
format_instructions: The format instructions.
Default is FORMAT_INSTRUCTIONS.
input_variables: The input variables. Default is None.
Returns:
A prompt template.
"""
tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
@@ -113,7 +130,26 @@ class ChatAgent(Agent):
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model.
tools: A list of tools.
callback_manager: The callback manager. Default is None.
output_parser: The output parser. Default is None.
system_message_prefix: The system message prefix.
Default is SYSTEM_MESSAGE_PREFIX.
system_message_suffix: The system message suffix.
Default is SYSTEM_MESSAGE_SUFFIX.
human_message: The human message. Default is HUMAN_MESSAGE.
format_instructions: The format instructions.
Default is FORMAT_INSTRUCTIONS.
input_variables: The input variables. Default is None.
kwargs: Additional keyword arguments.
Returns:
An agent.
"""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
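
For context, a minimal sketch of calling the `create_prompt` documented in this hunk; the `search` tool is a hypothetical stub, and any list of tools would do:

    from langchain_core.tools import Tool
    from langchain.agents.chat.base import ChatAgent

    # hypothetical stub tool, used only to exercise prompt assembly
    search = Tool(
        name="search",
        description="Look up current information.",
        func=lambda q: f"results for {q}",
    )

    prompt = ChatAgent.create_prompt([search])
    print(prompt.input_variables)  # defaults to ["input", "agent_scratchpad"]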

View File

@@ -25,6 +25,20 @@ class ChatOutputParser(AgentOutputParser):
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
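
A short sketch of the `parse` behavior documented above, exercising the final-answer branch shown in this hunk:

    from langchain.agents.chat.output_parser import ChatOutputParser

    parser = ChatOutputParser()
    # no fenced action block, but FINAL_ANSWER_ACTION ("Final Answer:")
    # is present, so parse falls through to the AgentFinish branch
    finish = parser.parse("Thought: I know the answer.\nFinal Answer: 42")
    print(finish.return_values)  # {'output': '42'}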

View File

@@ -1,4 +1,5 @@
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
@@ -40,12 +41,20 @@ class ConversationalAgent(Agent):
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
"""Prefix to append the observation with.
Returns:
"Observation: "
"""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
"""Prefix to append the llm call with.
Returns:
"Thought: "
"""
return "Thought:"
@classmethod
@@ -64,11 +73,15 @@ class ConversationalAgent(Agent):
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
ai_prefix: String to use before AI output.
prefix: String to put before the list of tools. Defaults to PREFIX.
suffix: String to put after the list of tools. Defaults to SUFFIX.
format_instructions: Instructions on how to use the tools. Defaults to
FORMAT_INSTRUCTIONS
ai_prefix: String to use before AI output. Defaults to "AI".
human_prefix: String to use before human output.
Defaults to "Human".
input_variables: List of input variables the final prompt will expect.
Defaults to ["input", "chat_history", "agent_scratchpad"].
Returns:
A PromptTemplate with the template assembled from the pieces here.
@@ -105,7 +118,26 @@ class ConversationalAgent(Agent):
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model to use.
tools: A list of tools to use.
callback_manager: The callback manager to use. Default is None.
output_parser: The output parser to use. Default is None.
prefix: The prefix to use in the prompt. Default is PREFIX.
suffix: The suffix to use in the prompt. Default is SUFFIX.
format_instructions: The format instructions to use.
Default is FORMAT_INSTRUCTIONS.
ai_prefix: The prefix to use before AI output. Default is "AI".
human_prefix: The prefix to use before human output.
Default is "Human".
input_variables: The input variables to use. Default is None.
**kwargs: Any additional keyword arguments to pass to the agent.
Returns:
An agent.
"""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
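
A minimal sketch of the `from_llm_and_tools` call documented above; FakeListLLM stands in for a real model and the tool is a stub:

    from langchain_core.language_models import FakeListLLM
    from langchain_core.tools import Tool
    from langchain.agents.conversational.base import ConversationalAgent

    llm = FakeListLLM(responses=["Do I need a tool? No\nAI: Hello!"])
    tools = [Tool(name="search", description="Look things up.", func=lambda q: q)]
    agent = ConversationalAgent.from_llm_and_tools(llm=llm, tools=tools, ai_prefix="AI")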

View File

@@ -22,6 +22,16 @@ class ConvoOutputParser(AgentOutputParser):
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text

View File

@@ -1,4 +1,5 @@
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple
@@ -35,7 +36,9 @@ class ConversationalChatAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""
output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
"""Output parser for the agent."""
template_tool_response: str = TEMPLATE_TOOL_RESPONSE
"""Template for the tool response."""
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
@@ -47,12 +50,20 @@ class ConversationalChatAgent(Agent):
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
"""Prefix to append the observation with.
Returns:
"Observation: "
"""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
"""Prefix to append the llm call with.
Returns:
"Thought: "
"""
return "Thought:"
@classmethod
@@ -69,6 +80,20 @@ class ConversationalChatAgent(Agent):
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
"""Create a prompt for the agent.
Args:
tools: The tools to use.
system_message: The system message to use.
Defaults to the PREFIX.
human_message: The human message to use.
Defaults to the SUFFIX.
input_variables: The input variables to use. Defaults to None.
output_parser: The output parser to use. Defaults to None.
Returns:
A PromptTemplate.
"""
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
)
@@ -115,7 +140,21 @@ class ConversationalChatAgent(Agent):
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model to use.
tools: A list of tools to use.
callback_manager: The callback manager to use. Default is None.
output_parser: The output parser to use. Default is None.
system_message: The system message to use. Default is PREFIX.
human_message: The human message to use. Default is SUFFIX.
input_variables: The input variables to use. Default is None.
**kwargs: Any additional arguments.
Returns:
An agent.
"""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(
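
A minimal sketch of the `create_prompt` documented above with an overridden system message; the tool is a hypothetical stub:

    from langchain_core.tools import Tool
    from langchain.agents.conversational_chat.base import ConversationalChatAgent

    tools = [Tool(name="search", description="Look things up.", func=lambda q: q)]
    prompt = ConversationalChatAgent.create_prompt(
        tools, system_message="You are a helpful assistant."
    )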

View File

@@ -5,6 +5,7 @@ that result from previous iterations of the agent.
Depending on the prompting strategy you are using, you may want to format these
differently before passing them into the LLM.
"""
from langchain.agents.format_scratchpad.log import format_log_to_str
from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
from langchain.agents.format_scratchpad.openai_functions import (

View File

@@ -8,7 +8,18 @@ def format_log_to_str(
observation_prefix: str = "Observation: ",
llm_prefix: str = "Thought: ",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process."""
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
observation_prefix: Prefix to append the observation with.
Defaults to "Observation: ".
llm_prefix: Prefix to append the llm call with.
Defaults to "Thought: ".
Returns:
str: The scratchpad.
"""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
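
A sketch of `format_log_to_str` on one (AgentAction, observation) pair, showing how the prefixes documented above are interleaved:

    from langchain_core.agents import AgentAction
    from langchain.agents.format_scratchpad import format_log_to_str

    step = (
        AgentAction(tool="search", tool_input="weather", log="I should search."),
        "Sunny, 20 C",
    )
    print(format_log_to_str([step]))
    # I should search.
    # Observation: Sunny, 20 C
    # Thought: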

View File

@@ -8,7 +8,16 @@ def format_log_to_messages(
intermediate_steps: List[Tuple[AgentAction, str]],
template_tool_response: str = "{observation}",
) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
template_tool_response: Template to format the observation with.
Defaults to "{observation}".
Returns:
List[BaseMessage]: The scratchpad.
"""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
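
The message-based variant, sketched with the same hypothetical step; the observation is rendered through `template_tool_response`:

    from langchain_core.agents import AgentAction
    from langchain.agents.format_scratchpad.log_to_messages import (
        format_log_to_messages,
    )

    step = (AgentAction(tool="search", tool_input="weather", log="I should search."), "Sunny")
    msgs = format_log_to_messages([step], template_tool_response="Tool said: {observation}")
    # [AIMessage('I should search.'), HumanMessage('Tool said: Sunny')]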

View File

@@ -16,7 +16,8 @@ def _convert_agent_action_to_messages(
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
AIMessage or the previous messages plus a FunctionMessage that corresponds to
the original tool invocation
"""
if isinstance(agent_action, AgentActionMessageLog):
return list(agent_action.message_log) + [
@@ -31,10 +32,13 @@ def _create_function_message(
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
FunctionMessage that corresponds to the original tool invocation
FunctionMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
@@ -59,7 +63,8 @@ def format_to_openai_function_messages(
Returns:
list of messages to send to the LLM for the next prediction
Raises:
ValueError: if the observation cannot be converted to a string.
"""
messages = []
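
A sketch under the same hypothetical step: a plain AgentAction becomes an AIMessage followed by a FunctionMessage carrying the observation:

    from langchain_core.agents import AgentAction
    from langchain.agents.format_scratchpad.openai_functions import (
        format_to_openai_function_messages,
    )

    step = (AgentAction(tool="search", tool_input="weather", log="calling search"), "Sunny")
    msgs = format_to_openai_function_messages([step])
    # [AIMessage('calling search'), FunctionMessage(name='search', content='Sunny')]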

View File

@@ -15,11 +15,15 @@ def _create_tool_message(
agent_action: ToolAgentAction, observation: str
) -> ToolMessage:
"""Convert agent action and observation into a tool message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
ToolMessage that corresponds to the original tool invocation
ToolMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
@@ -41,10 +45,10 @@ def format_to_tool_messages(
"""Convert (AgentAction, tool output) tuples into ToolMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
intermediate_steps: Steps the LLM has taken to date, along with observations.
Returns:
list of messages to send to the LLM for the next prediction
list of messages to send to the LLM for the next prediction.
"""
messages = []
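
A sketch with a hand-built ToolAgentAction (all field values hypothetical); the observation comes back as a ToolMessage tied to the tool_call_id:

    from langchain_core.messages import AIMessage
    from langchain.agents.format_scratchpad.tools import format_to_tool_messages
    from langchain.agents.output_parsers.tools import ToolAgentAction

    action = ToolAgentAction(
        tool="search",
        tool_input={"query": "weather"},
        log="calling search",
        tool_call_id="call_1",
        message_log=[AIMessage(content="calling search")],
    )
    msgs = format_to_tool_messages([(action, "Sunny")])
    # [AIMessage('calling search'), ToolMessage(content='Sunny', tool_call_id='call_1')]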

View File

@@ -36,11 +36,17 @@ def create_json_chat_agent(
then passed into the LLM. Default is `render_text_description`.
template_tool_response: Template prompt that uses the tool response (observation)
to make the LLM generate the next action to take.
Default is TEMPLATE_TOOL_RESPONSE.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
ValueError: If the template_tool_response is missing
the required variable 'observation'.
Example:

View File

@@ -1,4 +1,5 @@
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
@@ -20,9 +21,9 @@ from langchain.tools.render import render_text_description
class ChainConfig(NamedTuple):
"""Configuration for chain to use in MRKL system.
"""Configuration for a chain to use in MRKL system.
Args:
Parameters:
action_name: Name of the action.
action: Action function to call.
action_description: Description of the action.
@@ -35,7 +36,11 @@ class ChainConfig(NamedTuple):
@deprecated("0.1.0", alternative="create_react_agent", removal="0.3.0")
class ZeroShotAgent(Agent):
"""Agent for the MRKL chain."""
"""Agent for the MRKL chain.
Parameters:
output_parser: Output parser for the agent.
"""
output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
@@ -50,12 +55,20 @@ class ZeroShotAgent(Agent):
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
"""Prefix to append the observation with.
Returns:
"Observation: "
"""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
"""Prefix to append the llm call with.
Returns:
"Thought: "
"""
return "Thought:"
@classmethod
@@ -72,9 +85,12 @@ class ZeroShotAgent(Agent):
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
prefix: String to put before the list of tools. Defaults to PREFIX.
suffix: String to put after the list of tools. Defaults to SUFFIX.
format_instructions: Instructions on how to use the tools.
Defaults to FORMAT_INSTRUCTIONS
input_variables: List of input variables the final prompt will expect.
Defaults to None.
Returns:
A PromptTemplate with the template assembled from the pieces here.
@@ -100,7 +116,20 @@ class ZeroShotAgent(Agent):
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The LLM to use as the agent LLM.
tools: The tools to use.
callback_manager: The callback manager to use. Defaults to None.
output_parser: The output parser to use. Defaults to None.
prefix: The prefix to use. Defaults to PREFIX.
suffix: The suffix to use. Defaults to SUFFIX.
format_instructions: The format instructions to use.
Defaults to FORMAT_INSTRUCTIONS.
input_variables: The input variables to use. Defaults to None.
**kwargs: Additional parameters to pass to the agent.
"""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
@@ -141,13 +170,13 @@ class ZeroShotAgent(Agent):
@deprecated("0.1.0", removal="0.3.0")
class MRKLChain(AgentExecutor):
"""[Deprecated] Chain that implements the MRKL system."""
"""Chain that implements the MRKL system."""
@classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
) -> AgentExecutor:
"""User friendly way to initialize the MRKL chain.
"""User-friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
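
A sketch of that initialization path with a stand-in LLM and a stub action (both hypothetical):

    from langchain_core.language_models import FakeListLLM
    from langchain.agents.mrkl.base import ChainConfig, MRKLChain

    configs = [
        ChainConfig(
            action_name="Calculator",
            action=lambda q: q,  # stub chain entry point
            action_description="useful for math questions",
        )
    ]
    llm = FakeListLLM(responses=["Final Answer: done"])
    mrkl = MRKLChain.from_chains(llm=llm, chains=configs)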

View File

@@ -30,6 +30,18 @@ class MRKLOutputParser(AgentOutputParser):
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
"""
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"

View File

@@ -33,18 +33,34 @@ if TYPE_CHECKING:
class OpenAIAssistantFinish(AgentFinish):
"""AgentFinish with run and thread metadata."""
"""AgentFinish with run and thread metadata.
Parameters:
run_id: Run id.
thread_id: Thread id.
"""
run_id: str
thread_id: str
@classmethod
def is_lc_serializable(cls) -> bool:
"""Check if the class is serializable by LangChain.
Returns:
False
"""
return False
class OpenAIAssistantAction(AgentAction):
"""AgentAction with info needed to submit custom tool output to existing run."""
"""AgentAction with info needed to submit custom tool output to existing run.
Parameters:
tool_call_id: Tool call id.
run_id: Run id.
thread_id: Thread id
"""
tool_call_id: str
run_id: str
@@ -52,6 +68,11 @@ class OpenAIAssistantAction(AgentAction):
@classmethod
def is_lc_serializable(cls) -> bool:
"""Check if the class is serializable by LangChain.
Returns:
False
"""
return False
@@ -238,7 +259,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
client: OpenAI or AzureOpenAI client.
Will create default OpenAI client if not specified.
Will create a default OpenAI client if not specified.
**kwargs: Additional arguments.
Returns:
OpenAIAssistantRunnable configured to run using the created assistant.
@@ -272,12 +294,12 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
model: Override Assistant model for this run.
tools: Override Assistant tools for this run.
run_metadata: Metadata to associate with new run.
config: Runnable config:
config: Runnable config. Defaults to None.
Return:
If self.as_agent, will return
Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise,
will return OpenAI types
Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
Otherwise, will return OpenAI types
Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
"""
@@ -351,7 +373,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
] = None,
**kwargs: Any,
) -> OpenAIAssistantRunnable:
"""Create an AsyncOpenAI Assistant and instantiate the Runnable.
"""Async create an AsyncOpenAI Assistant and instantiate the Runnable.
Args:
name: Assistant name.
@@ -359,7 +381,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
async_client: AsyncOpenAI client.
Will create default async_client if not specified.
Will create default async_client if not specified.
Returns:
AsyncOpenAIAssistantRunnable configured to run using the created assistant.
@@ -387,19 +409,20 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
run_id: Existing run to use. Should only be supplied when providing
the tool output for a required action after an initial invocation.
file_ids: File ids to include in new run. Used for retrieval.
message_metadata: Metadata to associate with new message.
message_metadata: Metadata to associate with a new message.
thread_metadata: Metadata to associate with new thread. Only relevant
when new thread being created.
when a new thread is created.
instructions: Additional run instructions.
model: Override Assistant model for this run.
tools: Override Assistant tools for this run.
run_metadata: Metadata to associate with new run.
config: Runnable config:
config: Runnable config. Defaults to None.
**kwargs: Additional arguments.
Return:
If self.as_agent, will return
Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise,
will return OpenAI types
Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
Otherwise, will return OpenAI types
Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
"""

View File

@@ -1,4 +1,5 @@
"""Memory used to save agent output AND intermediate steps."""
from typing import Any, Dict, List
from langchain_core.language_models import BaseLanguageModel
@@ -12,7 +13,22 @@ from langchain.memory.chat_memory import BaseChatMemory
class AgentTokenBufferMemory(BaseChatMemory):
"""Memory used to save agent output AND intermediate steps."""
"""Memory used to save agent output AND intermediate steps.
Parameters:
human_prefix: Prefix for human messages. Default is "Human".
ai_prefix: Prefix for AI messages. Default is "AI".
llm: Language model.
memory_key: Key to save memory under. Default is "history".
max_token_limit: Maximum number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest
messages will be pruned. Default is 12000.
return_messages: Whether to return messages. Default is True.
output_key: Key to save output under. Default is "output".
intermediate_steps_key: Key to save intermediate steps under.
Default is "intermediate_steps".
format_as_tools: Whether to format as tools. Default is False.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
@@ -33,14 +49,21 @@ class AgentTokenBufferMemory(BaseChatMemory):
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
"""Always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
"""Return history buffer.
Args:
inputs: Inputs to the agent.
Returns:
A dictionary with the history buffer.
"""
if self.return_messages:
final_buffer: Any = self.buffer
else:
@@ -52,7 +75,12 @@ class AgentTokenBufferMemory(BaseChatMemory):
return {self.memory_key: final_buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
"""Save context from this conversation to buffer. Pruned."""
"""Save context from this conversation to buffer. Pruned.
Args:
inputs: Inputs to the agent.
outputs: Outputs from the agent.
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
format_to_messages = (
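
A sketch of the save/load cycle documented above; it assumes langchain-openai is installed (pruning counts tokens locally via tiktoken, so save_context makes no API call) and the key is a placeholder:

    from langchain_openai import ChatOpenAI
    from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
        AgentTokenBufferMemory,
    )

    memory = AgentTokenBufferMemory(llm=ChatOpenAI(api_key="sk-..."), max_token_limit=1000)
    memory.save_context({"input": "hi"}, {"output": "hello", "intermediate_steps": []})
    print(memory.load_memory_variables({}))  # {'history': [HumanMessage..., AIMessage...]}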

View File

@@ -1,4 +1,5 @@
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
from typing import Any, List, Optional, Sequence, Tuple, Type, Union
from langchain_core._api import deprecated
@@ -41,6 +42,9 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
prompt: The prompt for this agent, should support agent_scratchpad as one
of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
output_parser: The output parser for this agent. Should be an instance of
OpenAIFunctionsAgentOutputParser.
Defaults to OpenAIFunctionsAgentOutputParser.
"""
llm: BaseLanguageModel
@@ -56,6 +60,17 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
@root_validator(pre=False, skip_on_failure=True)
def validate_prompt(cls, values: dict) -> dict:
"""Validate prompt.
Args:
values: Values to validate.
Returns:
Validated values.
Raises:
ValueError: If `agent_scratchpad` is not in the prompt.
"""
prompt: BasePromptTemplate = values["prompt"]
if "agent_scratchpad" not in prompt.input_variables:
raise ValueError(
@@ -71,6 +86,8 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
@property
def functions(self) -> List[dict]:
"""Get functions."""
return [dict(convert_to_openai_function(t)) for t in self.tools]
def plan(
@@ -83,11 +100,16 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
intermediate_steps: Steps the LLM has taken to date,
along with observations.
callbacks: Callbacks to use. Defaults to None.
with_functions: Whether to use functions. Defaults to True.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
If the agent is finished, returns an AgentFinish.
If the agent is not finished, returns an AgentAction.
"""
agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
selected_inputs = {
@@ -116,15 +138,18 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
"""Async given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
along with observations.
callbacks: Callbacks to use. Defaults to None.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
If the agent is finished, returns an AgentFinish.
If the agent is not finished, returns an AgentAction.
"""
agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
selected_inputs = {
@@ -145,7 +170,20 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
intermediate_steps: List[Tuple[AgentAction, str]],
**kwargs: Any,
) -> AgentFinish:
"""Return response when agent has been stopped due to max iterations."""
"""Return response when agent has been stopped due to max iterations.
Args:
early_stopping_method: The early stopping method to use.
intermediate_steps: Intermediate steps.
**kwargs: User inputs.
Returns:
AgentFinish.
Raises:
ValueError: If `early_stopping_method` is not `force` or `generate`.
ValueError: If `agent_decision` is not an AgentAction.
"""
if early_stopping_method == "force":
# `force` just returns a constant string
return AgentFinish(
@@ -215,7 +253,17 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
),
**kwargs: Any,
) -> BaseSingleActionAgent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The LLM to use as the agent.
tools: The tools to use.
callback_manager: The callback manager to use. Defaults to None.
extra_prompt_messages: Extra prompt messages to use. Defaults to None.
system_message: The system message to use.
Defaults to a default system message.
**kwargs: Additional parameters to pass to the agent.
"""
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
@@ -243,8 +291,11 @@ def create_openai_functions_agent(
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If `agent_scratchpad` is not in the prompt.
Example:

View File

@@ -1,4 +1,5 @@
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
import json
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
@@ -95,7 +96,7 @@ def _parse_ai_message(message: BaseMessage) -> Union[List[AgentAction], AgentFin
@deprecated("0.1.0", alternative="create_openai_tools_agent", removal="0.3.0")
class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
"""An Agent driven by OpenAIs function powered API.
"""Agent driven by OpenAIs function powered API.
Args:
llm: This should be an instance of ChatOpenAI, specifically a model
@@ -131,6 +132,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
@property
def functions(self) -> List[dict]:
"""Get the functions for the agent."""
enum_vals = [t.name for t in self.tools]
tool_selection = {
# OpenAI functions returns a single tool invocation
@@ -198,7 +200,9 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
intermediate_steps: Steps the LLM has taken to date,
along with observations.
callbacks: Callbacks to use. Default is None.
**kwargs: User inputs.
Returns:
@@ -223,11 +227,12 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[List[AgentAction], AgentFinish]:
"""Given input, decided what to do.
"""Async given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
along with observations.
callbacks: Callbacks to use. Default is None.
**kwargs: User inputs.
Returns:
@@ -260,7 +265,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
system_message: Message to use as the system message that will be the
first in the prompt.
extra_prompt_messages: Prompt messages that will be placed between the
system message and the new human input.
system message and the new human input. Default is None.
Returns:
A prompt template to pass into this agent.
@@ -293,7 +298,17 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
),
**kwargs: Any,
) -> BaseMultiActionAgent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model to use.
tools: A list of tools to use.
callback_manager: The callback manager to use. Default is None.
extra_prompt_messages: Extra prompt messages to use. Default is None.
system_message: The system message to use.
Default is a default system message.
**kwargs: Additional arguments.
"""
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,

View File

@@ -28,6 +28,9 @@ def create_openai_tools_agent(
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python

View File

@@ -9,6 +9,7 @@ This contains a `return_values` dictionary. This usually contains a
single `output` key, but can be extended to contain more.
This also contains a `log` variable (which contains a log of the agent's thinking).
"""
from langchain.agents.output_parsers.json import JSONAgentOutputParser
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,

View File

@@ -1,4 +1,5 @@
"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, Sequence

View File

@@ -1,4 +1,5 @@
"""Chain that does self-ask with search."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence, Union