commit 017348b27c (parent 1e101ae9a2), committed by GitHub

chore(langchain): add ruff rule E501 in langchain_v1 (#32812)

Co-authored-by: Mason Daugherty <mason@langchain.dev>
@@ -74,15 +74,20 @@ class HumanInterrupt(TypedDict):


 class HumanResponse(TypedDict):
-    """The response provided by a human to an interrupt, which is returned when graph execution resumes.
+    """Human response.
+
+    The response provided by a human to an interrupt,
+    which is returned when graph execution resumes.

     Attributes:
         type: The type of response:

             - "accept": Approves the current state without changes
             - "ignore": Skips/ignores the current step
             - "response": Provides text feedback or instructions
             - "edit": Modifies the current state/content
         args: The response payload:

             - None: For ignore/accept actions
             - str: For text responses
             - ActionRequest: For edit actions with updated content
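For orientation, the docstring above corresponds to resume payloads like the following. This is a hypothetical sketch, assuming LangGraph's `Command(resume=...)` interrupt API and an already-compiled `graph` with its `config`; the exact payload shape expected by the middleware is not shown in this diff:

```python
from langgraph.types import Command

# Approve the interrupted action as-is; "accept" and "ignore" carry no args.
resume_payload = [{"type": "accept", "args": None}]
graph.invoke(Command(resume=resume_payload), config=config)
```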
@@ -65,7 +65,10 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
         # Right now, we do not support multiple tool calls with interrupts
         if len(interrupt_tool_calls) > 1:
             tool_names = [t["name"] for t in interrupt_tool_calls]
-            msg = f"Called the following tools which require interrupts: {tool_names}\n\nYou may only call ONE tool that requires an interrupt at a time"
+            msg = (
+                f"Called the following tools which require interrupts: {tool_names}\n\n"
+                "You may only call ONE tool that requires an interrupt at a time"
+            )
             return {
                 "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
                 "jump_to": "model",
@@ -74,7 +77,11 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
         # Right now, we do not support interrupting a tool call if other tool calls exist
         if auto_approved_tool_calls:
             tool_names = [t["name"] for t in interrupt_tool_calls]
-            msg = f"Called the following tools which require interrupts: {tool_names}. You also called other tools that do not require interrupts. If you call a tool that requires and interrupt, you may ONLY call that tool."
+            msg = (
+                f"Called the following tools which require interrupts: {tool_names}. "
+                "You also called other tools that do not require interrupts. "
+                "If you call a tool that requires and interrupt, you may ONLY call that tool."
+            )
             return {
                 "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
                 "jump_to": "model",
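Both hunks above use the same E501 fix: adjacent string literals inside parentheses are concatenated at compile time, so the wrapped form produces exactly the string the old one-liner did. A standalone illustration (variable values are made up):

```python
tool_names = ["search", "calculator"]
msg = (
    f"Called the following tools which require interrupts: {tool_names}\n\n"
    "You may only call ONE tool that requires an interrupt at a time"
)
# Implicit concatenation yields the same value as the original single line.
assert "interrupts: ['search', 'calculator']\n\nYou may only" in msg
```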
@@ -6,9 +6,12 @@ from langchain.agents.middleware.types import AgentMiddleware, AgentState, Model


 class AnthropicPromptCachingMiddleware(AgentMiddleware):
-    """Prompt Caching Middleware - Optimizes API usage by caching conversation prefixes for Anthropic models.
+    """Prompt Caching Middleware.

-    Learn more about anthropic prompt caching [here](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching).
+    Optimizes API usage by caching conversation prefixes for Anthropic models.
+
+    Learn more about Anthropic prompt caching
+    `here <https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching>`__.
     """

     def __init__(
@@ -22,7 +25,8 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
         Args:
             type: The type of cache to use, only "ephemeral" is supported.
             ttl: The time to live for the cache, only "5m" and "1h" are supported.
-            min_messages_to_cache: The minimum number of messages until the cache is used, default is 0.
+            min_messages_to_cache: The minimum number of messages until the cache is used,
+                default is 0.
         """
         self.type = type
         self.ttl = ttl
@@ -34,15 +38,16 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
             from langchain_anthropic import ChatAnthropic
         except ImportError:
             msg = (
-                "AnthropicPromptCachingMiddleware caching middleware only supports Anthropic models."
+                "AnthropicPromptCachingMiddleware caching middleware only supports "
+                "Anthropic models."
                 "Please install langchain-anthropic."
             )
             raise ValueError(msg)

         if not isinstance(request.model, ChatAnthropic):
             msg = (
-                "AnthropicPromptCachingMiddleware caching middleware only supports Anthropic models, "
-                f"not instances of {type(request.model)}"
+                "AnthropicPromptCachingMiddleware caching middleware only supports "
+                f"Anthropic models, not instances of {type(request.model)}"
             )
             raise ValueError(msg)

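For reference, a hypothetical instantiation of the middleware touched above; the argument names come from the `__init__` documented in this diff, while wiring it into an agent is assumed rather than shown here:

```python
# Values mirror the documented defaults: "ephemeral" cache, 5-minute TTL,
# and caching from the very first message onward.
caching = AnthropicPromptCachingMiddleware(
    type="ephemeral",
    ttl="5m",
    min_messages_to_cache=0,
)
```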
@@ -48,7 +48,7 @@ Respond ONLY with the extracted context. Do not include any additional informati
 <messages>
 Messages to summarize:
 {messages}
-</messages>"""
+</messages>"""  # noqa: E501

 SUMMARY_PREFIX = "## Previous conversation summary:"

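Unlike the message wrapping used elsewhere in this commit, the hunk above suppresses E501 instead, since re-wrapping a prompt template would change its runtime value. A generic sketch of the same escape hatch (the template text is illustrative):

```python
# A targeted per-line suppression keeps ruff's E501 enforced everywhere else.
PROMPT = """A template whose long lines are part of the prompt contract and must not be re-wrapped to satisfy the linter."""  # noqa: E501
```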
@@ -58,7 +58,8 @@ StateT = TypeVar("StateT", bound=AgentState)
 class AgentMiddleware(Generic[StateT]):
     """Base middleware class for an agent.

-    Subclass this and implement any of the defined methods to customize agent behavior between steps in the main agent loop.
+    Subclass this and implement any of the defined methods to customize agent behavior
+    between steps in the main agent loop.
     """

     state_schema: type[StateT] = cast("type[StateT]", AgentState)
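A minimal sketch of the subclassing pattern the docstring describes; the hook name `before_model` and its signature are illustrative assumptions, not taken from this diff:

```python
class LoggingMiddleware(AgentMiddleware):
    """Hypothetical middleware that logs message counts between loop steps."""

    def before_model(self, state: AgentState) -> dict | None:
        print(f"Messages so far: {len(state['messages'])}")
        return None  # no state update
```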
@@ -176,8 +176,9 @@ def _validate_chat_history(
        error_message = create_error_message(
            message="Found AIMessages with tool_calls that do not have a corresponding ToolMessage. "
            f"Here are the first few of those tool calls: {tool_calls_without_results[:3]}.\n\n"
-            "Every tool call (LLM requesting to call a tool) in the message history MUST have a corresponding ToolMessage "
-            "(result of a tool invocation to return to the LLM) - this is required by most LLM providers.",
+            "Every tool call (LLM requesting to call a tool) in the message history "
+            "MUST have a corresponding ToolMessage (result of a tool invocation to return to the LLM) -"
+            " this is required by most LLM providers.",
            error_code=ErrorCode.INVALID_CHAT_HISTORY,
        )
        raise ValueError(error_message)
@@ -216,7 +217,8 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
         if isinstance(model, Runnable) and not isinstance(model, BaseChatModel):
             msg = (
                 "Expected `model` to be a BaseChatModel or a string, got {type(model)}."
-                "The `model` parameter should not have pre-bound tools, simply pass the model and tools separately."
+                "The `model` parameter should not have pre-bound tools, "
+                "simply pass the model and tools separately."
             )
             raise ValueError(msg)

@@ -308,7 +310,8 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
             Command with structured response update if found, None otherwise

         Raises:
-            MultipleStructuredOutputsError: If multiple structured responses are returned and error handling is disabled
+            MultipleStructuredOutputsError: If multiple structured responses are returned
+                and error handling is disabled
             StructuredOutputParsingError: If parsing fails and error handling is disabled
         """
         if not isinstance(self.response_format, ToolStrategy) or not response.tool_calls:
@@ -452,7 +455,11 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
         return model.bind(**kwargs)

     def _handle_structured_response_native(self, response: AIMessage) -> Command | None:
-        """If native output is configured and there are no tool calls, parse using ProviderStrategyBinding."""
+        """Handle structured output using the native output.
+
+        If native output is configured and there are no tool calls,
+        parse using ProviderStrategyBinding.
+        """
         if self.native_output_binding is None:
             return None
         if response.tool_calls:
@@ -922,7 +929,8 @@ def create_agent(  # noqa: D417
 ) -> CompiledStateGraph[StateT, ContextT]:
     """Creates an agent graph that calls tools in a loop until a stopping condition is met.

-    For more details on using `create_agent`, visit [Agents](https://langchain-ai.github.io/langgraph/agents/overview/) documentation.
+    For more details on using `create_agent`,
+    visit [Agents](https://langchain-ai.github.io/langgraph/agents/overview/) documentation.

     Args:
         model: The language model for the agent. Supports static and dynamic
@@ -969,25 +977,35 @@ def create_agent(  # noqa: D417
             must be a subset of those specified in the `tools` parameter.

         tools: A list of tools or a ToolNode instance.
-            If an empty list is provided, the agent will consist of a single LLM node without tool calling.
+            If an empty list is provided, the agent will consist of a single LLM node
+            without tool calling.
         prompt: An optional prompt for the LLM. Can take a few different forms:

-            - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state["messages"].
-            - SystemMessage: this is added to the beginning of the list of messages in state["messages"].
-            - Callable: This function should take in full graph state and the output is then passed to the language model.
-            - Runnable: This runnable should take in full graph state and the output is then passed to the language model.
+            - str: This is converted to a SystemMessage and added to the beginning
+              of the list of messages in state["messages"].
+            - SystemMessage: this is added to the beginning of the list of messages
+              in state["messages"].
+            - Callable: This function should take in full graph state and the output is then passed
+              to the language model.
+            - Runnable: This runnable should take in full graph state and the output is then passed
+              to the language model.

         response_format: An optional UsingToolStrategy configuration for structured responses.

-            If provided, the agent will handle structured output via tool calls during the normal conversation flow.
-            When the model calls a structured output tool, the response will be captured and returned in the 'structured_response' state key.
+            If provided, the agent will handle structured output via tool calls
+            during the normal conversation flow.
+            When the model calls a structured output tool, the response will be captured
+            and returned in the 'structured_response' state key.
             If not provided, `structured_response` will not be present in the output state.

             The UsingToolStrategy should contain:
-            - schemas: A sequence of ResponseSchema objects that define the structured output format
+
+            - schemas: A sequence of ResponseSchema objects that define
+              the structured output format
             - tool_choice: Either "required" or "auto" to control when structured output is used

             Each ResponseSchema contains:

             - schema: A Pydantic model that defines the structure
             - name: Optional custom name for the tool (defaults to model name)
             - description: Optional custom description (defaults to model docstring)
@@ -997,11 +1015,15 @@ def create_agent(  # noqa: D417
             `response_format` requires the model to support tool calling

             !!! Note
-                Structured responses are handled directly in the model call node via tool calls, eliminating the need for separate structured response nodes.
+                Structured responses are handled directly in the model call node via tool calls,
+                eliminating the need for separate structured response nodes.

-        pre_model_hook: An optional node to add before the `agent` node (i.e., the node that calls the LLM).
-            Useful for managing long message histories (e.g., message trimming, summarization, etc.).
-            Pre-model hook must be a callable or a runnable that takes in current graph state and returns a state update in the form of
+        pre_model_hook: An optional node to add before the `agent` node
+            (i.e., the node that calls the LLM).
+            Useful for managing long message histories
+            (e.g., message trimming, summarization, etc.).
+            Pre-model hook must be a callable or a runnable that takes in current
+            graph state and returns a state update in the form of
             ```python
             # At least one of `messages` or `llm_input_messages` MUST be provided
             {
@@ -1016,11 +1038,13 @@ def create_agent(  # noqa: D417
             ```

             !!! Important
-                At least one of `messages` or `llm_input_messages` MUST be provided and will be used as an input to the `agent` node.
+                At least one of `messages` or `llm_input_messages` MUST be provided
+                and will be used as an input to the `agent` node.
                 The rest of the keys will be added to the graph state.

             !!! Warning
-                If you are returning `messages` in the pre-model hook, you should OVERWRITE the `messages` key by doing the following:
+                If you are returning `messages` in the pre-model hook,
+                you should OVERWRITE the `messages` key by doing the following:

                 ```python
                 {
@@ -1028,9 +1052,12 @@ def create_agent(  # noqa: D417
                     ...
                 }
                 ```
-        post_model_hook: An optional node to add after the `agent` node (i.e., the node that calls the LLM).
-            Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.
-            Post-model hook must be a callable or a runnable that takes in current graph state and returns a state update.
+        post_model_hook: An optional node to add after the `agent` node
+            (i.e., the node that calls the LLM).
+            Useful for implementing human-in-the-loop, guardrails, validation,
+            or other post-processing.
+            Post-model hook must be a callable or a runnable that takes in
+            current graph state and returns a state update.

             !!! Note
                 Only available with `version="v2"`.
@@ -1039,12 +1066,14 @@ def create_agent(  # noqa: D417
             Defaults to `AgentState` that defines those two keys.
         context_schema: An optional schema for runtime context.
         checkpointer: An optional checkpoint saver object. This is used for persisting
-            the state of the graph (e.g., as chat memory) for a single thread (e.g., a single conversation).
+            the state of the graph (e.g., as chat memory) for a single thread
+            (e.g., a single conversation).
         store: An optional store object. This is used for persisting data
             across multiple threads (e.g., multiple conversations / users).
         interrupt_before: An optional list of node names to interrupt before.
             Should be one of the following: "agent", "tools".
-            This is useful if you want to add a user confirmation or other interrupt before taking an action.
+            This is useful if you want to add a user confirmation or other interrupt
+            before taking an action.
         interrupt_after: An optional list of node names to interrupt after.
             Should be one of the following: "agent", "tools".
             This is useful if you want to return directly or run additional processing on an output.
@@ -1059,7 +1088,8 @@ def create_agent(  # noqa: D417
             node using the [Send](https://langchain-ai.github.io/langgraph/concepts/low_level/#send)
             API.
         name: An optional name for the CompiledStateGraph.
-            This name will be automatically used when adding ReAct agent graph to another graph as a subgraph node -
+            This name will be automatically used when adding ReAct agent graph to
+            another graph as a subgraph node -
             particularly useful for building multi-agent systems.

         !!! warning "`config_schema` Deprecated"
@@ -1071,9 +1101,11 @@ def create_agent(  # noqa: D417
         A compiled LangChain runnable that can be used for chat interactions.

         The "agent" node calls the language model with the messages list (after applying the prompt).
-        If the resulting AIMessage contains `tool_calls`, the graph will then call the ["tools"][langgraph.prebuilt.tool_node.ToolNode].
-        The "tools" node executes the tools (1 tool per `tool_call`) and adds the responses to the messages list
-        as `ToolMessage` objects. The agent node then calls the language model again.
+        If the resulting AIMessage contains `tool_calls`,
+        the graph will then call the ["tools"][langgraph.prebuilt.tool_node.ToolNode].
+        The "tools" node executes the tools (1 tool per `tool_call`)
+        and adds the responses to the messages list as `ToolMessage` objects.
+        The agent node then calls the language model again.
         The process repeats until no more `tool_calls` are present in the response.
         The agent then returns the full list of messages as a dictionary containing the key "messages".

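The loop described above, as one minimal usage sketch; the model identifier is a placeholder and the import path assumes this package's public API:

```python
from langchain.agents import create_agent


def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is always sunny in {city}."


# Placeholder model string; any tool-calling chat model would do.
agent = create_agent(model="anthropic:claude-sonnet-4-0", tools=[get_weather])
result = agent.invoke({"messages": [{"role": "user", "content": "Weather in Paris?"}]})
print(result["messages"][-1].content)
```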
@@ -1135,7 +1167,8 @@ def create_agent(  # noqa: D417
     # Handle deprecated config_schema parameter
     if (config_schema := deprecated_kwargs.pop("config_schema", MISSING)) is not MISSING:
         warn(
-            "`config_schema` is deprecated and will be removed. Please use `context_schema` instead.",
+            "`config_schema` is deprecated and will be removed. "
+            "Please use `context_schema` instead.",
             category=DeprecationWarning,
             stacklevel=2,
         )
@@ -47,7 +47,8 @@ class MultipleStructuredOutputsError(StructuredOutputError):
         self.tool_names = tool_names

         super().__init__(
-            f"Model incorrectly returned multiple structured responses ({', '.join(tool_names)}) when only one is expected."
+            "Model incorrectly returned multiple structured responses "
+            f"({', '.join(tool_names)}) when only one is expected."
         )


@@ -98,7 +99,8 @@ class _SchemaSpec(Generic[SchemaT]):
     """Describes a structured output schema."""

     schema: type[SchemaT]
-    """The schema for the response, can be a Pydantic model, dataclass, TypedDict, or JSON schema dict."""
+    """The schema for the response, can be a Pydantic model, dataclass, TypedDict,
+    or JSON schema dict."""

     name: str
     """Name of the schema, used for tool calling.
@@ -178,7 +180,8 @@ class ToolStrategy(Generic[SchemaT]):
     """Schema specs for the tool calls."""

     tool_message_content: str | None
-    """The content of the tool message to be returned when the model calls an artificial structured output tool."""
+    """The content of the tool message to be returned when the model calls
+    an artificial structured output tool."""

     handle_errors: (
         bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str]
@@ -204,7 +207,10 @@ class ToolStrategy(Generic[SchemaT]):
         | tuple[type[Exception], ...]
         | Callable[[Exception], str] = True,
     ) -> None:
-        """Initialize ToolStrategy with schemas, tool message content, and error handling strategy."""
+        """Initialize ToolStrategy.
+
+        Initialize ToolStrategy with schemas, tool message content, and error handling strategy.
+        """
         self.schema = schema
         self.tool_message_content = tool_message_content
         self.handle_errors = handle_errors
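A hypothetical instantiation matching the initializer above, assuming a Pydantic model as the schema; only the three attributes assigned in `__init__` are shown:

```python
from pydantic import BaseModel


class WeatherReport(BaseModel):
    city: str
    forecast: str


# Arguments mirror the initializer documented above, with defaults spelled out.
strategy = ToolStrategy(
    schema=WeatherReport,
    tool_message_content=None,
    handle_errors=True,
)
```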
@@ -268,7 +274,8 @@ class OutputToolBinding(Generic[SchemaT]):
     """

     schema: type[SchemaT]
-    """The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
+    """The original schema provided for structured output
+    (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""

     schema_kind: SchemaKind
     """Classification of the schema type for proper response construction."""
@@ -321,7 +328,8 @@ class ProviderStrategyBinding(Generic[SchemaT]):
     """

     schema: type[SchemaT]
-    """The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
+    """The original schema provided for structured output
+    (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""

     schema_kind: SchemaKind
     """Classification of the schema type for proper response construction."""
@@ -362,7 +370,10 @@ class ProviderStrategyBinding(Generic[SchemaT]):
             data = json.loads(raw_text)
         except Exception as e:
             schema_name = getattr(self.schema, "__name__", "response_format")
-            msg = f"Native structured output expected valid JSON for {schema_name}, but parsing failed: {e}."
+            msg = (
+                f"Native structured output expected valid JSON for {schema_name}, "
+                f"but parsing failed: {e}."
+            )
             raise ValueError(msg) from e

         # Parse according to schema
@@ -87,8 +87,16 @@ INVALID_TOOL_NAME_ERROR_TEMPLATE = (
     "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
 )
 TOOL_CALL_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
-TOOL_EXECUTION_ERROR_TEMPLATE = "Error executing tool '{tool_name}' with kwargs {tool_kwargs} with error:\n {error}\n Please fix the error and try again."
-TOOL_INVOCATION_ERROR_TEMPLATE = "Error invoking tool '{tool_name}' with kwargs {tool_kwargs} with error:\n {error}\n Please fix the error and try again."
+TOOL_EXECUTION_ERROR_TEMPLATE = (
+    "Error executing tool '{tool_name}' with kwargs {tool_kwargs} with error:\n"
+    " {error}\n"
+    " Please fix the error and try again."
+)
+TOOL_INVOCATION_ERROR_TEMPLATE = (
+    "Error invoking tool '{tool_name}' with kwargs {tool_kwargs} with error:\n"
+    " {error}\n"
+    " Please fix the error and try again."
+)


 def msg_content_output(output: Any) -> str | list[dict]:
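Splitting the templates changes nothing at runtime, because the `\n` escapes stay inside the fragments; a quick equivalence check:

```python
single = "Error: {error}\n Please fix your mistakes."
wrapped = (
    "Error: {error}\n"
    " Please fix your mistakes."
)
# Adjacent literals are joined at compile time, so the values are identical.
assert single == wrapped
```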
@@ -313,7 +321,8 @@ class ToolNode(RunnableCallable):
           error template containing the exception details.
         - **str**: Catch all errors and return a ToolMessage with this custom
           error message string.
-        - **type[Exception]**: Only catch exceptions with the specified type and return the default error message for it.
+        - **type[Exception]**: Only catch exceptions with the specified type and
+          return the default error message for it.
         - **tuple[type[Exception], ...]**: Only catch exceptions with the specified
           types and return default error messages for them.
         - **Callable[..., str]**: Catch exceptions matching the callable's signature
@@ -368,7 +377,7 @@ class ToolNode(RunnableCallable):

         tool_node = ToolNode([my_tool], handle_tool_errors=handle_errors)
         ```
-    """
+    """  # noqa: E501

     name: str = "tools"

@@ -506,10 +515,12 @@ class ToolNode(RunnableCallable):

         # GraphInterrupt is a special exception that will always be raised.
         # It can be triggered in the following scenarios,
-        # Where GraphInterrupt(GraphBubbleUp) is raised from an `interrupt` invocation most commonly:
+        # Where GraphInterrupt(GraphBubbleUp) is raised from an `interrupt` invocation
+        # most commonly:
         # (1) a GraphInterrupt is raised inside a tool
         # (2) a GraphInterrupt is raised inside a graph node for a graph called as a tool
-        # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph called as a tool
+        # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph
+        # called as a tool
         # (2 and 3 can happen in a "supervisor w/ tools" multi-agent architecture)
         except GraphBubbleUp:
             raise
@@ -570,10 +581,12 @@ class ToolNode(RunnableCallable):

         # GraphInterrupt is a special exception that will always be raised.
         # It can be triggered in the following scenarios,
-        # Where GraphInterrupt(GraphBubbleUp) is raised from an `interrupt` invocation most commonly:
+        # Where GraphInterrupt(GraphBubbleUp) is raised from an `interrupt` invocation
+        # most commonly:
         # (1) a GraphInterrupt is raised inside a tool
         # (2) a GraphInterrupt is raised inside a graph node for a graph called as a tool
-        # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph called as a tool
+        # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph
+        # called as a tool
         # (2 and 3 can happen in a "supervisor w/ tools" multi-agent architecture)
         except GraphBubbleUp:
             raise
@@ -770,10 +783,12 @@ class ToolNode(RunnableCallable):
         input_type: Literal["list", "dict", "tool_calls"],
     ) -> Command:
         if isinstance(command.update, dict):
-            # input type is dict when ToolNode is invoked with a dict input (e.g. {"messages": [AIMessage(..., tool_calls=[...])]})
+            # input type is dict when ToolNode is invoked with a dict input
+            # (e.g. {"messages": [AIMessage(..., tool_calls=[...])]})
             if input_type not in ("dict", "tool_calls"):
                 msg = (
-                    f"Tools can provide a dict in Command.update only when using dict with '{self._messages_key}' key as ToolNode input, "
+                    "Tools can provide a dict in Command.update only when using dict "
+                    f"with '{self._messages_key}' key as ToolNode input, "
                     f"got: {command.update} for tool '{call['name']}'"
                 )
                 raise ValueError(msg)
@@ -782,10 +797,12 @@ class ToolNode(RunnableCallable):
             state_update = cast("dict[str, Any]", updated_command.update) or {}
             messages_update = state_update.get(self._messages_key, [])
         elif isinstance(command.update, list):
-            # Input type is list when ToolNode is invoked with a list input (e.g. [AIMessage(..., tool_calls=[...])])
+            # Input type is list when ToolNode is invoked with a list input
+            # (e.g. [AIMessage(..., tool_calls=[...])])
            if input_type != "list":
                msg = (
-                    f"Tools can provide a list of messages in Command.update only when using list of messages as ToolNode input, "
+                    "Tools can provide a list of messages in Command.update "
+                    "only when using list of messages as ToolNode input, "
                     f"got: {command.update} for tool '{call['name']}'"
                 )
                 raise ValueError(msg)
@@ -815,13 +832,17 @@ class ToolNode(RunnableCallable):
             # Command.update if command is sent to the CURRENT graph
             if updated_command.graph is None and not has_matching_tool_message:
                 example_update = (
-                    '`Command(update={"messages": [ToolMessage("Success", tool_call_id=tool_call_id), ...]}, ...)`'
+                    '`Command(update={"messages": '
+                    '[ToolMessage("Success", tool_call_id=tool_call_id), ...]}, ...)`'
                     if input_type == "dict"
-                    else '`Command(update=[ToolMessage("Success", tool_call_id=tool_call_id), ...], ...)`'
+                    else "`Command(update="
+                    '[ToolMessage("Success", tool_call_id=tool_call_id), ...], ...)`'
                 )
                 msg = (
-                    f"Expected to have a matching ToolMessage in Command.update for tool '{call['name']}', got: {messages_update}. "
-                    "Every tool call (LLM requesting to call a tool) in the message history MUST have a corresponding ToolMessage. "
+                    "Expected to have a matching ToolMessage in Command.update "
+                    f"for tool '{call['name']}', got: {messages_update}. "
+                    "Every tool call (LLM requesting to call a tool) "
+                    "in the message history MUST have a corresponding ToolMessage. "
                     f"You can fix it by modifying the tool to return {example_update}."
                 )
                 raise ValueError(msg)
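The `else` arm above leans on a precedence detail: adjacent string literals are joined at parse time, before the conditional expression is evaluated, so both fragments belong to the `else` result. In isolation:

```python
input_type = "list"
example = (
    "dict-form"
    if input_type == "dict"
    else "list-"
    "form"  # joins with the previous literal inside the else arm
)
assert example == "list-form"
```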
@@ -131,7 +131,6 @@ flake8-annotations.allow-star-arg-any = true
 ]
 "langchain/agents/*" = [
     "ANN401",  # we use Any right now, need to narrow
-    "E501",  # line too long, needs to fix
     "A002",  # input is shadowing builtin
     "A001",  # input is shadowing builtin
     "B904",  # use from for exceptions