mirror of
https://github.com/hwchase17/langchain.git
synced 2026-05-03 01:46:42 +00:00
feat(langchain): add respond decision to HITL middleware (#37095)
Extends `HumanInTheLoopMiddleware` with a new `respond` decision type
for "ask user" style tools — tools whose real implementation is the
human's response. The interrupt is raised with the tool call as usual;
the resume payload becomes the body of a synthetic `ToolMessage` with
`status="success"`, and the tool itself is not executed.
This complements `reject` (which produces a synthetic `ToolMessage` with
`status="error"`) by enabling the symmetric success path: a reviewer can
answer on the tool's behalf without invoking it.
## Changes
- New `RespondDecision` `TypedDict` with a required `message: str`
field; added to the `Decision` union.
- `"respond"` added to the `DecisionType` literal.
- `_process_decision` handles `"respond"` by emitting a `ToolMessage`
with `status="success"` and preserving the original tool call on the
`AIMessage` so provider-required tool-call/tool-message pairing is
maintained.
- The `True` shortcut in `interrupt_on` now expands to `["approve",
"edit", "reject", "respond"]`, so existing callers that opted into "all
decisions" pick up the new capability without code changes.
- Security note: the `reject` decision already permits a reviewer to
inject arbitrary `ToolMessage` content, so `respond` extends the same
trust model rather than introducing a new capability class.
## Example
```python
from langchain.agents.middleware import HumanInTheLoopMiddleware
middleware = HumanInTheLoopMiddleware(
interrupt_on={"ask_user": {"allowed_decisions": ["respond"]}}
)
# Resume payload: {"decisions": [{"type": "respond", "message": "blue"}]}
# → synthetic ToolMessage(content="blue", status="success") for `ask_user`.
```
---
*Implementation drafted with AI-agent assistance.*
Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -39,7 +39,7 @@ class ActionRequest(TypedDict):
|
||||
"""The description of the action to be reviewed."""
|
||||
|
||||
|
||||
DecisionType = Literal["approve", "edit", "reject"]
|
||||
DecisionType = Literal["approve", "edit", "reject", "respond"]
|
||||
|
||||
|
||||
class ReviewConfig(TypedDict):
|
||||
@@ -95,7 +95,22 @@ class RejectDecision(TypedDict):
|
||||
"""The message sent to the model explaining why the action was rejected."""
|
||||
|
||||
|
||||
Decision = ApproveDecision | EditDecision | RejectDecision
|
||||
class RespondDecision(TypedDict):
    """Decision in which a human supplies the tool's output directly.

    Intended for "ask user" style tools whose real implementation is the
    human's answer. The tool itself is never invoked; instead, a synthetic
    `ToolMessage` carrying the provided `message` with `status="success"`
    is returned to the model.
    """

    type: Literal["respond"]
    """The type of response when a human responds on behalf of the tool."""

    message: str
    """Content of the synthetic `ToolMessage` returned to the model."""
|
||||
|
||||
|
||||
Decision = ApproveDecision | EditDecision | RejectDecision | RespondDecision
|
||||
|
||||
|
||||
class HITLResponse(TypedDict):
|
||||
@@ -180,7 +195,8 @@ class HumanInTheLoopMiddleware(AgentMiddleware[StateT, ContextT, ResponseT]):
|
||||
|
||||
If a tool doesn't have an entry, it's auto-approved by default.
|
||||
|
||||
* `True` indicates all decisions are allowed: approve, edit, and reject.
|
||||
* `True` indicates all decisions are allowed: approve, edit, reject,
|
||||
and respond.
|
||||
* `False` indicates that the tool is auto-approved.
|
||||
* `InterruptOnConfig` indicates the specific decisions allowed for this
|
||||
tool.
|
||||
@@ -200,7 +216,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware[StateT, ContextT, ResponseT]):
|
||||
if isinstance(tool_config, bool):
|
||||
if tool_config is True:
|
||||
resolved_configs[tool_name] = InterruptOnConfig(
|
||||
allowed_decisions=["approve", "edit", "reject"]
|
||||
allowed_decisions=["approve", "edit", "reject", "respond"]
|
||||
)
|
||||
elif tool_config.get("allowed_decisions"):
|
||||
resolved_configs[tool_name] = tool_config
|
||||
@@ -277,6 +293,15 @@ class HumanInTheLoopMiddleware(AgentMiddleware[StateT, ContextT, ResponseT]):
|
||||
status="error",
|
||||
)
|
||||
return tool_call, tool_message
|
||||
if decision["type"] == "respond" and "respond" in allowed_decisions:
|
||||
# Skip tool execution; the human answers on behalf of the tool.
|
||||
tool_message = ToolMessage(
|
||||
content=decision["message"],
|
||||
name=tool_call["name"],
|
||||
tool_call_id=tool_call["id"],
|
||||
status="success",
|
||||
)
|
||||
return tool_call, tool_message
|
||||
msg = (
|
||||
f"Unexpected human decision: {decision}. "
|
||||
f"Decision type '{decision.get('type')}' "
|
||||
|
||||
@@ -150,6 +150,136 @@ def test_human_in_the_loop_middleware_single_tool_response() -> None:
|
||||
assert result["messages"][1].tool_call_id == "1"
|
||||
|
||||
|
||||
def test_human_in_the_loop_middleware_single_tool_respond() -> None:
    """Test HumanInTheLoopMiddleware with `respond` decision producing a success ToolMessage."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"ask_user": {"allowed_decisions": ["respond"]}}
    )

    question_call = {"name": "ask_user", "args": {"question": "favorite color?"}, "id": "1"}
    agent_state = AgentState[Any](
        messages=[
            HumanMessage(content="Hello"),
            AIMessage(content="Let me ask the user.", tool_calls=[question_call]),
        ]
    )

    def fake_interrupt(_: Any) -> dict[str, Any]:
        return {"decisions": [{"type": "respond", "message": "blue"}]}

    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        side_effect=fake_interrupt,
    ):
        outcome = middleware.after_model(agent_state, Runtime())

    assert outcome is not None
    assert "messages" in outcome
    assert len(outcome["messages"]) == 2

    updated_ai = outcome["messages"][0]
    assert isinstance(updated_ai, AIMessage)
    # Provider APIs require the tool call to stay paired with its ToolMessage.
    assert len(updated_ai.tool_calls) == 1
    assert updated_ai.tool_calls[0]["id"] == "1"

    synthetic = outcome["messages"][1]
    assert isinstance(synthetic, ToolMessage)
    assert synthetic.content == "blue"
    assert synthetic.name == "ask_user"
    assert synthetic.tool_call_id == "1"
    assert synthetic.status == "success"
|
||||
|
||||
|
||||
def test_human_in_the_loop_middleware_respond_disallowed() -> None:
    """Test that `respond` raises when not in `allowed_decisions`."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )

    agent_state = AgentState[Any](
        messages=[
            HumanMessage(content="Hello"),
            AIMessage(
                content="I'll help you",
                tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
            ),
        ]
    )

    def fake_interrupt(_: Any) -> dict[str, Any]:
        return {"decisions": [{"type": "respond", "message": "synthetic"}]}

    expected_error = re.escape(
        "Decision type 'respond' is not allowed for tool 'test_tool'. "
        "Expected one of ['approve', 'edit', 'reject'] based on the tool's "
        "configuration."
    )
    with (
        patch(
            "langchain.agents.middleware.human_in_the_loop.interrupt",
            side_effect=fake_interrupt,
        ),
        pytest.raises(ValueError, match=expected_error),
    ):
        middleware.after_model(agent_state, Runtime())
|
||||
|
||||
|
||||
def test_human_in_the_loop_middleware_mixed_with_respond() -> None:
    """Test mixed decisions: one tool approved, one tool answered via `respond`."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "get_forecast": {"allowed_decisions": ["approve"]},
            "ask_user": {"allowed_decisions": ["respond"]},
        }
    )

    calls = [
        {"name": "get_forecast", "args": {"location": "SF"}, "id": "1"},
        {"name": "ask_user", "args": {"question": "favorite color?"}, "id": "2"},
    ]
    agent_state = AgentState[Any](
        messages=[
            HumanMessage(content="Hi"),
            AIMessage(content="Two things", tool_calls=calls),
        ]
    )

    def fake_interrupt(_: Any) -> dict[str, Any]:
        return {
            "decisions": [
                {"type": "approve"},
                {"type": "respond", "message": "blue"},
            ]
        }

    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        side_effect=fake_interrupt,
    ):
        outcome = middleware.after_model(agent_state, Runtime())

    assert outcome is not None
    # AI message + 1 synthetic ToolMessage for the respond decision.
    assert len(outcome["messages"]) == 2

    updated_ai = outcome["messages"][0]
    assert len(updated_ai.tool_calls) == 2
    assert [tc["name"] for tc in updated_ai.tool_calls] == ["get_forecast", "ask_user"]

    synthetic = outcome["messages"][1]
    assert isinstance(synthetic, ToolMessage)
    assert synthetic.content == "blue"
    assert synthetic.name == "ask_user"
    assert synthetic.tool_call_id == "2"
    assert synthetic.status == "success"
|
||||
|
||||
|
||||
def test_human_in_the_loop_middleware_true_allows_respond() -> None:
    """Test that the `True` shortcut permits `respond` decisions."""
    middleware = HumanInTheLoopMiddleware(interrupt_on={"ask_user": True})

    agent_state = AgentState[Any](
        messages=[
            HumanMessage(content="Hi"),
            AIMessage(
                content="Asking",
                tool_calls=[{"name": "ask_user", "args": {"q": "?"}, "id": "1"}],
            ),
        ]
    )

    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        return_value={"decisions": [{"type": "respond", "message": "answer"}]},
    ):
        outcome = middleware.after_model(agent_state, Runtime())

    assert outcome is not None
    assert len(outcome["messages"]) == 2

    synthetic = outcome["messages"][1]
    assert isinstance(synthetic, ToolMessage)
    assert synthetic.content == "answer"
    assert synthetic.status == "success"
|
||||
|
||||
|
||||
def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
|
||||
"""Test HumanInTheLoopMiddleware with multiple tools and mixed response types."""
|
||||
middleware = HumanInTheLoopMiddleware(
|
||||
|
||||
Reference in New Issue
Block a user