fix(anthropic,standard-tests): carry over updates to v0.3 (#33393)

Cherry pick of https://github.com/langchain-ai/langchain/pull/33390 and
https://github.com/langchain-ai/langchain/pull/33391.
This commit is contained in:
ccurme
2025-10-09 14:25:34 -04:00
committed by GitHub
parent dd4de696b8
commit 5fa1094451
4 changed files with 95 additions and 2 deletions

View File

@@ -489,6 +489,10 @@ def _format_messages(
_lc_tool_calls_to_anthropic_tool_use_blocks(missing_tool_calls),
)
if not content and role == "assistant" and _i < len(merged_messages) - 1:
# anthropic.BadRequestError: Error code: 400: all messages must have
# non-empty content except for the optional final assistant message
continue
formatted_messages.append({"role": role, "content": content})
return system, formatted_messages

View File

@@ -271,6 +271,53 @@ def test_system_invoke() -> None:
assert isinstance(result.content, str)
def test_handle_empty_aimessage() -> None:
    # Anthropic can emit empty AIMessages, which the API only accepts as the
    # final message in a sequence; mid-sequence ones must be dropped by
    # _format_messages. Verify both a plain chat and a tool-call exchange
    # containing an empty AIMessage can be sent without a 400 error.
    llm = ChatAnthropic(model=MODEL_NAME)
    plain_conversation = [
        HumanMessage("Hello"),
        AIMessage([]),
        HumanMessage("My name is Bob."),
    ]
    _ = llm.invoke(plain_conversation)

    # Test tool call sequence
    weather_tool = {
        "name": "get_weather",
        "description": "Get weather report for a city",
        "input_schema": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
        },
    }
    llm_with_tools = llm.bind_tools([weather_tool])
    tool_call = {
        "name": "get_weather",
        "args": {"location": "Boston"},
        "id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
        "type": "tool_call",
    }
    tool_conversation = [
        HumanMessage("What's the weather in Boston?"),
        AIMessage(content=[], tool_calls=[tool_call]),
        ToolMessage(
            content="It's sunny.", tool_call_id="toolu_01V6d6W32QGGSmQm4BT98EKk"
        ),
        AIMessage([]),
        HumanMessage("Thanks!"),
    ]
    _ = llm_with_tools.invoke(tool_conversation)
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
chat = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]

View File

@@ -594,6 +594,38 @@ def test__format_messages_with_tool_calls() -> None:
actual = _format_messages(messages)
assert expected == actual
# Check handling of empty AIMessage
empty_contents: list[str | list[str | dict]] = ["", []]
for empty_content in empty_contents:
## Permit message in final position
_, anthropic_messages = _format_messages([human, AIMessage(empty_content)])
expected_messages = [
{"role": "user", "content": "foo"},
{"role": "assistant", "content": empty_content},
]
assert expected_messages == anthropic_messages
## Remove message otherwise
_, anthropic_messages = _format_messages(
[human, AIMessage(empty_content), human]
)
expected_messages = [
{"role": "user", "content": "foo"},
{"role": "user", "content": "foo"},
]
assert expected_messages == anthropic_messages
actual = _format_messages(
[system, human, ai, tool, AIMessage(empty_content), human]
)
assert actual[0] == "fuzz"
assert [message["role"] for message in actual[1]] == [
"user",
"assistant",
"user",
"user",
]
def test__format_tool_use_block() -> None:
# Test we correctly format tool_use blocks when there is no corresponding tool_call.

View File

@@ -2979,8 +2979,18 @@ class ChatModelIntegrationTests(ChatModelTests):
[
{
"type": "thinking",
"thinking": "I'm thinking...",
"signature": "abc123",
"thinking": (
"This is a simple greeting. I should respond warmly and "
"professionally, and perhaps ask how I can help the person "
"today."
),
"signature": (
"ErUBCkYICBgCIkDCTQUXPc3O7nHXd302Zercaz8WrrpddpOqHITxBih5ze"
"FPoJkwKBvkvZ8ID1aAfJftji6+ZI5gBYDo7XmNBIkzEgzVDHKopedAn/sc"
"G80aDFDXVZrDOWgla7lEBiIwLq5kfFjQjvF/CyuL8J5V7dRwsJN5gQIXaM"
"B6xXTs6T+2Zp0VdiyiMb/hcdrHt+7aKh0z2E1UnjiOCoTlofNFHzOnKk0q"
"PIoPmfGgpPgGNRgC"
),
},
{
"type": "text",