This commit is contained in:
Bagatur 2024-08-30 12:52:06 -07:00
parent b31faf6572
commit eb7e485886
4 changed files with 141 additions and 99 deletions

View File

@ -44,7 +44,7 @@ from langchain_core.messages.utils import (
_message_from_dict,
convert_to_messages,
filter_messages,
format_messages_as,
format_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
@ -84,5 +84,5 @@ __all__ = [
"filter_messages",
"merge_message_runs",
"trim_messages",
"format_messages_as",
"format_messages",
]

View File

@ -934,30 +934,52 @@ def _runnable_generator(func: Callable) -> Callable:
@_runnable_generator
def format_messages_as(
def format_messages(
messages: Union[MessageLikeRepresentation, Sequence[MessageLikeRepresentation]],
*,
format: Literal["openai", "anthropic"],
format: Literal["langchain-openai", "langchain-anthropic"],
text_format: Literal["string", "block"],
) -> Union[BaseMessage, List[BaseMessage]]:
"""Convert message contents into a standard format.
.. versionadded:: 0.2.36
Can be used imperatively (pass in messages, get out messages) or can be used
declaratively (call without messages, use resulting Runnable in a chain).
.. versionadded:: 0.2.37
Args:
messages: Message-like object or iterable of objects whose contents are already
messages: Message-like object or iterable of objects whose contents are
in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats.
format: Format to convert message contents to.
text_format: How to format text contents. If ``text='string'`` then any string
contents are left as strings. If a message has content blocks that are all
of type 'text', these are joined with a newline to make a single string. If
a message has content blocks and at least one isn't of type 'text', then
all blocks are left as dicts. If ``text='block'`` then all contents are
turned into a list of dicts.
format: Output message format:
- "langchain-openai":
BaseMessages with OpenAI-style contents.
- "langchain-anthropic":
BaseMessages with Anthropic-style contents.
text_format: How to format string or text block contents:
- "string":
If a message has a string content, this is left as a string. If
a message has content blocks that are all of type 'text', these are
joined with a newline to make a single string. If a message has
content blocks and at least one isn't of type 'text', then
all blocks are left as dicts.
- "block":
If a message has a string content, this is turned into a list
with a single content block of type 'text'. If a message has content
blocks these are left as is.
Returns:
A single BaseMessage is a single message-like object was passed in, else list
of BaseMessages.
The return type depends on the input type:
- BaseMessage:
If a single message-like object is passed in, a BaseMessage is
returned.
- List[BaseMessage]:
            If a sequence of message-like objects is passed in, a list
            of BaseMessages is returned.
- Runnable:
If no messages are passed in, a Runnable is generated that formats
messages (per the above) when invoked.
.. dropdown:: Basic usage
:open:
@ -965,7 +987,7 @@ def format_messages_as(
.. code-block:: python
from langchain_core.messages import (
format_messages_as,
format_messages,
AIMessage,
HumanMessage,
SystemMessage,
@ -979,7 +1001,7 @@ def format_messages_as(
ToolMessage("foobar", tool_call_id="1", name="bar"),
{"role": "assistant", "content": "thats nice"},
]
oai_strings = format_messages_as(messages, format="openai", text="string")
oai_strings = format_messages(messages, format="langchain-openai", text="string")
# -> [
# SystemMessage(content='foo'),
# HumanMessage(content=[{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]),
@ -988,7 +1010,7 @@ def format_messages_as(
# AIMessage(content='thats nice')
# ]
anthropic_blocks = format_messages_as(messages, format="anthropic", text="block")
anthropic_blocks = format_messages(messages, format="langchain-anthropic", text="block")
# -> [
# SystemMessage(content=[{'type': 'text', 'text': 'foo'}]),
# HumanMessage(content=[{'type': 'text', 'text': 'whats in this'}, {'type': 'image', 'source': {'type': 'base64', 'media_type': 'image/png', 'data': "'/9j/4AAQSk'"}}]),
@ -997,15 +1019,15 @@ def format_messages_as(
# AIMessage(content=[{'type': 'text', 'text': 'thats nice'}])
# ]
.. dropdown:: Chain usage
.. dropdown:: Chaining
:open:
.. code-block:: python
from langchain_core.messages import format_messages_as
from langchain_core.messages import format_messages
from langchain.chat_models import init_chat_model
formatter = format_messages_as(format="openai", text="string")
formatter = format_messages(format="langchain-openai", text="string")
llm = init_chat_model() | formatter
llm.invoke(
@ -1020,6 +1042,15 @@ def format_messages_as(
)
# -> AIMessage(["My name is...], ...)
.. dropdown:: Streaming
:open:
.. code-block:: python
from langchain_core.messages import format_messages
from langchain.chat_models import init_chat_model
formatter = format_messages(format="langchain-openai", text="string")
def multiply(a: int, b: int) -> int:
'''Return product of a and b.'''
@ -1031,40 +1062,41 @@ def format_messages_as(
"what's 5 times 2", config={"model": "claude-3-5-sonnet-20240620"}
):
print(chunk)
# -> AIMessageChunk(content='', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75', usage_metadata={'input_tokens': 370, 'output_tokens': 0, 'total_tokens': 370}),
# AIMessageChunk(content='Certainly', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content='! To', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' calculate', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' 5 times ', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content='2, we can use', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' the "', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content='multiply" function that', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content="'s", id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' available to', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' us.', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' Let', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content="'s use", id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' this tool', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' to', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' get', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content=' the result.', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75'),
# AIMessageChunk(content='', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75', tool_calls=[{'name': 'multiply', 'args': {}, 'id': 'toolu_01PW8o6BkATCecjsJX8QgG6z', 'type': 'tool_call'}], tool_call_chunks=[{'name': 'multiply', 'args': '', 'id': 'toolu_01PW8o6BkATCecjsJX8QgG6z', 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75', tool_calls=[{'name': '', 'args': {}, 'id': None, 'type': 'tool_call'}], tool_call_chunks=[{'name': None, 'args': '', 'id': None, 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75', tool_calls=[{'name': '', 'args': {'a': 5}, 'id': None, 'type': 'tool_call'}], tool_call_chunks=[{'name': None, 'args': '{"a": 5', 'id': None, 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75', invalid_tool_calls=[{'name': None, 'args': ', "b": 2}', 'id': None, 'error': None, 'type': 'invalid_tool_call'}], tool_call_chunks=[{'name': None, 'args': ', "b": 2}', 'id': None, 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', response_metadata={'stop_reason': 'tool_use', 'stop_sequence': None}, id='run-64757cb2-b85f-4d51-8f34-5a6c1d40ad75', usage_metadata={'input_tokens': 0, 'output_tokens': 104, 'total_tokens': 104})
# -> AIMessageChunk(content='', id='run-6...', usage_metadata={'input_tokens': 370, 'output_tokens': 0, 'total_tokens': 370}),
# AIMessageChunk(content='Certainly', id='run-6...'),
# AIMessageChunk(content='! To', id='run-6...'),
# AIMessageChunk(content=' calculate', id='run-6...'),
# AIMessageChunk(content=' 5 times ', id='run-6...'),
# AIMessageChunk(content='2, we can use', id='run-6...'),
# AIMessageChunk(content=' the "', id='run-6...'),
# AIMessageChunk(content='multiply" function that', id='run-6...'),
# AIMessageChunk(content="'s", id='run-6...'),
# AIMessageChunk(content=' available to', id='run-6...'),
# AIMessageChunk(content=' us.', id='run-6...'),
# AIMessageChunk(content=' Let', id='run-6...'),
# AIMessageChunk(content="'s use", id='run-6...'),
# AIMessageChunk(content=' this tool', id='run-6...'),
# AIMessageChunk(content=' to', id='run-6...'),
# AIMessageChunk(content=' get', id='run-6...'),
# AIMessageChunk(content=' the result.', id='run-6...'),
# AIMessageChunk(content='', id='run-6...', tool_calls=[{'name': 'multiply', 'args': {}, 'id': 'toolu_0...', 'type': 'tool_call'}], tool_call_chunks=[{'name': 'multiply', 'args': '', 'id': 'toolu_0...', 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', id='run-6...', tool_calls=[{'name': '', 'args': {}, 'id': None, 'type': 'tool_call'}], tool_call_chunks=[{'name': None, 'args': '', 'id': None, 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', id='run-6...', tool_calls=[{'name': '', 'args': {'a': 5}, 'id': None, 'type': 'tool_call'}], tool_call_chunks=[{'name': None, 'args': '{"a": 5', 'id': None, 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', id='run-6...', invalid_tool_calls=[{'name': None, 'args': ', "b": 2}', 'id': None, 'error': None, 'type': 'invalid_tool_call'}], tool_call_chunks=[{'name': None, 'args': ', "b": 2}', 'id': None, 'index': 1, 'type': 'tool_call_chunk'}]),
# AIMessageChunk(content='', response_metadata={'stop_reason': 'tool_use', 'stop_sequence': None}, id='run-6...', usage_metadata={'input_tokens': 0, 'output_tokens': 104, 'total_tokens': 104})
""" # noqa: E501
if is_single := isinstance(messages, (BaseMessage, dict)):
messages = [messages]
messages = convert_to_messages(messages, copy=True)
if format.lower() == "openai":
formatted = _format_messages_as_openai(messages, text_format=text_format)
elif format.lower() == "anthropic":
formatted = _format_messages_as_anthropic(messages, text_format=text_format)
if format.lower().replace("_", "-") == "langchain-openai":
formatted = _format_messages_openai(messages, text_format=text_format)
elif format.lower().replace("_", "-") == "langchain-anthropic":
formatted = _format_messages_anthropic(messages, text_format=text_format)
else:
raise ValueError(
f"Unrecognized {format=}. Expected one of ('openai', 'anthropic')."
f"Unrecognized {format=}. Expected one of ('langchain-openai', "
f"'langchain-anthropic')."
)
if is_single:
return formatted[0]
@ -1072,7 +1104,7 @@ def format_messages_as(
return formatted
def _format_messages_as_openai(
def _format_messages_openai(
messages: Sequence[BaseMessage], *, text_format: Literal["string", "block"]
) -> List[BaseMessage]:
"""Mutates messages so their contents match OpenAI messages API."""
@ -1226,7 +1258,7 @@ def _format_messages_as_openai(
)
# Recurse to make sure tool message contents are OpenAI format.
tool_messages.extend(
_format_messages_as_openai(
_format_messages_openai(
[tool_message], text_format=text_format
)
)
@ -1307,7 +1339,7 @@ def _format_messages_as_openai(
_OPTIONAL_ANTHROPIC_KEYS = ("cache_control", "is_error", "index")
def _format_messages_as_anthropic(
def _format_messages_anthropic(
messages: Sequence[BaseMessage], *, text_format: Literal["string", "block"]
) -> List[BaseMessage]:
"""Mutates messages so their contents match Anthropic messages API."""

View File

@ -32,7 +32,7 @@ EXPECTED_ALL = [
"filter_messages",
"merge_message_runs",
"trim_messages",
"format_messages_as",
"format_messages",
]

View File

@ -17,7 +17,7 @@ from langchain_core.messages.utils import (
_bytes_to_b64_str,
convert_to_messages,
filter_messages,
format_messages_as,
format_messages,
merge_message_runs,
trim_messages,
)
@ -566,20 +566,20 @@ def create_base64_image(format: str = "jpeg") -> str:
return f"data:image/{format};base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAABAAEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigD//2Q==" # noqa: E501
def test_format_messages_as_single_message() -> None:
def test_format_messages_single_message() -> None:
message = HumanMessage(content="Hello")
result = format_messages_as(message, format="openai", text_format="string")
result = format_messages(message, format="langchain-openai", text_format="string")
assert isinstance(result, BaseMessage)
assert result.content == "Hello"
def test_format_messages_as_multiple_messages() -> None:
def test_format_messages_multiple_messages() -> None:
messages = [
SystemMessage(content="System message"),
HumanMessage(content="Human message"),
AIMessage(content="AI message"),
]
result = format_messages_as(messages, format="openai", text_format="string")
result = format_messages(messages, format="langchain-openai", text_format="string")
assert isinstance(result, list)
assert len(result) == 3
assert all(isinstance(msg, BaseMessage) for msg in result)
@ -590,7 +590,7 @@ def test_format_messages_as_multiple_messages() -> None:
]
def test_format_messages_as_openai_string() -> None:
def test_format_messages_openai_string() -> None:
messages = [
HumanMessage(
content=[
@ -602,23 +602,23 @@ def test_format_messages_as_openai_string() -> None:
content=[{"type": "text", "text": "Hi"}, {"type": "text", "text": "there"}]
),
]
result = format_messages_as(messages, format="openai", text_format="string")
result = format_messages(messages, format="langchain-openai", text_format="string")
assert [msg.content for msg in result] == ["Hello\nWorld", "Hi\nthere"]
def test_format_messages_as_openai_block() -> None:
def test_format_messages_openai_block() -> None:
messages = [
HumanMessage(content="Hello"),
AIMessage(content="Hi there"),
]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert [msg.content for msg in result] == [
[{"type": "text", "text": "Hello"}],
[{"type": "text", "text": "Hi there"}],
]
def test_format_messages_as_anthropic_string() -> None:
def test_format_messages_anthropic_string() -> None:
messages = [
HumanMessage(
content=[
@ -630,30 +630,34 @@ def test_format_messages_as_anthropic_string() -> None:
content=[{"type": "text", "text": "Hi"}, {"type": "text", "text": "there"}]
),
]
result = format_messages_as(messages, format="anthropic", text_format="string")
result = format_messages(
messages, format="langchain-anthropic", text_format="string"
)
assert [msg.content for msg in result] == ["Hello\nWorld", "Hi\nthere"]
def test_format_messages_as_anthropic_block() -> None:
def test_format_messages_anthropic_block() -> None:
messages = [
HumanMessage(content="Hello"),
AIMessage(content="Hi there"),
]
result = format_messages_as(messages, format="anthropic", text_format="block")
result = format_messages(
messages, format="langchain-anthropic", text_format="block"
)
assert [msg.content for msg in result] == [
[{"type": "text", "text": "Hello"}],
[{"type": "text", "text": "Hi there"}],
]
def test_format_messages_as_invalid_format() -> None:
def test_format_messages_invalid_format() -> None:
with pytest.raises(ValueError, match="Unrecognized format="):
format_messages_as(
format_messages(
[HumanMessage(content="Hello")], format="invalid", text_format="string"
)
def test_format_messages_as_openai_image() -> None:
def test_format_messages_openai_image() -> None:
base64_image = create_base64_image()
messages = [
HumanMessage(
@ -663,12 +667,12 @@ def test_format_messages_as_openai_image() -> None:
]
)
]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert result[0].content[1]["type"] == "image_url"
assert result[0].content[1]["image_url"]["url"] == base64_image
def test_format_messages_as_anthropic_image() -> None:
def test_format_messages_anthropic_image() -> None:
base64_image = create_base64_image()
messages = [
HumanMessage(
@ -678,21 +682,25 @@ def test_format_messages_as_anthropic_image() -> None:
]
)
]
result = format_messages_as(messages, format="anthropic", text_format="block")
result = format_messages(
messages, format="langchain-anthropic", text_format="block"
)
assert result[0].content[1]["type"] == "image"
assert result[0].content[1]["source"]["type"] == "base64"
assert result[0].content[1]["source"]["media_type"] == "image/jpeg"
def test_format_messages_as_tool_message() -> None:
def test_format_messages_tool_message() -> None:
tool_message = ToolMessage(content="Tool result", tool_call_id="123")
result = format_messages_as([tool_message], format="openai", text_format="block")
result = format_messages(
[tool_message], format="langchain-openai", text_format="block"
)
assert isinstance(result[0], ToolMessage)
assert result[0].content == [{"type": "text", "text": "Tool result"}]
assert result[0].tool_call_id == "123"
def test_format_messages_as_tool_use() -> None:
def test_format_messages_tool_use() -> None:
messages = [
AIMessage(
content=[
@ -700,21 +708,21 @@ def test_format_messages_as_tool_use() -> None:
]
)
]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert result[0].tool_calls[0]["id"] == "123"
assert result[0].tool_calls[0]["name"] == "calculator"
assert result[0].tool_calls[0]["args"] == "2+2"
def test_format_messages_as_json() -> None:
def test_format_messages_json() -> None:
json_data = {"key": "value"}
messages = [HumanMessage(content=[{"type": "json", "json": json_data}])]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert result[0].content[0]["type"] == "text"
assert json.loads(result[0].content[0]["text"]) == json_data
def test_format_messages_as_guard_content() -> None:
def test_format_messages_guard_content() -> None:
messages = [
HumanMessage(
content=[
@ -725,12 +733,12 @@ def test_format_messages_as_guard_content() -> None:
]
)
]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert result[0].content[0]["type"] == "text"
assert result[0].content[0]["text"] == "Protected content"
def test_format_messages_as_vertexai_image() -> None:
def test_format_messages_vertexai_image() -> None:
messages = [
HumanMessage(
content=[
@ -738,7 +746,7 @@ def test_format_messages_as_vertexai_image() -> None:
]
)
]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert result[0].content[0]["type"] == "image_url"
assert (
result[0].content[0]["image_url"]["url"]
@ -746,27 +754,27 @@ def test_format_messages_as_vertexai_image() -> None:
)
def test_format_messages_as_invalid_block() -> None:
def test_format_messages_invalid_block() -> None:
messages = [HumanMessage(content=[{"type": "invalid", "foo": "bar"}])]
with pytest.raises(ValueError, match="Unrecognized content block"):
format_messages_as(messages, format="openai", text_format="block")
format_messages(messages, format="langchain-openai", text_format="block")
with pytest.raises(ValueError, match="Unrecognized content block"):
format_messages_as(messages, format="anthropic", text_format="block")
format_messages(messages, format="langchain-anthropic", text_format="block")
def test_format_messages_as_empty_message() -> None:
result = format_messages_as(
HumanMessage(content=""), format="openai", text_format="string"
def test_format_messages_empty_message() -> None:
result = format_messages(
HumanMessage(content=""), format="langchain-openai", text_format="string"
)
assert result.content == ""
def test_format_messages_as_empty_list() -> None:
result = format_messages_as([], format="openai", text_format="string")
def test_format_messages_empty_list() -> None:
result = format_messages([], format="langchain-openai", text_format="string")
assert result == []
def test_format_messages_as_mixed_content_types() -> None:
def test_format_messages_mixed_content_types() -> None:
messages = [
HumanMessage(
content=[
@ -776,21 +784,23 @@ def test_format_messages_as_mixed_content_types() -> None:
]
)
]
result = format_messages_as(messages, format="openai", text_format="block")
result = format_messages(messages, format="langchain-openai", text_format="block")
assert len(result[0].content) == 3
assert isinstance(result[0].content[0], dict)
assert isinstance(result[0].content[1], dict)
assert isinstance(result[0].content[2], dict)
def test_format_messages_as_anthropic_tool_calls() -> None:
def test_format_messages_anthropic_tool_calls() -> None:
message = AIMessage(
"blah",
tool_calls=[
{"type": "tool_call", "name": "foo", "id": "1", "args": {"bar": "baz"}}
],
)
result = format_messages_as(message, format="anthropic", text_format="string")
result = format_messages(
message, format="langchain-anthropic", text_format="string"
)
assert result.content == [
{"type": "text", "text": "blah"},
{"type": "tool_use", "id": "1", "name": "foo", "input": {"bar": "baz"}},
@ -798,8 +808,8 @@ def test_format_messages_as_anthropic_tool_calls() -> None:
assert result.tool_calls == message.tool_calls
def test_format_messages_as_declarative() -> None:
formatter = format_messages_as(format="openai", text_format="block")
def test_format_messages_declarative() -> None:
formatter = format_messages(format="langchain-openai", text_format="block")
base64_image = create_base64_image()
messages = [
HumanMessage(
@ -996,7 +1006,7 @@ def _stream_anthropic(input_: Any) -> Iterator:
@pytest.mark.parametrize("stream", [_stream_oai, _stream_anthropic])
def test_format_messages_openai_string_stream(stream: Callable) -> None:
formatter = format_messages_as(format="openai", text_format="string")
formatter = format_messages(format="langchain-openai", text_format="string")
chain = RunnableLambda(stream) | formatter
tool_call_idx = 1 if stream == _stream_anthropic else 0
@ -1090,7 +1100,7 @@ def test_format_messages_openai_string_stream(stream: Callable) -> None:
@pytest.mark.parametrize("stream", [_stream_oai, _stream_anthropic])
def test_format_messages_openai_block_stream(stream: Callable) -> None:
formatter = format_messages_as(format="openai", text_format="block")
formatter = format_messages(format="langchain-openai", text_format="block")
chain = RunnableLambda(stream) | formatter
tool_call_idx = 1 if stream == _stream_anthropic else 0
@ -1183,7 +1193,7 @@ def test_format_messages_openai_block_stream(stream: Callable) -> None:
@pytest.mark.parametrize("stream", [_stream_oai, _stream_anthropic])
def test_format_messages_anthropic_block_stream(stream: Callable) -> None:
formatter = format_messages_as(format="anthropic", text_format="block")
formatter = format_messages(format="langchain-anthropic", text_format="block")
chain = RunnableLambda(stream) | formatter
expected = [
@ -1285,7 +1295,7 @@ def test_format_messages_anthropic_block_stream(stream: Callable) -> None:
@pytest.mark.parametrize("stream", [_stream_oai, _stream_anthropic])
def test_format_messages_anthropic_string_stream(stream: Callable) -> None:
formatter = format_messages_as(format="anthropic", text_format="string")
formatter = format_messages(format="langchain-anthropic", text_format="string")
chain = RunnableLambda(stream) | formatter
expected = [