fix(core): rename output_version to message_version (#32412)

Author: ccurme, 2025-08-05 15:23:58 -03:00 (committed by GitHub)
parent 757bae0263
commit 56ee00cb1d
12 changed files with 97 additions and 146 deletions


@@ -569,7 +569,7 @@ def convert_to_messages_v1(
     from langchain_core.prompt_values import PromptValue

     if isinstance(messages, PromptValue):
-        return messages.to_messages(output_version="v1")
+        return messages.to_messages(message_version="v1")
     return [_convert_to_message_v1(m) for m in messages]


@@ -96,15 +96,15 @@ class PromptValue(Serializable, ABC):
     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     @abstractmethod
     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt as a list of Messages."""
@@ -131,17 +131,17 @@ class StringPromptValue(PromptValue):
     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt as messages."""
-        if output_version == "v1":
+        if message_version == "v1":
             return [HumanMessageV1(content=self.text)]
         return [HumanMessage(content=self.text)]
@@ -161,21 +161,21 @@ class ChatPromptValue(PromptValue):
     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt as a list of messages.

         Args:
-            output_version: The output version, either "v0" (default) or "v1".
+            message_version: The output version, either "v0" (default) or "v1".
         """
-        if output_version == "v1":
+        if message_version == "v1":
             return [_convert_to_v1(m) for m in self.messages]
         return list(self.messages)
@@ -213,17 +213,17 @@ class ImagePromptValue(PromptValue):
     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt (image URL) as messages."""
-        if output_version == "v1":
+        if message_version == "v1":
             block: types.ImageContentBlock = {
                 "type": "image",
                 "url": self.image_url["url"],

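Taken together, these overloads mean callers pick the message family purely via the renamed keyword. A minimal sketch of the post-change call, constructing a StringPromptValue directly:

    from langchain_core.prompt_values import StringPromptValue

    pv = StringPromptValue(text="hello")
    legacy = pv.to_messages()                  # "v0" default -> list[BaseMessage]
    v1 = pv.to_messages(message_version="v1")  # -> list[MessageV1] (HumanMessageV1)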

@@ -2361,7 +2361,7 @@ class Runnable(ABC, Generic[Input, Output]):
         name: Optional[str] = None,
         description: Optional[str] = None,
         arg_types: Optional[dict[str, type]] = None,
-        output_version: Literal["v0", "v1"] = "v0",
+        message_version: Literal["v0", "v1"] = "v0",
     ) -> BaseTool:
         """Create a BaseTool from a Runnable.
@@ -2377,7 +2377,7 @@ class Runnable(ABC, Generic[Input, Output]):
             name: The name of the tool. Defaults to None.
             description: The description of the tool. Defaults to None.
             arg_types: A dictionary of argument names to types. Defaults to None.
-            output_version: Version of ToolMessage to return given
+            message_version: Version of ToolMessage to return given
                 :class:`~langchain_core.messages.content_blocks.ToolCall` input.

                 If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -2467,7 +2467,7 @@ class Runnable(ABC, Generic[Input, Output]):
             name=name,
             description=description,
             arg_types=arg_types,
-            output_version=output_version,
+            message_version=message_version,
         )
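For reference, a hedged sketch of the updated `Runnable.as_tool` call; the runnable, tool name, and description here are made up:

    # `summarizer` is a hypothetical Runnable defined elsewhere.
    summarize_tool = summarizer.as_tool(
        name="summarize",
        description="Summarize the input text.",
        message_version="v1",  # was output_version="v1" before this commit
    )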


@@ -499,7 +499,7 @@ class ChildTool(BaseTool):
    two-tuple corresponding to the (content, artifact) of a ToolMessage.
    """

-    output_version: Literal["v0", "v1"] = "v0"
+    message_version: Literal["v0", "v1"] = "v0"
    """Version of ToolMessage to return given
    :class:`~langchain_core.messages.content_blocks.ToolCall` input.
@@ -894,7 +894,7 @@ class ChildTool(BaseTool):
                    tool_call_id,
                    self.name,
                    status,
-                    output_version=self.output_version,
+                    message_version=self.message_version,
                )
            run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
            return output
@@ -1015,7 +1015,7 @@ class ChildTool(BaseTool):
                    tool_call_id,
                    self.name,
                    status,
-                    output_version=self.output_version,
+                    message_version=self.message_version,
                )
            await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
            return output
@@ -1156,7 +1156,7 @@ def _format_output(
    name: str,
    status: Literal["success", "error"],
    *,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> Union[ToolOutputMixin, Any]:
    """Format tool output as a ToolMessage if appropriate.
@@ -1166,7 +1166,7 @@ def _format_output(
        tool_call_id: The ID of the tool call.
        name: The name of the tool.
        status: The execution status.
-        output_version: The version of the ToolMessage to return.
+        message_version: The version of the ToolMessage to return.

    Returns:
        The formatted output, either as a ToolMessage or the original content.
@@ -1175,7 +1175,7 @@ def _format_output(
        return content
    if not _is_message_content_type(content):
        content = _stringify(content)
-    if output_version == "v0":
+    if message_version == "v0":
        return ToolMessage(
            content,
            artifact=artifact,
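To make the branch above concrete: when a tool is invoked with a ToolCall dict, `_format_output` selects the ToolMessage class by `message_version`. A sketch with a hypothetical tool and call ID:

    tool_call = {
        "name": "my_tool",  # hypothetical tool name
        "args": {"x": 1},
        "id": "call_123",
        "type": "tool_call",
    }
    result = my_tool.invoke(tool_call)
    # message_version="v0" -> langchain_core.messages.tool.ToolMessage
    # message_version="v1" -> the v1 ToolMessage class (ToolMessageV1 in the tests below)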


@@ -22,7 +22,7 @@ def tool(
    response_format: Literal["content", "content_and_artifact"] = "content",
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...
@@ -38,7 +38,7 @@ def tool(
    response_format: Literal["content", "content_and_artifact"] = "content",
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool: ...
@@ -53,7 +53,7 @@ def tool(
    response_format: Literal["content", "content_and_artifact"] = "content",
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool: ...
@@ -68,7 +68,7 @@ def tool(
    response_format: Literal["content", "content_and_artifact"] = "content",
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...
@@ -83,7 +83,7 @@ def tool(
    response_format: Literal["content", "content_and_artifact"] = "content",
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> Union[
    BaseTool,
    Callable[[Union[Callable, Runnable]], BaseTool],
@@ -123,7 +123,7 @@ def tool(
        error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
            whether to raise ValueError on invalid Google Style docstrings.
            Defaults to True.
-        output_version: Version of ToolMessage to return given
+        message_version: Version of ToolMessage to return given
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.

            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -284,7 +284,7 @@ def tool(
                response_format=response_format,
                parse_docstring=parse_docstring,
                error_on_invalid_docstring=error_on_invalid_docstring,
-                output_version=output_version,
+                message_version=message_version,
            )
        # If someone doesn't want a schema applied, we must treat it as
        # a simple string->string function
@@ -301,7 +301,7 @@ def tool(
                return_direct=return_direct,
                coroutine=coroutine,
                response_format=response_format,
-                output_version=output_version,
+                message_version=message_version,
            )

    return _tool_factory
@@ -395,7 +395,7 @@ def convert_runnable_to_tool(
    name: Optional[str] = None,
    description: Optional[str] = None,
    arg_types: Optional[dict[str, type]] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool:
    """Convert a Runnable into a BaseTool.
@@ -405,7 +405,7 @@ def convert_runnable_to_tool(
        name: The name of the tool. Defaults to None.
        description: The description of the tool. Defaults to None.
        arg_types: The types of the arguments. Defaults to None.
-        output_version: Version of ToolMessage to return given
+        message_version: Version of ToolMessage to return given
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.

            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -426,7 +426,7 @@ def convert_runnable_to_tool(
            func=runnable.invoke,
            coroutine=runnable.ainvoke,
            description=description,
-            output_version=output_version,
+            message_version=message_version,
        )

    async def ainvoke_wrapper(
@@ -454,5 +454,5 @@ def convert_runnable_to_tool(
        coroutine=ainvoke_wrapper,
        description=description,
        args_schema=args_schema,
-        output_version=output_version,
+        message_version=message_version,
    )
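Post-rename, the decorator is used as in the test file further down; a minimal example:

    from langchain_core.tools import tool

    @tool(message_version="v1")
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b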


@@ -72,7 +72,7 @@ def create_retriever_tool(
    document_prompt: Optional[BasePromptTemplate] = None,
    document_separator: str = "\n\n",
    response_format: Literal["content", "content_and_artifact"] = "content",
-    output_version: Literal["v0", "v1"] = "v1",
+    message_version: Literal["v0", "v1"] = "v1",
) -> Tool:
    r"""Create a tool to do retrieval of documents.
@@ -89,7 +89,7 @@ def create_retriever_tool(
            "content_and_artifact" then the output is expected to be a two-tuple
            corresponding to the (content, artifact) of a ToolMessage (artifact
            being a list of documents in this case). Defaults to "content".
-        output_version: Version of ToolMessage to return given
+        message_version: Version of ToolMessage to return given
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.

            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -120,5 +120,5 @@ def create_retriever_tool(
        coroutine=afunc,
        args_schema=RetrieverInput,
        response_format=response_format,
-        output_version=output_version,
+        message_version=message_version,
    )
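Note the default flips to "v1" in this factory, unlike the "v0" default on @tool, StructuredTool, and Runnable.as_tool. A sketch with a hypothetical retriever:

    retriever_tool = create_retriever_tool(
        retriever,  # any BaseRetriever instance (hypothetical)
        "search_docs",
        "Search the project documentation.",
        # message_version defaults to "v1" here
    )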


@@ -129,7 +129,7 @@ class StructuredTool(BaseTool):
        response_format: Literal["content", "content_and_artifact"] = "content",
        parse_docstring: bool = False,
        error_on_invalid_docstring: bool = False,
-        output_version: Literal["v0", "v1"] = "v0",
+        message_version: Literal["v0", "v1"] = "v0",
        **kwargs: Any,
    ) -> StructuredTool:
        """Create tool from a given function.
@@ -158,7 +158,7 @@ class StructuredTool(BaseTool):
            error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
                whether to raise ValueError on invalid Google Style docstrings.
                Defaults to False.
-            output_version: Version of ToolMessage to return given
+            message_version: Version of ToolMessage to return given
                :class:`~langchain_core.messages.content_blocks.ToolCall` input.

                If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -239,7 +239,7 @@ class StructuredTool(BaseTool):
            description=description_,
            return_direct=return_direct,
            response_format=response_format,
-            output_version=output_version,
+            message_version=message_version,
            **kwargs,
        )
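The classmethod keeps its "v0" default; a minimal sketch of passing the renamed keyword through from_function:

    from langchain_core.tools import StructuredTool

    def multiply(a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b

    calc = StructuredTool.from_function(func=multiply, message_version="v1")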


@@ -362,7 +362,7 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC):
    def _convert_input(self, model_input: LanguageModelInput) -> list[MessageV1]:
        if isinstance(model_input, PromptValue):
-            return model_input.to_messages(output_version="v1")
+            return model_input.to_messages(message_version="v1")
        if isinstance(model_input, str):
            return [HumanMessageV1(content=model_input)]
        if isinstance(model_input, Sequence):


@@ -1381,16 +1381,16 @@ def test_tool_annotated_descriptions() -> None:
    }

-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_call_input_tool_message(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_call_input_tool_message(message_version: Literal["v0", "v1"]) -> None:
    tool_call = {
        "name": "structured_api",
        "args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
        "id": "123",
        "type": "tool_call",
    }
-    tool = _MockStructuredTool(output_version=output_version)
-    if output_version == "v0":
+    tool = _MockStructuredTool(message_version=message_version)
+    if message_version == "v0":
        expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
            "1 True {'img': 'base64string...'}",
            tool_call_id="123",
@@ -1434,7 +1434,7 @@ def _mock_structured_tool_with_artifact(
    return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}

-@tool("structured_api", response_format="content_and_artifact", output_version="v1")
+@tool("structured_api", response_format="content_and_artifact", message_version="v1")
def _mock_structured_tool_with_artifact_v1(
    *, arg1: int, arg2: bool, arg3: Optional[dict] = None
) -> tuple[str, dict]:
@@ -1469,7 +1469,7 @@ def test_tool_call_input_tool_message_with_artifact(tool: BaseTool) -> None:
@pytest.mark.parametrize(
    "tool",
    [
-        _MockStructuredToolWithRawOutput(output_version="v1"),
+        _MockStructuredToolWithRawOutput(message_version="v1"),
        _mock_structured_tool_with_artifact_v1,
    ],
)
@@ -1603,7 +1603,7 @@ def injected_tool(x: int, y: Annotated[str, InjectedToolArg]) -> str:
    return y

-@tool("foo", parse_docstring=True, output_version="v1")
+@tool("foo", parse_docstring=True, message_version="v1")
def injected_tool_v1(x: int, y: Annotated[str, InjectedToolArg]) -> str:
    """Foo.
@@ -1651,12 +1651,12 @@ def injected_tool_with_schema(x: int, y: str) -> str:
    return y

-@tool("foo", args_schema=fooSchema, output_version="v1")
+@tool("foo", args_schema=fooSchema, message_version="v1")
def injected_tool_with_schema_v1(x: int, y: str) -> str:
    return y

-@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(output_version="v1")])
+@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(message_version="v1")])
def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
    assert _schema(tool_.get_input_schema()) == {
        "title": "foo",
@@ -1676,7 +1676,7 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
        "required": ["x"],
    }
    assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if tool_.output_version == "v0":
+    if tool_.message_version == "v0":
        expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
            "bar", tool_call_id="123", name="foo"
        )
@@ -1718,7 +1718,7 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
        injected_tool_with_schema,
        InjectedToolWithSchema(),
        injected_tool_with_schema_v1,
-        InjectedToolWithSchema(output_version="v1"),
+        InjectedToolWithSchema(message_version="v1"),
    ],
)
def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
@@ -1740,7 +1740,7 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
        "required": ["x"],
    }
    assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if tool_.output_version == "v0":
+    if tool_.message_version == "v0":
        expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
            "bar", tool_call_id="123", name="foo"
        )
@@ -1776,9 +1776,9 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
    }

-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
-    tool_ = injected_tool if output_version == "v0" else injected_tool_v1
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_injected_arg(message_version: Literal["v0", "v1"]) -> None:
+    tool_ = injected_tool if message_version == "v0" else injected_tool_v1
    assert _schema(tool_.get_input_schema()) == {
        "title": "foo",
        "description": "Foo.",
@@ -1797,7 +1797,7 @@ def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
        "required": ["x"],
    }
    assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if output_version == "v0":
+    if message_version == "v0":
        expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
            "bar", tool_call_id="123", name="foo"
        )
@@ -1833,8 +1833,8 @@ def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
    }

-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_inherited_injected_arg(message_version: Literal["v0", "v1"]) -> None:
    class BarSchema(BaseModel):
        """bar."""
@@ -1855,7 +1855,7 @@ def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> None:
        def _run(self, x: int, y: str) -> Any:
            return y

-    tool_ = InheritedInjectedArgTool(output_version=output_version)
+    tool_ = InheritedInjectedArgTool(message_version=message_version)
    assert tool_.get_input_schema().model_json_schema() == {
        "title": "FooSchema",  # Matches the title from the provided schema
        "description": "foo.",
@@ -1875,7 +1875,7 @@ def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> None:
        "required": ["x"],
    }
    assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if output_version == "v0":
+    if message_version == "v0":
        expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
            "bar", tool_call_id="123", name="foo"
        )
@@ -2253,8 +2253,8 @@ def test_tool_annotations_preserved() -> None:
    assert schema.__annotations__ == expected_type_hints

-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_create_retriever_tool(message_version: Literal["v0", "v1"]) -> None:
    class MyRetriever(BaseRetriever):
        def _get_relevant_documents(
            self, query: str, *, run_manager: CallbackManagerForRetrieverRun
@@ -2266,13 +2266,13 @@ def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
        retriever,
        "retriever_tool_content",
        "Retriever Tool Content",
-        output_version=output_version,
+        message_version=message_version,
    )
    assert isinstance(retriever_tool, BaseTool)
    assert retriever_tool.name == "retriever_tool_content"
    assert retriever_tool.description == "Retriever Tool Content"
    assert retriever_tool.invoke("bar") == "foo bar\n\nbar"
-    if output_version == "v0":
+    if message_version == "v0":
        expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
            "foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content"
        )
@@ -2300,13 +2300,13 @@ def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
        "retriever_tool_artifact",
        "Retriever Tool Artifact",
        response_format="content_and_artifact",
-        output_version=output_version,
+        message_version=message_version,
    )
    assert isinstance(retriever_tool_artifact, BaseTool)
    assert retriever_tool_artifact.name == "retriever_tool_artifact"
    assert retriever_tool_artifact.description == "Retriever Tool Artifact"
    assert retriever_tool_artifact.invoke("bar") == "foo bar\n\nbar"
-    if output_version == "v0":
+    if message_version == "v0":
        expected = ToolMessage(
            "foo bar\n\nbar",
            artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
@@ -2666,7 +2666,7 @@ def test_empty_string_tool_call_id() -> None:

def test_empty_string_tool_call_id_v1() -> None:
-    @tool(output_version="v1")
+    @tool(message_version="v1")
    def foo(x: int) -> str:
        """Foo."""
        return "hi"


@@ -40,7 +40,7 @@ def init_chat_model(
    model_provider: Optional[str] = None,
    configurable_fields: Literal[None] = None,
    config_prefix: Optional[str] = None,
-    output_version: Literal["v0"] = "v0",
+    message_version: Literal["v0"] = "v0",
    **kwargs: Any,
) -> BaseChatModel: ...
@@ -52,7 +52,7 @@ def init_chat_model(
    model_provider: Optional[str] = None,
    configurable_fields: Literal[None] = None,
    config_prefix: Optional[str] = None,
-    output_version: Literal["v1"] = "v1",
+    message_version: Literal["v1"] = "v1",
    **kwargs: Any,
) -> BaseChatModelV1: ...
@@ -64,7 +64,7 @@ def init_chat_model(
    model_provider: Optional[str] = None,
    configurable_fields: Literal[None] = None,
    config_prefix: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
    **kwargs: Any,
) -> _ConfigurableModel: ...
@@ -76,7 +76,7 @@ def init_chat_model(
    model_provider: Optional[str] = None,
    configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ...,
    config_prefix: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
    **kwargs: Any,
) -> _ConfigurableModel: ...
@@ -92,7 +92,7 @@ def init_chat_model(
        Union[Literal["any"], list[str], tuple[str, ...]]
    ] = None,
    config_prefix: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
    **kwargs: Any,
) -> Union[BaseChatModel, BaseChatModelV1, _ConfigurableModel]:
    """Initialize a ChatModel in a single line using the model's name and provider.
@@ -146,7 +146,7 @@ def init_chat_model(
            - ``grok...`` -> ``xai``
            - ``sonar...`` -> ``perplexity``

-        output_version: The version of the BaseChatModel to return. Either ``"v0"`` for
+        message_version: The version of the BaseChatModel to return. Either ``"v0"`` for
            a v0 :class:`~langchain_core.language_models.chat_models.BaseChatModel` or
            ``"v1"`` for a v1 :class:`~langchain_core.v1.chat_models.BaseChatModel`. The
            output version determines what type of message objects the model will
@@ -347,7 +347,7 @@ def init_chat_model(
        return _init_chat_model_helper(
            cast("str", model),
            model_provider=model_provider,
-            output_version=output_version,
+            message_version=message_version,
            **kwargs,
        )
    if model:
@@ -365,13 +365,13 @@ def _init_chat_model_helper(
    model: str,
    *,
    model_provider: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
    **kwargs: Any,
) -> Union[BaseChatModel, BaseChatModelV1]:
    model, model_provider = _parse_model(model, model_provider)
    if model_provider == "openai":
        _check_pkg("langchain_openai")
-        if output_version == "v0":
+        if message_version == "v0":
            from langchain_openai import ChatOpenAI

            return ChatOpenAI(model=model, **kwargs)
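A sketch of the renamed keyword at the top-level entry point; the model string follows the provider:model convention the docstring describes:

    from langchain.chat_models import init_chat_model

    llm_v0 = init_chat_model("openai:gpt-4o")                        # v0 BaseChatModel
    llm_v1 = init_chat_model("openai:gpt-4o", message_version="v1")  # v1 BaseChatModel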


@@ -1,7 +1,7 @@
"""
This module converts between AIMessage output formats, which are governed by the
-``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"``,
-``"responses/v1"``, and ``"v1"``.
+``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"`` and
+``"responses/v1"``.

``"v0"`` corresponds to the format as of ChatOpenAI v0.3. For the Responses API, it
stores reasoning and tool outputs in AIMessage.additional_kwargs:
@@ -60,8 +60,6 @@ representing these items in the content sequence:
There are other, small improvements as well-- e.g., we store message IDs on text
content blocks, rather than on the AIMessage.id, which now stores the response ID.

-``"v1"`` represents LangChain's cross-provider standard format.
-
For backwards compatibility, this module provides functions to convert between the
formats. The functions are used internally by ChatOpenAI.
"""  # noqa: E501


@@ -594,25 +594,6 @@ class BaseChatOpenAI(BaseChatModel):
    .. versionadded:: 0.3.9
    """

-    output_version: str = "v1"
-    """Version of AIMessage output format to use.
-
-    This field is used to roll-out new output formats for chat model AIMessages
-    in a backwards-compatible way.
-
-    Supported values:
-
-    - ``"v0"``: AIMessage format as of langchain-openai 0.3.x.
-    - ``"responses/v1"``: Formats Responses API output
-      items into AIMessage content blocks.
-    - ``"v1"``: v1 of LangChain cross-provider standard.
-
-    ``output_version="v1"`` is recommended.
-
-    .. versionadded:: 0.3.25
-    """
-
    model_config = ConfigDict(populate_by_name=True)

    @model_validator(mode="before")
@@ -1026,8 +1007,6 @@
    def _use_responses_api(self, payload: dict) -> bool:
        if isinstance(self.use_responses_api, bool):
            return self.use_responses_api
-        elif self.output_version == "responses/v1":
-            return True
        elif self.include is not None:
            return True
        elif self.reasoning is not None:
@@ -1866,7 +1845,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

            llm = ChatOpenAI(
                model="gpt-4o",
@@ -1886,7 +1865,7 @@
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI
            import openai

            ChatOpenAI(..., frequency_penalty=0.2).invoke(...)
@@ -2100,23 +2079,11 @@
        `docs <https://python.langchain.com/docs/integrations/chat/openai/>`_ for more
        detail.

-        .. note::
-            ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
-            AIMessage format when using the Responses API. Setting
-
-            .. code-block:: python
-
-                llm = ChatOpenAI(model="...", output_version="responses/v1")
-
-            will format output from reasoning summaries, built-in tool invocations, and
-            other response items into the message's ``content`` field, rather than
-            ``additional_kwargs``. We recommend this format for new applications.
-
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

-            llm = ChatOpenAI(model="gpt-4.1-mini", output_version="responses/v1")
+            llm = ChatOpenAI(model="gpt-4.1-mini")

            tool = {"type": "web_search_preview"}
            llm_with_tools = llm.bind_tools([tool])
@@ -2157,7 +2124,7 @@
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

            llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
            response = llm.invoke("Hi, I'm Bob.")
@@ -2195,30 +2162,16 @@
        OpenAI's Responses API supports `reasoning models <https://platform.openai.com/docs/guides/reasoning?api-mode=responses>`_
        that expose a summary of internal reasoning processes.

-        .. note::
-            ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
-            AIMessage format when using the Responses API. Setting
-
-            .. code-block:: python
-
-                llm = ChatOpenAI(model="...", output_version="responses/v1")
-
-            will format output from reasoning summaries, built-in tool invocations, and
-            other response items into the message's ``content`` field, rather than
-            ``additional_kwargs``. We recommend this format for new applications.
-
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

            reasoning = {
                "effort": "medium",  # 'low', 'medium', or 'high'
                "summary": "auto",  # 'detailed', 'auto', or None
            }

-            llm = ChatOpenAI(
-                model="o4-mini", reasoning=reasoning, output_version="responses/v1"
-            )
+            llm = ChatOpenAI(model="o4-mini", reasoning=reasoning)
            response = llm.invoke("What is 3^3?")

            # Response text
@@ -2436,7 +2389,7 @@
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

            llm = ChatOpenAI(model="o4-mini", service_tier="flex")
@@ -2688,7 +2641,7 @@
            from typing import Optional

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI
            from pydantic import BaseModel, Field
@@ -2719,7 +2672,7 @@
            from typing import Optional

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI
            from pydantic import BaseModel, Field
@@ -2750,7 +2703,7 @@
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI
            from pydantic import BaseModel
@@ -2783,7 +2736,7 @@
            # from typing_extensions, not from typing.
            from typing_extensions import Annotated, TypedDict

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

            class AnswerWithJustification(TypedDict):
@@ -2810,7 +2763,7 @@
        .. code-block:: python

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI

            oai_schema = {
                'name': 'AnswerWithJustification',
@@ -2840,7 +2793,7 @@
        .. code-block::

-            from langchain_openai import ChatOpenAI
+            from langchain_openai.v1 import ChatOpenAI
            from pydantic import BaseModel

            class AnswerWithJustification(BaseModel):
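The recurring import swap above points new-format users at the v1 namespace rather than an output_version flag; a condensed sketch of the pattern the updated docstrings use:

    from langchain_openai.v1 import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4.1-mini")
    response = llm.invoke("Hello")  # a v1 AIMessage with standard content blocks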