diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py
index 287963d33c2..02d4679fe76 100644
--- a/libs/core/langchain_core/messages/utils.py
+++ b/libs/core/langchain_core/messages/utils.py
@@ -415,6 +415,10 @@ def convert_from_v1_message(message: MessageV1) -> BaseMessage:
return ToolMessage(
content=content,
id=message.id,
+ tool_call_id=message.tool_call_id,
+ artifact=message.artifact,
+ name=message.name,
+ status=message.status,
)
message = f"Unsupported message type: {type(message)}"
raise NotImplementedError(message)
@@ -569,7 +573,7 @@ def convert_to_messages_v1(
from langchain_core.prompt_values import PromptValue
if isinstance(messages, PromptValue):
- return messages.to_messages(output_version="v1")
+ return messages.to_messages(message_version="v1")
return [_convert_to_message_v1(m) for m in messages]
diff --git a/libs/core/langchain_core/prompt_values.py b/libs/core/langchain_core/prompt_values.py
index fec4de6f741..f0069f47fa4 100644
--- a/libs/core/langchain_core/prompt_values.py
+++ b/libs/core/langchain_core/prompt_values.py
@@ -96,15 +96,15 @@ class PromptValue(Serializable, ABC):
@overload
def to_messages(
- self, output_version: Literal["v0"] = "v0"
+ self, message_version: Literal["v0"] = "v0"
) -> list[BaseMessage]: ...
@overload
- def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+ def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
@abstractmethod
def to_messages(
- self, output_version: Literal["v0", "v1"] = "v0"
+ self, message_version: Literal["v0", "v1"] = "v0"
) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
"""Return prompt as a list of Messages."""
@@ -131,17 +131,17 @@ class StringPromptValue(PromptValue):
@overload
def to_messages(
- self, output_version: Literal["v0"] = "v0"
+ self, message_version: Literal["v0"] = "v0"
) -> list[BaseMessage]: ...
@overload
- def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+ def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
def to_messages(
- self, output_version: Literal["v0", "v1"] = "v0"
+ self, message_version: Literal["v0", "v1"] = "v0"
) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
"""Return prompt as messages."""
- if output_version == "v1":
+ if message_version == "v1":
return [HumanMessageV1(content=self.text)]
return [HumanMessage(content=self.text)]
@@ -161,21 +161,21 @@ class ChatPromptValue(PromptValue):
@overload
def to_messages(
- self, output_version: Literal["v0"] = "v0"
+ self, message_version: Literal["v0"] = "v0"
) -> list[BaseMessage]: ...
@overload
- def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+ def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
def to_messages(
- self, output_version: Literal["v0", "v1"] = "v0"
+ self, message_version: Literal["v0", "v1"] = "v0"
) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
"""Return prompt as a list of messages.
Args:
- output_version: The output version, either "v0" (default) or "v1".
+            message_version: The message version, either "v0" (default) or "v1".
"""
- if output_version == "v1":
+ if message_version == "v1":
return [_convert_to_v1(m) for m in self.messages]
return list(self.messages)
@@ -213,17 +213,17 @@ class ImagePromptValue(PromptValue):
@overload
def to_messages(
- self, output_version: Literal["v0"] = "v0"
+ self, message_version: Literal["v0"] = "v0"
) -> list[BaseMessage]: ...
@overload
- def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+ def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
def to_messages(
- self, output_version: Literal["v0", "v1"] = "v0"
+ self, message_version: Literal["v0", "v1"] = "v0"
) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
"""Return prompt (image URL) as messages."""
- if output_version == "v1":
+ if message_version == "v1":
block: types.ImageContentBlock = {
"type": "image",
"url": self.image_url["url"],
diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py
index fb8ce6fcb45..ba57594314d 100644
--- a/libs/core/langchain_core/runnables/base.py
+++ b/libs/core/langchain_core/runnables/base.py
@@ -2361,7 +2361,7 @@ class Runnable(ABC, Generic[Input, Output]):
name: Optional[str] = None,
description: Optional[str] = None,
arg_types: Optional[dict[str, type]] = None,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool:
"""Create a BaseTool from a Runnable.
@@ -2377,7 +2377,7 @@ class Runnable(ABC, Generic[Input, Output]):
name: The name of the tool. Defaults to None.
description: The description of the tool. Defaults to None.
arg_types: A dictionary of argument names to types. Defaults to None.
- output_version: Version of ToolMessage to return given
+ message_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -2467,7 +2467,7 @@ class Runnable(ABC, Generic[Input, Output]):
name=name,
description=description,
arg_types=arg_types,
- output_version=output_version,
+ message_version=message_version,
)
diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py
index b7e1fe7354d..dfc5c6e228b 100644
--- a/libs/core/langchain_core/tools/base.py
+++ b/libs/core/langchain_core/tools/base.py
@@ -499,7 +499,7 @@ class ChildTool(BaseTool):
two-tuple corresponding to the (content, artifact) of a ToolMessage.
"""
- output_version: Literal["v0", "v1"] = "v0"
+ message_version: Literal["v0", "v1"] = "v0"
"""Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
@@ -894,7 +894,7 @@ class ChildTool(BaseTool):
tool_call_id,
self.name,
status,
- output_version=self.output_version,
+ message_version=self.message_version,
)
run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
@@ -1015,7 +1015,7 @@ class ChildTool(BaseTool):
tool_call_id,
self.name,
status,
- output_version=self.output_version,
+ message_version=self.message_version,
)
await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
@@ -1156,7 +1156,7 @@ def _format_output(
name: str,
status: Literal["success", "error"],
*,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> Union[ToolOutputMixin, Any]:
"""Format tool output as a ToolMessage if appropriate.
@@ -1166,7 +1166,7 @@ def _format_output(
tool_call_id: The ID of the tool call.
name: The name of the tool.
status: The execution status.
- output_version: The version of the ToolMessage to return.
+ message_version: The version of the ToolMessage to return.
Returns:
The formatted output, either as a ToolMessage or the original content.
@@ -1175,7 +1175,7 @@ def _format_output(
return content
if not _is_message_content_type(content):
content = _stringify(content)
- if output_version == "v0":
+ if message_version == "v0":
return ToolMessage(
content,
artifact=artifact,
diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py
index e28efa0bbfe..7da794a39e7 100644
--- a/libs/core/langchain_core/tools/convert.py
+++ b/libs/core/langchain_core/tools/convert.py
@@ -22,7 +22,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...
@@ -38,7 +38,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool: ...
@@ -53,7 +53,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool: ...
@@ -68,7 +68,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...
@@ -83,7 +83,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> Union[
BaseTool,
Callable[[Union[Callable, Runnable]], BaseTool],
@@ -123,7 +123,7 @@ def tool(
error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
whether to raise ValueError on invalid Google Style docstrings.
Defaults to True.
- output_version: Version of ToolMessage to return given
+ message_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -284,7 +284,7 @@ def tool(
response_format=response_format,
parse_docstring=parse_docstring,
error_on_invalid_docstring=error_on_invalid_docstring,
- output_version=output_version,
+ message_version=message_version,
)
# If someone doesn't want a schema applied, we must treat it as
# a simple string->string function
@@ -301,7 +301,7 @@ def tool(
return_direct=return_direct,
coroutine=coroutine,
response_format=response_format,
- output_version=output_version,
+ message_version=message_version,
)
return _tool_factory
@@ -395,7 +395,7 @@ def convert_runnable_to_tool(
name: Optional[str] = None,
description: Optional[str] = None,
arg_types: Optional[dict[str, type]] = None,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
) -> BaseTool:
"""Convert a Runnable into a BaseTool.
@@ -405,7 +405,7 @@ def convert_runnable_to_tool(
name: The name of the tool. Defaults to None.
description: The description of the tool. Defaults to None.
arg_types: The types of the arguments. Defaults to None.
- output_version: Version of ToolMessage to return given
+ message_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -426,7 +426,7 @@ def convert_runnable_to_tool(
func=runnable.invoke,
coroutine=runnable.ainvoke,
description=description,
- output_version=output_version,
+ message_version=message_version,
)
async def ainvoke_wrapper(
@@ -454,5 +454,5 @@ def convert_runnable_to_tool(
coroutine=ainvoke_wrapper,
description=description,
args_schema=args_schema,
- output_version=output_version,
+ message_version=message_version,
)
diff --git a/libs/core/langchain_core/tools/retriever.py b/libs/core/langchain_core/tools/retriever.py
index 2ef7de31995..b2afc6b5d82 100644
--- a/libs/core/langchain_core/tools/retriever.py
+++ b/libs/core/langchain_core/tools/retriever.py
@@ -72,7 +72,7 @@ def create_retriever_tool(
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
- output_version: Literal["v0", "v1"] = "v1",
+ message_version: Literal["v0", "v1"] = "v1",
) -> Tool:
r"""Create a tool to do retrieval of documents.
@@ -89,7 +89,7 @@ def create_retriever_tool(
"content_and_artifact" then the output is expected to be a two-tuple
corresponding to the (content, artifact) of a ToolMessage (artifact
being a list of documents in this case). Defaults to "content".
- output_version: Version of ToolMessage to return given
+ message_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -120,5 +120,5 @@ def create_retriever_tool(
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
- output_version=output_version,
+ message_version=message_version,
)
diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py
index e6b0aa8ba32..f106e5e06f7 100644
--- a/libs/core/langchain_core/tools/structured.py
+++ b/libs/core/langchain_core/tools/structured.py
@@ -129,7 +129,7 @@ class StructuredTool(BaseTool):
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = False,
- output_version: Literal["v0", "v1"] = "v0",
+ message_version: Literal["v0", "v1"] = "v0",
**kwargs: Any,
) -> StructuredTool:
"""Create tool from a given function.
@@ -158,7 +158,7 @@ class StructuredTool(BaseTool):
error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
whether to raise ValueError on invalid Google Style docstrings.
Defaults to False.
- output_version: Version of ToolMessage to return given
+ message_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -239,7 +239,7 @@ class StructuredTool(BaseTool):
description=description_,
return_direct=return_direct,
response_format=response_format,
- output_version=output_version,
+ message_version=message_version,
**kwargs,
)
diff --git a/libs/core/langchain_core/v1/chat_models.py b/libs/core/langchain_core/v1/chat_models.py
index 6c4e01101e1..afa07b526f6 100644
--- a/libs/core/langchain_core/v1/chat_models.py
+++ b/libs/core/langchain_core/v1/chat_models.py
@@ -362,7 +362,7 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC):
def _convert_input(self, model_input: LanguageModelInput) -> list[MessageV1]:
if isinstance(model_input, PromptValue):
- return model_input.to_messages(output_version="v1")
+ return model_input.to_messages(message_version="v1")
if isinstance(model_input, str):
return [HumanMessageV1(content=model_input)]
if isinstance(model_input, Sequence):
diff --git a/libs/core/tests/unit_tests/test_tools.py b/libs/core/tests/unit_tests/test_tools.py
index 339a4db826f..e7df9ab0ca7 100644
--- a/libs/core/tests/unit_tests/test_tools.py
+++ b/libs/core/tests/unit_tests/test_tools.py
@@ -1381,16 +1381,16 @@ def test_tool_annotated_descriptions() -> None:
}
-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_call_input_tool_message(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_call_input_tool_message(message_version: Literal["v0", "v1"]) -> None:
tool_call = {
"name": "structured_api",
"args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
"id": "123",
"type": "tool_call",
}
- tool = _MockStructuredTool(output_version=output_version)
- if output_version == "v0":
+ tool = _MockStructuredTool(message_version=message_version)
+ if message_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"1 True {'img': 'base64string...'}",
tool_call_id="123",
@@ -1434,7 +1434,7 @@ def _mock_structured_tool_with_artifact(
return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}
-@tool("structured_api", response_format="content_and_artifact", output_version="v1")
+@tool("structured_api", response_format="content_and_artifact", message_version="v1")
def _mock_structured_tool_with_artifact_v1(
*, arg1: int, arg2: bool, arg3: Optional[dict] = None
) -> tuple[str, dict]:
@@ -1469,7 +1469,7 @@ def test_tool_call_input_tool_message_with_artifact(tool: BaseTool) -> None:
@pytest.mark.parametrize(
"tool",
[
- _MockStructuredToolWithRawOutput(output_version="v1"),
+ _MockStructuredToolWithRawOutput(message_version="v1"),
_mock_structured_tool_with_artifact_v1,
],
)
@@ -1603,7 +1603,7 @@ def injected_tool(x: int, y: Annotated[str, InjectedToolArg]) -> str:
return y
-@tool("foo", parse_docstring=True, output_version="v1")
+@tool("foo", parse_docstring=True, message_version="v1")
def injected_tool_v1(x: int, y: Annotated[str, InjectedToolArg]) -> str:
"""Foo.
@@ -1651,12 +1651,12 @@ def injected_tool_with_schema(x: int, y: str) -> str:
return y
-@tool("foo", args_schema=fooSchema, output_version="v1")
+@tool("foo", args_schema=fooSchema, message_version="v1")
def injected_tool_with_schema_v1(x: int, y: str) -> str:
return y
-@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(output_version="v1")])
+@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(message_version="v1")])
def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
assert _schema(tool_.get_input_schema()) == {
"title": "foo",
@@ -1676,7 +1676,7 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
- if tool_.output_version == "v0":
+ if tool_.message_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
@@ -1718,7 +1718,7 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
injected_tool_with_schema,
InjectedToolWithSchema(),
injected_tool_with_schema_v1,
- InjectedToolWithSchema(output_version="v1"),
+ InjectedToolWithSchema(message_version="v1"),
],
)
def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
@@ -1740,7 +1740,7 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
- if tool_.output_version == "v0":
+ if tool_.message_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
@@ -1776,9 +1776,9 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
}
-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
- tool_ = injected_tool if output_version == "v0" else injected_tool_v1
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_injected_arg(message_version: Literal["v0", "v1"]) -> None:
+ tool_ = injected_tool if message_version == "v0" else injected_tool_v1
assert _schema(tool_.get_input_schema()) == {
"title": "foo",
"description": "Foo.",
@@ -1797,7 +1797,7 @@ def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
- if output_version == "v0":
+ if message_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
@@ -1833,8 +1833,8 @@ def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
}
-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_inherited_injected_arg(message_version: Literal["v0", "v1"]) -> None:
class BarSchema(BaseModel):
"""bar."""
@@ -1855,7 +1855,7 @@ def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> Non
def _run(self, x: int, y: str) -> Any:
return y
- tool_ = InheritedInjectedArgTool(output_version=output_version)
+ tool_ = InheritedInjectedArgTool(message_version=message_version)
assert tool_.get_input_schema().model_json_schema() == {
"title": "FooSchema", # Matches the title from the provided schema
"description": "foo.",
@@ -1875,7 +1875,7 @@ def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> Non
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
- if output_version == "v0":
+ if message_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
@@ -2253,8 +2253,8 @@ def test_tool_annotations_preserved() -> None:
assert schema.__annotations__ == expected_type_hints
-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_create_retriever_tool(message_version: Literal["v0", "v1"]) -> None:
class MyRetriever(BaseRetriever):
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
@@ -2266,13 +2266,13 @@ def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
retriever,
"retriever_tool_content",
"Retriever Tool Content",
- output_version=output_version,
+ message_version=message_version,
)
assert isinstance(retriever_tool, BaseTool)
assert retriever_tool.name == "retriever_tool_content"
assert retriever_tool.description == "Retriever Tool Content"
assert retriever_tool.invoke("bar") == "foo bar\n\nbar"
- if output_version == "v0":
+ if message_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content"
)
@@ -2300,13 +2300,13 @@ def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
"retriever_tool_artifact",
"Retriever Tool Artifact",
response_format="content_and_artifact",
- output_version=output_version,
+ message_version=message_version,
)
assert isinstance(retriever_tool_artifact, BaseTool)
assert retriever_tool_artifact.name == "retriever_tool_artifact"
assert retriever_tool_artifact.description == "Retriever Tool Artifact"
assert retriever_tool_artifact.invoke("bar") == "foo bar\n\nbar"
- if output_version == "v0":
+ if message_version == "v0":
expected = ToolMessage(
"foo bar\n\nbar",
artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
@@ -2666,7 +2666,7 @@ def test_empty_string_tool_call_id() -> None:
def test_empty_string_tool_call_id_v1() -> None:
- @tool(output_version="v1")
+ @tool(message_version="v1")
def foo(x: int) -> str:
"""Foo."""
return "hi"
diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index 2f8b46bcb59..f5a8a588b80 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -19,6 +19,7 @@ from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
from langchain_core.runnables.schema import StreamEvent
from langchain_core.tools import BaseTool
from langchain_core.tracers import RunLog, RunLogPatch
+from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1
from pydantic import BaseModel
from typing_extensions import TypeAlias, override
@@ -39,10 +40,23 @@ def init_chat_model(
model_provider: Optional[str] = None,
configurable_fields: Literal[None] = None,
config_prefix: Optional[str] = None,
+ message_version: Literal["v0"] = "v0",
**kwargs: Any,
) -> BaseChatModel: ...
+@overload
+def init_chat_model(
+ model: str,
+ *,
+ model_provider: Optional[str] = None,
+ configurable_fields: Literal[None] = None,
+ config_prefix: Optional[str] = None,
+ message_version: Literal["v1"] = "v1",
+ **kwargs: Any,
+) -> BaseChatModelV1: ...
+
+
@overload
def init_chat_model(
model: Literal[None] = None,
@@ -50,6 +64,7 @@ def init_chat_model(
model_provider: Optional[str] = None,
configurable_fields: Literal[None] = None,
config_prefix: Optional[str] = None,
+ message_version: Literal["v0", "v1"] = "v0",
**kwargs: Any,
) -> _ConfigurableModel: ...
@@ -61,6 +76,7 @@ def init_chat_model(
model_provider: Optional[str] = None,
configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ...,
config_prefix: Optional[str] = None,
+ message_version: Literal["v0", "v1"] = "v0",
**kwargs: Any,
) -> _ConfigurableModel: ...
@@ -76,8 +92,9 @@ def init_chat_model(
Union[Literal["any"], list[str], tuple[str, ...]]
] = None,
config_prefix: Optional[str] = None,
+ message_version: Literal["v0", "v1"] = "v0",
**kwargs: Any,
-) -> Union[BaseChatModel, _ConfigurableModel]:
+) -> Union[BaseChatModel, BaseChatModelV1, _ConfigurableModel]:
"""Initialize a ChatModel in a single line using the model's name and provider.
.. note::
@@ -128,6 +145,20 @@ def init_chat_model(
- ``deepseek...`` -> ``deepseek``
- ``grok...`` -> ``xai``
- ``sonar...`` -> ``perplexity``
+
+ message_version: The version of the BaseChatModel to return. Either ``"v0"`` for
+ a v0 :class:`~langchain_core.language_models.chat_models.BaseChatModel` or
+            ``"v1"`` for a v1 :class:`~langchain_core.v1.chat_models.BaseChatModel`. The
+            message version determines what type of message objects the model will
+            generate.
+
+ .. note::
+ Currently supported for these providers:
+
+ - ``openai``
+
+ .. versionadded:: 0.4.0
+
configurable_fields: Which model parameters are configurable:
- None: No configurable fields.
@@ -316,6 +347,7 @@ def init_chat_model(
return _init_chat_model_helper(
cast("str", model),
model_provider=model_provider,
+ message_version=message_version,
**kwargs,
)
if model:
@@ -333,14 +365,27 @@ def _init_chat_model_helper(
model: str,
*,
model_provider: Optional[str] = None,
+ message_version: Literal["v0", "v1"] = "v0",
**kwargs: Any,
-) -> BaseChatModel:
+) -> Union[BaseChatModel, BaseChatModelV1]:
model, model_provider = _parse_model(model, model_provider)
+ if message_version != "v0" and model_provider not in ("openai",):
+ warnings.warn(
+ f"Model provider {model_provider} does not support "
+ f"message_version={message_version}. Defaulting to v0.",
+ stacklevel=2,
+ )
if model_provider == "openai":
_check_pkg("langchain_openai")
- from langchain_openai import ChatOpenAI
+ if message_version == "v0":
+ from langchain_openai import ChatOpenAI
+
+ return ChatOpenAI(model=model, **kwargs)
+ # v1
+ from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1
+
+ return ChatOpenAIV1(model=model, **kwargs)
- return ChatOpenAI(model=model, **kwargs)
if model_provider == "anthropic":
_check_pkg("langchain_anthropic")
from langchain_anthropic import ChatAnthropic
diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py
index 8cd5e0631b8..e59ba54ff68 100644
--- a/libs/langchain/tests/unit_tests/chat_models/test_base.py
+++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py
@@ -6,6 +6,7 @@ import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, RunnableSequence
+from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1
from pydantic import SecretStr
from langchain.chat_models.base import __all__, init_chat_model
@@ -51,6 +52,22 @@ def test_init_chat_model(model_name: str, model_provider: Optional[str]) -> None
assert llm1.dict() == llm2.dict()
+@pytest.mark.requires("langchain_openai")
+def test_message_version() -> None:
+ model = init_chat_model("openai:gpt-4.1", api_key="foo")
+ assert isinstance(model, BaseChatModel)
+
+ model_v1 = init_chat_model("openai:gpt-4.1", api_key="foo", message_version="v1")
+ assert isinstance(model_v1, BaseChatModelV1)
+
+ # Test we emit a warning for unsupported providers
+ with (
+ pytest.warns(match="Model provider bar does not support message_version=v1"),
+ pytest.raises(ValueError, match="Unsupported model_provider='bar'."),
+ ):
+ init_chat_model("foo", model_provider="bar", message_version="v1")
+
+
def test_init_missing_dep() -> None:
with pytest.raises(ImportError):
init_chat_model("mixtral-8x7b-32768", model_provider="groq")
diff --git a/libs/partners/openai/langchain_openai/__init__.py b/libs/partners/openai/langchain_openai/__init__.py
index e8bcdb920ac..a1756f0526d 100644
--- a/libs/partners/openai/langchain_openai/__init__.py
+++ b/libs/partners/openai/langchain_openai/__init__.py
@@ -1,11 +1,10 @@
-from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI, ChatOpenAIV1
+from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
from langchain_openai.llms import AzureOpenAI, OpenAI
__all__ = [
"OpenAI",
"ChatOpenAI",
- "ChatOpenAIV1",
"OpenAIEmbeddings",
"AzureOpenAI",
"AzureChatOpenAI",
diff --git a/libs/partners/openai/langchain_openai/chat_models/__init__.py b/libs/partners/openai/langchain_openai/chat_models/__init__.py
index 8e2d4b53de7..574128d2704 100644
--- a/libs/partners/openai/langchain_openai/chat_models/__init__.py
+++ b/libs/partners/openai/langchain_openai/chat_models/__init__.py
@@ -1,5 +1,4 @@
from langchain_openai.chat_models.azure import AzureChatOpenAI
from langchain_openai.chat_models.base import ChatOpenAI
-from langchain_openai.chat_models.base_v1 import ChatOpenAI as ChatOpenAIV1
-__all__ = ["ChatOpenAI", "AzureChatOpenAI", "ChatOpenAIV1"]
+__all__ = ["ChatOpenAI", "AzureChatOpenAI"]
diff --git a/libs/partners/openai/langchain_openai/chat_models/_compat.py b/libs/partners/openai/langchain_openai/chat_models/_compat.py
index cfb36ccec0b..00f3b365c9b 100644
--- a/libs/partners/openai/langchain_openai/chat_models/_compat.py
+++ b/libs/partners/openai/langchain_openai/chat_models/_compat.py
@@ -1,7 +1,7 @@
"""
This module converts between AIMessage output formats, which are governed by the
-``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"``,
-``"responses/v1"``, and ``"v1"``.
+``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"`` and
+``"responses/v1"``.
``"v0"`` corresponds to the format as of ChatOpenAI v0.3. For the Responses API, it
stores reasoning and tool outputs in AIMessage.additional_kwargs:
@@ -60,8 +60,6 @@ representing these items in the content sequence:
There are other, small improvements as well-- e.g., we store message IDs on text
content blocks, rather than on the AIMessage.id, which now stores the response ID.
-``"v1"`` represents LangChain's cross-provider standard format.
-
For backwards compatibility, this module provides functions to convert between the
formats. The functions are used internally by ChatOpenAI.
""" # noqa: E501
diff --git a/libs/partners/openai/langchain_openai/v1/__init__.py b/libs/partners/openai/langchain_openai/v1/__init__.py
new file mode 100644
index 00000000000..2d6bafdd6d0
--- /dev/null
+++ b/libs/partners/openai/langchain_openai/v1/__init__.py
@@ -0,0 +1,3 @@
+from langchain_openai.v1.chat_models import ChatOpenAI
+
+__all__ = ["ChatOpenAI"]
diff --git a/libs/partners/openai/langchain_openai/v1/chat_models/__init__.py b/libs/partners/openai/langchain_openai/v1/chat_models/__init__.py
new file mode 100644
index 00000000000..0e63b2a441b
--- /dev/null
+++ b/libs/partners/openai/langchain_openai/v1/chat_models/__init__.py
@@ -0,0 +1,3 @@
+from langchain_openai.v1.chat_models.base import ChatOpenAI
+
+__all__ = ["ChatOpenAI"]
diff --git a/libs/partners/openai/langchain_openai/chat_models/base_v1.py b/libs/partners/openai/langchain_openai/v1/chat_models/base.py
similarity index 98%
rename from libs/partners/openai/langchain_openai/chat_models/base_v1.py
rename to libs/partners/openai/langchain_openai/v1/chat_models/base.py
index d7dca28d3e4..0015989dbbc 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base_v1.py
+++ b/libs/partners/openai/langchain_openai/v1/chat_models/base.py
@@ -594,25 +594,6 @@ class BaseChatOpenAI(BaseChatModel):
.. versionadded:: 0.3.9
"""
- output_version: str = "v1"
- """Version of AIMessage output format to use.
-
- This field is used to roll-out new output formats for chat model AIMessages
- in a backwards-compatible way.
-
- Supported values:
-
- - ``"v0"``: AIMessage format as of langchain-openai 0.3.x.
- - ``"responses/v1"``: Formats Responses API output
- items into AIMessage content blocks.
- - ``"v1"``: v1 of LangChain cross-provider standard.
-
- ``output_version="v1"`` is recommended.
-
- .. versionadded:: 0.3.25
-
- """
-
model_config = ConfigDict(populate_by_name=True)
@model_validator(mode="before")
@@ -1026,8 +1007,6 @@ class BaseChatOpenAI(BaseChatModel):
def _use_responses_api(self, payload: dict) -> bool:
if isinstance(self.use_responses_api, bool):
return self.use_responses_api
- elif self.output_version == "responses/v1":
- return True
elif self.include is not None:
return True
elif self.reasoning is not None:
@@ -1866,7 +1845,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
llm = ChatOpenAI(
model="gpt-4o",
@@ -1886,7 +1865,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
import openai
ChatOpenAI(..., frequency_penalty=0.2).invoke(...)
@@ -2100,23 +2079,11 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
`docs `_ for more
detail.
- .. note::
- ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
- AIMessage format when using the Responses API. Setting
-
- .. code-block:: python
-
- llm = ChatOpenAI(model="...", output_version="responses/v1")
-
- will format output from reasoning summaries, built-in tool invocations, and
- other response items into the message's ``content`` field, rather than
- ``additional_kwargs``. We recommend this format for new applications.
-
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
- llm = ChatOpenAI(model="gpt-4.1-mini", output_version="responses/v1")
+ llm = ChatOpenAI(model="gpt-4.1-mini")
tool = {"type": "web_search_preview"}
llm_with_tools = llm.bind_tools([tool])
@@ -2157,7 +2124,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
response = llm.invoke("Hi, I'm Bob.")
@@ -2195,30 +2162,16 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
OpenAI's Responses API supports `reasoning models `_
that expose a summary of internal reasoning processes.
- .. note::
- ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
- AIMessage format when using the Responses API. Setting
-
- .. code-block:: python
-
- llm = ChatOpenAI(model="...", output_version="responses/v1")
-
- will format output from reasoning summaries, built-in tool invocations, and
- other response items into the message's ``content`` field, rather than
- ``additional_kwargs``. We recommend this format for new applications.
-
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
reasoning = {
"effort": "medium", # 'low', 'medium', or 'high'
"summary": "auto", # 'detailed', 'auto', or None
}
- llm = ChatOpenAI(
- model="o4-mini", reasoning=reasoning, output_version="responses/v1"
- )
+ llm = ChatOpenAI(model="o4-mini", reasoning=reasoning)
response = llm.invoke("What is 3^3?")
# Response text
@@ -2436,7 +2389,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
llm = ChatOpenAI(model="o4-mini", service_tier="flex")
@@ -2688,7 +2641,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
from typing import Optional
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
from pydantic import BaseModel, Field
@@ -2719,7 +2672,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
from typing import Optional
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
from pydantic import BaseModel, Field
@@ -2750,7 +2703,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
from pydantic import BaseModel
@@ -2783,7 +2736,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
# from typing_extensions, not from typing.
from typing_extensions import Annotated, TypedDict
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
class AnswerWithJustification(TypedDict):
@@ -2810,7 +2763,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block:: python
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
oai_schema = {
'name': 'AnswerWithJustification',
@@ -2840,7 +2793,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
.. code-block::
- from langchain_openai import ChatOpenAI
+ from langchain_openai.v1 import ChatOpenAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
index 7ba8b4a8aee..fbe2a5e6aa0 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
@@ -20,7 +20,8 @@ from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from pydantic import BaseModel
from typing_extensions import TypedDict
-from langchain_openai import ChatOpenAI, ChatOpenAIV1
+from langchain_openai import ChatOpenAI
+from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1
MODEL_NAME = "gpt-4o-mini"
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py b/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py
index c2df88b0214..948e278b0fd 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py
@@ -1,7 +1,10 @@
from langchain_openai.chat_models import __all__
+from langchain_openai.v1.chat_models import __all__ as v1_all
-EXPECTED_ALL = ["ChatOpenAI", "ChatOpenAIV1", "AzureChatOpenAI"]
+EXPECTED_ALL = ["ChatOpenAI", "AzureChatOpenAI"]
+EXPECTED_ALL_V1 = ["ChatOpenAI"]
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)
+ assert sorted(EXPECTED_ALL_V1) == sorted(v1_all)
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py b/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py
index b837fa94d70..6b5318d9b1c 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py
@@ -37,7 +37,8 @@ from openai.types.responses.response_usage import (
from openai.types.shared.reasoning import Reasoning
from openai.types.shared.response_format_text import ResponseFormatText
-from langchain_openai import ChatOpenAI, ChatOpenAIV1
+from langchain_openai import ChatOpenAI
+from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1
from tests.unit_tests.chat_models.test_base import MockSyncContextManager
responses_stream = [
diff --git a/libs/partners/openai/tests/unit_tests/test_imports.py b/libs/partners/openai/tests/unit_tests/test_imports.py
index 6284bbc9ddb..d64cc6bf5f7 100644
--- a/libs/partners/openai/tests/unit_tests/test_imports.py
+++ b/libs/partners/openai/tests/unit_tests/test_imports.py
@@ -1,15 +1,18 @@
from langchain_openai import __all__
+from langchain_openai.v1 import __all__ as v1_all
EXPECTED_ALL = [
"OpenAI",
"ChatOpenAI",
- "ChatOpenAIV1",
"OpenAIEmbeddings",
"AzureOpenAI",
"AzureChatOpenAI",
"AzureOpenAIEmbeddings",
]
+EXPECTED_ALL_V1 = ["ChatOpenAI"]
+
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)
+ assert sorted(EXPECTED_ALL_V1) == sorted(v1_all)