fix(core): rename output_version to message_version (#32412)
Commit: 56ee00cb1d
Parent: 757bae0263
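The change is a mechanical keyword rename: ``output_version`` becomes ``message_version`` across langchain-core's prompt values and tools and across ``init_chat_model``, while the v1 ChatOpenAI drops its now-redundant ``output_version`` plumbing. An illustrative before/after sketch of one call site (assuming a build that includes this commit):

    from langchain_core.prompt_values import StringPromptValue

    value = StringPromptValue(text="hello")

    # Before this commit:
    #   messages = value.to_messages(output_version="v1")
    # After this commit:
    messages = value.to_messages(message_version="v1")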
@@ -569,7 +569,7 @@ def convert_to_messages_v1(
     from langchain_core.prompt_values import PromptValue

     if isinstance(messages, PromptValue):
-        return messages.to_messages(output_version="v1")
+        return messages.to_messages(message_version="v1")
     return [_convert_to_message_v1(m) for m in messages]

@@ -96,15 +96,15 @@ class PromptValue(Serializable, ABC):

     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     @abstractmethod
     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt as a list of Messages."""

@@ -131,17 +131,17 @@ class StringPromptValue(PromptValue):

     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt as messages."""
-        if output_version == "v1":
+        if message_version == "v1":
             return [HumanMessageV1(content=self.text)]
         return [HumanMessage(content=self.text)]

@@ -161,21 +161,21 @@ class ChatPromptValue(PromptValue):

     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt as a list of messages.

         Args:
-            output_version: The output version, either "v0" (default) or "v1".
+            message_version: The output version, either "v0" (default) or "v1".
         """
-        if output_version == "v1":
+        if message_version == "v1":
             return [_convert_to_v1(m) for m in self.messages]
         return list(self.messages)

@@ -213,17 +213,17 @@ class ImagePromptValue(PromptValue):

     @overload
     def to_messages(
-        self, output_version: Literal["v0"] = "v0"
+        self, message_version: Literal["v0"] = "v0"
     ) -> list[BaseMessage]: ...

     @overload
-    def to_messages(self, output_version: Literal["v1"]) -> list[MessageV1]: ...
+    def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...

     def to_messages(
-        self, output_version: Literal["v0", "v1"] = "v0"
+        self, message_version: Literal["v0", "v1"] = "v0"
     ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
         """Return prompt (image URL) as messages."""
-        if output_version == "v1":
+        if message_version == "v1":
             block: types.ImageContentBlock = {
                 "type": "image",
                 "url": self.image_url["url"],
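The overload stacks above keep the v0/v1 return types distinct under the new keyword. A minimal sketch of both branches (assuming this commit is applied):

    from langchain_core.messages import HumanMessage
    from langchain_core.prompt_values import ChatPromptValue

    value = ChatPromptValue(messages=[HumanMessage(content="hi")])
    legacy = value.to_messages()                  # default "v0": the stored BaseMessage objects
    v1 = value.to_messages(message_version="v1")  # MessageV1 objects, via _convert_to_v1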
@@ -2361,7 +2361,7 @@ class Runnable(ABC, Generic[Input, Output]):
         name: Optional[str] = None,
         description: Optional[str] = None,
         arg_types: Optional[dict[str, type]] = None,
-        output_version: Literal["v0", "v1"] = "v0",
+        message_version: Literal["v0", "v1"] = "v0",
     ) -> BaseTool:
         """Create a BaseTool from a Runnable.

@@ -2377,7 +2377,7 @@ class Runnable(ABC, Generic[Input, Output]):
             name: The name of the tool. Defaults to None.
             description: The description of the tool. Defaults to None.
             arg_types: A dictionary of argument names to types. Defaults to None.
-            output_version: Version of ToolMessage to return given
+            message_version: Version of ToolMessage to return given
                 :class:`~langchain_core.messages.content_blocks.ToolCall` input.

                 If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -2467,7 +2467,7 @@ class Runnable(ABC, Generic[Input, Output]):
             name=name,
             description=description,
             arg_types=arg_types,
-            output_version=output_version,
+            message_version=message_version,
         )

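At an ``as_tool`` call site the rename looks as follows; a hedged sketch with a dict-input runnable (the function and ``arg_types`` here are illustrative, not from the diff):

    from langchain_core.runnables import RunnableLambda

    def f(x: dict) -> str:
        return str(x["a"])

    echo_tool = RunnableLambda(f).as_tool(
        name="echo",
        description="Return field 'a' as a string.",
        arg_types={"a": int},
        message_version="v1",  # was output_version="v1" before this commit
    )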
@@ -499,7 +499,7 @@ class ChildTool(BaseTool):
         two-tuple corresponding to the (content, artifact) of a ToolMessage.
     """

-    output_version: Literal["v0", "v1"] = "v0"
+    message_version: Literal["v0", "v1"] = "v0"
     """Version of ToolMessage to return given
     :class:`~langchain_core.messages.content_blocks.ToolCall` input.

@@ -894,7 +894,7 @@ class ChildTool(BaseTool):
                 tool_call_id,
                 self.name,
                 status,
-                output_version=self.output_version,
+                message_version=self.message_version,
             )
         run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
         return output
@@ -1015,7 +1015,7 @@ class ChildTool(BaseTool):
                 tool_call_id,
                 self.name,
                 status,
-                output_version=self.output_version,
+                message_version=self.message_version,
             )
         await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
         return output
@@ -1156,7 +1156,7 @@ def _format_output(
    name: str,
    status: Literal["success", "error"],
    *,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Union[ToolOutputMixin, Any]:
    """Format tool output as a ToolMessage if appropriate.

@@ -1166,7 +1166,7 @@ def _format_output(
         tool_call_id: The ID of the tool call.
         name: The name of the tool.
         status: The execution status.
-        output_version: The version of the ToolMessage to return.
+        message_version: The version of the ToolMessage to return.

     Returns:
         The formatted output, either as a ToolMessage or the original content.
@@ -1175,7 +1175,7 @@ def _format_output(
         return content
     if not _is_message_content_type(content):
         content = _stringify(content)
-    if output_version == "v0":
+    if message_version == "v0":
         return ToolMessage(
             content,
             artifact=artifact,
@@ -22,7 +22,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...

@@ -38,7 +38,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> BaseTool: ...

@@ -53,7 +53,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> BaseTool: ...

@@ -68,7 +68,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...

@@ -83,7 +83,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Union[
     BaseTool,
     Callable[[Union[Callable, Runnable]], BaseTool],
@@ -123,7 +123,7 @@ def tool(
         error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
             whether to raise ValueError on invalid Google Style docstrings.
             Defaults to True.
-        output_version: Version of ToolMessage to return given
+        message_version: Version of ToolMessage to return given
             :class:`~langchain_core.messages.content_blocks.ToolCall` input.

             If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -284,7 +284,7 @@ def tool(
             response_format=response_format,
             parse_docstring=parse_docstring,
             error_on_invalid_docstring=error_on_invalid_docstring,
-            output_version=output_version,
+            message_version=message_version,
         )
     # If someone doesn't want a schema applied, we must treat it as
     # a simple string->string function
@@ -301,7 +301,7 @@ def tool(
             return_direct=return_direct,
             coroutine=coroutine,
             response_format=response_format,
-            output_version=output_version,
+            message_version=message_version,
         )

     return _tool_factory
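The decorator forwards the renamed keyword into the tool factory, so user code migrates the same way; a small sketch (hypothetical tool, mirroring the usage in the updated tests):

    from langchain_core.tools import tool

    @tool(message_version="v1")  # was @tool(output_version="v1")
    def add(x: int, y: int) -> int:
        """Add two integers."""
        return x + y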
@@ -395,7 +395,7 @@ def convert_runnable_to_tool(
     name: Optional[str] = None,
     description: Optional[str] = None,
     arg_types: Optional[dict[str, type]] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> BaseTool:
     """Convert a Runnable into a BaseTool.

@@ -405,7 +405,7 @@ def convert_runnable_to_tool(
         name: The name of the tool. Defaults to None.
         description: The description of the tool. Defaults to None.
         arg_types: The types of the arguments. Defaults to None.
-        output_version: Version of ToolMessage to return given
+        message_version: Version of ToolMessage to return given
             :class:`~langchain_core.messages.content_blocks.ToolCall` input.

             If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -426,7 +426,7 @@ def convert_runnable_to_tool(
             func=runnable.invoke,
             coroutine=runnable.ainvoke,
             description=description,
-            output_version=output_version,
+            message_version=message_version,
         )

     async def ainvoke_wrapper(
@@ -454,5 +454,5 @@ def convert_runnable_to_tool(
         coroutine=ainvoke_wrapper,
         description=description,
         args_schema=args_schema,
-        output_version=output_version,
+        message_version=message_version,
     )
@@ -72,7 +72,7 @@ def create_retriever_tool(
     document_prompt: Optional[BasePromptTemplate] = None,
     document_separator: str = "\n\n",
     response_format: Literal["content", "content_and_artifact"] = "content",
-    output_version: Literal["v0", "v1"] = "v1",
+    message_version: Literal["v0", "v1"] = "v1",
 ) -> Tool:
     r"""Create a tool to do retrieval of documents.

@@ -89,7 +89,7 @@ def create_retriever_tool(
             "content_and_artifact" then the output is expected to be a two-tuple
             corresponding to the (content, artifact) of a ToolMessage (artifact
             being a list of documents in this case). Defaults to "content".
-        output_version: Version of ToolMessage to return given
+        message_version: Version of ToolMessage to return given
             :class:`~langchain_core.messages.content_blocks.ToolCall` input.

             If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -120,5 +120,5 @@ def create_retriever_tool(
         coroutine=afunc,
         args_schema=RetrieverInput,
         response_format=response_format,
-        output_version=output_version,
+        message_version=message_version,
     )
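Note that this factory's default stays ``"v1"``, unlike the ``"v0"`` default elsewhere. A sketch patterned on the updated test's ``MyRetriever`` (names here are illustrative):

    from langchain_core.callbacks import CallbackManagerForRetrieverRun
    from langchain_core.documents import Document
    from langchain_core.retrievers import BaseRetriever
    from langchain_core.tools import create_retriever_tool

    class MyRetriever(BaseRetriever):
        def _get_relevant_documents(
            self, query: str, *, run_manager: CallbackManagerForRetrieverRun
        ) -> list[Document]:
            # Toy retriever: echo the query back as a document.
            return [Document(page_content=f"foo {query}")]

    retriever_tool = create_retriever_tool(
        MyRetriever(),
        "search_docs",
        "Search the document store.",
        message_version="v1",  # explicit here, though "v1" is already the default
    )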
@@ -129,7 +129,7 @@ class StructuredTool(BaseTool):
         response_format: Literal["content", "content_and_artifact"] = "content",
         parse_docstring: bool = False,
         error_on_invalid_docstring: bool = False,
-        output_version: Literal["v0", "v1"] = "v0",
+        message_version: Literal["v0", "v1"] = "v0",
         **kwargs: Any,
     ) -> StructuredTool:
         """Create tool from a given function.
@@ -158,7 +158,7 @@ class StructuredTool(BaseTool):
             error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
                 whether to raise ValueError on invalid Google Style docstrings.
                 Defaults to False.
-            output_version: Version of ToolMessage to return given
+            message_version: Version of ToolMessage to return given
                 :class:`~langchain_core.messages.content_blocks.ToolCall` input.

                 If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
@@ -239,7 +239,7 @@ class StructuredTool(BaseTool):
             description=description_,
             return_direct=return_direct,
             response_format=response_format,
-            output_version=output_version,
+            message_version=message_version,
             **kwargs,
         )

@@ -362,7 +362,7 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC):

     def _convert_input(self, model_input: LanguageModelInput) -> list[MessageV1]:
         if isinstance(model_input, PromptValue):
-            return model_input.to_messages(output_version="v1")
+            return model_input.to_messages(message_version="v1")
         if isinstance(model_input, str):
             return [HumanMessageV1(content=model_input)]
         if isinstance(model_input, Sequence):
@@ -1381,16 +1381,16 @@ def test_tool_annotated_descriptions() -> None:
     }


-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_call_input_tool_message(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_call_input_tool_message(message_version: Literal["v0", "v1"]) -> None:
     tool_call = {
         "name": "structured_api",
         "args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
         "id": "123",
         "type": "tool_call",
     }
-    tool = _MockStructuredTool(output_version=output_version)
-    if output_version == "v0":
+    tool = _MockStructuredTool(message_version=message_version)
+    if message_version == "v0":
         expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
             "1 True {'img': 'base64string...'}",
             tool_call_id="123",
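Condensing the parametrized test above: with ``message_version="v1"``, invoking a tool on a ToolCall dict yields a v1 ToolMessage (hypothetical tool body, assuming this commit):

    from langchain_core.tools import tool

    @tool(message_version="v1")
    def structured_api(arg1: int, arg2: bool) -> str:
        """Hypothetical stand-in for the test's mock tool."""
        return f"{arg1} {arg2}"

    tool_call = {
        "name": "structured_api",
        "args": {"arg1": 1, "arg2": True},
        "id": "123",
        "type": "tool_call",
    }
    result = structured_api.invoke(tool_call)  # v1 ToolMessage with tool_call_id="123"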
@@ -1434,7 +1434,7 @@ def _mock_structured_tool_with_artifact(
     return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}


-@tool("structured_api", response_format="content_and_artifact", output_version="v1")
+@tool("structured_api", response_format="content_and_artifact", message_version="v1")
 def _mock_structured_tool_with_artifact_v1(
     *, arg1: int, arg2: bool, arg3: Optional[dict] = None
 ) -> tuple[str, dict]:
@@ -1469,7 +1469,7 @@ def test_tool_call_input_tool_message_with_artifact(tool: BaseTool) -> None:
 @pytest.mark.parametrize(
     "tool",
     [
-        _MockStructuredToolWithRawOutput(output_version="v1"),
+        _MockStructuredToolWithRawOutput(message_version="v1"),
         _mock_structured_tool_with_artifact_v1,
     ],
 )
@@ -1603,7 +1603,7 @@ def injected_tool(x: int, y: Annotated[str, InjectedToolArg]) -> str:
     return y


-@tool("foo", parse_docstring=True, output_version="v1")
+@tool("foo", parse_docstring=True, message_version="v1")
 def injected_tool_v1(x: int, y: Annotated[str, InjectedToolArg]) -> str:
     """Foo.

@@ -1651,12 +1651,12 @@ def injected_tool_with_schema(x: int, y: str) -> str:
     return y


-@tool("foo", args_schema=fooSchema, output_version="v1")
+@tool("foo", args_schema=fooSchema, message_version="v1")
 def injected_tool_with_schema_v1(x: int, y: str) -> str:
     return y


-@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(output_version="v1")])
+@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(message_version="v1")])
 def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
     assert _schema(tool_.get_input_schema()) == {
         "title": "foo",
@@ -1676,7 +1676,7 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
         "required": ["x"],
     }
     assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if tool_.output_version == "v0":
+    if tool_.message_version == "v0":
         expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
             "bar", tool_call_id="123", name="foo"
         )
@@ -1718,7 +1718,7 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
         injected_tool_with_schema,
         InjectedToolWithSchema(),
         injected_tool_with_schema_v1,
-        InjectedToolWithSchema(output_version="v1"),
+        InjectedToolWithSchema(message_version="v1"),
     ],
 )
 def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
@@ -1740,7 +1740,7 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
         "required": ["x"],
     }
     assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if tool_.output_version == "v0":
+    if tool_.message_version == "v0":
         expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
             "bar", tool_call_id="123", name="foo"
         )
@@ -1776,9 +1776,9 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
     }


-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
-    tool_ = injected_tool if output_version == "v0" else injected_tool_v1
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_injected_arg(message_version: Literal["v0", "v1"]) -> None:
+    tool_ = injected_tool if message_version == "v0" else injected_tool_v1
     assert _schema(tool_.get_input_schema()) == {
         "title": "foo",
         "description": "Foo.",
@@ -1797,7 +1797,7 @@ def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
         "required": ["x"],
     }
     assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if output_version == "v0":
+    if message_version == "v0":
         expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
             "bar", tool_call_id="123", name="foo"
         )
@@ -1833,8 +1833,8 @@ def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
     }


-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_tool_inherited_injected_arg(message_version: Literal["v0", "v1"]) -> None:
     class BarSchema(BaseModel):
         """bar."""

@@ -1855,7 +1855,7 @@ def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> Non
         def _run(self, x: int, y: str) -> Any:
             return y

-    tool_ = InheritedInjectedArgTool(output_version=output_version)
+    tool_ = InheritedInjectedArgTool(message_version=message_version)
     assert tool_.get_input_schema().model_json_schema() == {
         "title": "FooSchema",  # Matches the title from the provided schema
         "description": "foo.",
@@ -1875,7 +1875,7 @@ def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> Non
         "required": ["x"],
     }
     assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
-    if output_version == "v0":
+    if message_version == "v0":
         expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
             "bar", tool_call_id="123", name="foo"
         )
@@ -2253,8 +2253,8 @@ def test_tool_annotations_preserved() -> None:
     assert schema.__annotations__ == expected_type_hints


-@pytest.mark.parametrize("output_version", ["v0", "v1"])
-def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
+@pytest.mark.parametrize("message_version", ["v0", "v1"])
+def test_create_retriever_tool(message_version: Literal["v0", "v1"]) -> None:
     class MyRetriever(BaseRetriever):
         def _get_relevant_documents(
             self, query: str, *, run_manager: CallbackManagerForRetrieverRun
@@ -2266,13 +2266,13 @@ def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
         retriever,
         "retriever_tool_content",
         "Retriever Tool Content",
-        output_version=output_version,
+        message_version=message_version,
     )
     assert isinstance(retriever_tool, BaseTool)
     assert retriever_tool.name == "retriever_tool_content"
     assert retriever_tool.description == "Retriever Tool Content"
     assert retriever_tool.invoke("bar") == "foo bar\n\nbar"
-    if output_version == "v0":
+    if message_version == "v0":
         expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
             "foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content"
         )
@@ -2300,13 +2300,13 @@ def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
         "retriever_tool_artifact",
         "Retriever Tool Artifact",
         response_format="content_and_artifact",
-        output_version=output_version,
+        message_version=message_version,
     )
     assert isinstance(retriever_tool_artifact, BaseTool)
     assert retriever_tool_artifact.name == "retriever_tool_artifact"
     assert retriever_tool_artifact.description == "Retriever Tool Artifact"
     assert retriever_tool_artifact.invoke("bar") == "foo bar\n\nbar"
-    if output_version == "v0":
+    if message_version == "v0":
         expected = ToolMessage(
             "foo bar\n\nbar",
             artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
@@ -2666,7 +2666,7 @@ def test_empty_string_tool_call_id() -> None:


 def test_empty_string_tool_call_id_v1() -> None:
-    @tool(output_version="v1")
+    @tool(message_version="v1")
     def foo(x: int) -> str:
         """Foo."""
         return "hi"
@@ -40,7 +40,7 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Literal[None] = None,
     config_prefix: Optional[str] = None,
-    output_version: Literal["v0"] = "v0",
+    message_version: Literal["v0"] = "v0",
     **kwargs: Any,
 ) -> BaseChatModel: ...

@@ -52,7 +52,7 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Literal[None] = None,
     config_prefix: Optional[str] = None,
-    output_version: Literal["v1"] = "v1",
+    message_version: Literal["v1"] = "v1",
     **kwargs: Any,
 ) -> BaseChatModelV1: ...

@@ -64,7 +64,7 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Literal[None] = None,
     config_prefix: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
 ) -> _ConfigurableModel: ...

@@ -76,7 +76,7 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ...,
     config_prefix: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
 ) -> _ConfigurableModel: ...

@@ -92,7 +92,7 @@ def init_chat_model(
         Union[Literal["any"], list[str], tuple[str, ...]]
     ] = None,
     config_prefix: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
 ) -> Union[BaseChatModel, BaseChatModelV1, _ConfigurableModel]:
     """Initialize a ChatModel in a single line using the model's name and provider.
@@ -146,7 +146,7 @@ def init_chat_model(
             - ``grok...`` -> ``xai``
             - ``sonar...`` -> ``perplexity``

-        output_version: The version of the BaseChatModel to return. Either ``"v0"`` for
+        message_version: The version of the BaseChatModel to return. Either ``"v0"`` for
             a v0 :class:`~langchain_core.language_models.chat_models.BaseChatModel` or
             ``"v1"`` for a v1 :class:`~langchain_core.v1.chat_models.BaseChatModel`. The
             output version determines what type of message objects the model will
@@ -347,7 +347,7 @@ def init_chat_model(
         return _init_chat_model_helper(
             cast("str", model),
             model_provider=model_provider,
-            output_version=output_version,
+            message_version=message_version,
             **kwargs,
         )
     if model:
@@ -365,13 +365,13 @@ def _init_chat_model_helper(
     model: str,
     *,
     model_provider: Optional[str] = None,
-    output_version: Literal["v0", "v1"] = "v0",
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
 ) -> Union[BaseChatModel, BaseChatModelV1]:
     model, model_provider = _parse_model(model, model_provider)
     if model_provider == "openai":
         _check_pkg("langchain_openai")
-        if output_version == "v0":
+        if message_version == "v0":
             from langchain_openai import ChatOpenAI

             return ChatOpenAI(model=model, **kwargs)
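As the helper shows, the renamed keyword now also selects which chat-model class gets constructed; a minimal sketch (assuming langchain-openai is installed and this commit is applied):

    from langchain.chat_models import init_chat_model

    legacy_llm = init_chat_model("gpt-4o", model_provider="openai")  # v0 BaseChatModel
    v1_llm = init_chat_model(
        "gpt-4o", model_provider="openai", message_version="v1"
    )  # v1 BaseChatModel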
@@ -1,7 +1,7 @@
 """
 This module converts between AIMessage output formats, which are governed by the
-``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"``,
-``"responses/v1"``, and ``"v1"``.
+``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"`` and
+``"responses/v1"``.

 ``"v0"`` corresponds to the format as of ChatOpenAI v0.3. For the Responses API, it
 stores reasoning and tool outputs in AIMessage.additional_kwargs:
@@ -60,8 +60,6 @@ representing these items in the content sequence:
 There are other, small improvements as well-- e.g., we store message IDs on text
 content blocks, rather than on the AIMessage.id, which now stores the response ID.

-``"v1"`` represents LangChain's cross-provider standard format.
-
 For backwards compatibility, this module provides functions to convert between the
 formats. The functions are used internally by ChatOpenAI.
 """  # noqa: E501
@@ -594,25 +594,6 @@ class BaseChatOpenAI(BaseChatModel):
     .. versionadded:: 0.3.9
     """

-    output_version: str = "v1"
-    """Version of AIMessage output format to use.
-
-    This field is used to roll-out new output formats for chat model AIMessages
-    in a backwards-compatible way.
-
-    Supported values:
-
-    - ``"v0"``: AIMessage format as of langchain-openai 0.3.x.
-    - ``"responses/v1"``: Formats Responses API output
-      items into AIMessage content blocks.
-    - ``"v1"``: v1 of LangChain cross-provider standard.
-
-    ``output_version="v1"`` is recommended.
-
-    .. versionadded:: 0.3.25
-
-    """
-
     model_config = ConfigDict(populate_by_name=True)

     @model_validator(mode="before")
@@ -1026,8 +1007,6 @@ class BaseChatOpenAI(BaseChatModel):
     def _use_responses_api(self, payload: dict) -> bool:
         if isinstance(self.use_responses_api, bool):
             return self.use_responses_api
-        elif self.output_version == "responses/v1":
-            return True
         elif self.include is not None:
             return True
         elif self.reasoning is not None:
@@ -1866,7 +1845,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI

         llm = ChatOpenAI(
             model="gpt-4o",
@@ -1886,7 +1865,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI
         import openai

         ChatOpenAI(..., frequency_penalty=0.2).invoke(...)
@@ -2100,23 +2079,11 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
     `docs <https://python.langchain.com/docs/integrations/chat/openai/>`_ for more
     detail.

-    .. note::
-        ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
-        AIMessage format when using the Responses API. Setting
-
-        .. code-block:: python
-
-            llm = ChatOpenAI(model="...", output_version="responses/v1")
-
-        will format output from reasoning summaries, built-in tool invocations, and
-        other response items into the message's ``content`` field, rather than
-        ``additional_kwargs``. We recommend this format for new applications.
-
     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI

-        llm = ChatOpenAI(model="gpt-4.1-mini", output_version="responses/v1")
+        llm = ChatOpenAI(model="gpt-4.1-mini")

         tool = {"type": "web_search_preview"}
         llm_with_tools = llm.bind_tools([tool])
@@ -2157,7 +2124,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI

         llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
         response = llm.invoke("Hi, I'm Bob.")
@@ -2195,30 +2162,16 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
     OpenAI's Responses API supports `reasoning models <https://platform.openai.com/docs/guides/reasoning?api-mode=responses>`_
     that expose a summary of internal reasoning processes.

-    .. note::
-        ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
-        AIMessage format when using the Responses API. Setting
-
-        .. code-block:: python
-
-            llm = ChatOpenAI(model="...", output_version="responses/v1")
-
-        will format output from reasoning summaries, built-in tool invocations, and
-        other response items into the message's ``content`` field, rather than
-        ``additional_kwargs``. We recommend this format for new applications.
-
     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI

         reasoning = {
             "effort": "medium",  # 'low', 'medium', or 'high'
             "summary": "auto",  # 'detailed', 'auto', or None
         }

-        llm = ChatOpenAI(
-            model="o4-mini", reasoning=reasoning, output_version="responses/v1"
-        )
+        llm = ChatOpenAI(model="o4-mini", reasoning=reasoning)
         response = llm.invoke("What is 3^3?")

         # Response text
@@ -2436,7 +2389,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI

         llm = ChatOpenAI(model="o4-mini", service_tier="flex")

@@ -2688,7 +2641,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

         from typing import Optional

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI
         from pydantic import BaseModel, Field

@@ -2719,7 +2672,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

         from typing import Optional

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI
         from pydantic import BaseModel, Field

@@ -2750,7 +2703,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI
         from pydantic import BaseModel

@@ -2783,7 +2736,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
         # from typing_extensions, not from typing.
         from typing_extensions import Annotated, TypedDict

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI


         class AnswerWithJustification(TypedDict):
@@ -2810,7 +2763,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block:: python

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI

         oai_schema = {
             'name': 'AnswerWithJustification',
@@ -2840,7 +2793,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

     .. code-block::

-        from langchain_openai import ChatOpenAI
+        from langchain_openai.v1 import ChatOpenAI
         from pydantic import BaseModel

         class AnswerWithJustification(BaseModel):