Mirror of https://github.com/hwchase17/langchain.git, synced 2025-08-06 03:27:55 +00:00

carry over changes

This commit is contained in:
parent f33a25773e
commit 59b12f7e46
@@ -311,6 +311,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         does not properly support streaming.
     """
 
+    output_version: str = "v0"
+    """Version of AIMessage output format to use.
+
+    This field is used to roll-out new output formats for chat model AIMessages
+    in a backwards-compatible way.
+
+    All chat models currently support the default of ``"v0"``. Chat model subclasses
+    can override with (customizable) supported values.
+
+    .. versionadded:: 0.3.68
+    """
+
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
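A minimal usage sketch of the new field (not part of the diff; the model name is assumed, and the behavior shown follows from the ChatOpenAI hunks later in this commit):

from langchain_openai import ChatOpenAI

# `output_version` selects the AIMessage content format; "v0" stays the default.
llm = ChatOpenAI(model="gpt-4.1-mini", output_version="v1")  # model name assumed
msg = llm.invoke("Hello!")
# Under "v1", msg.content is a list of standard content blocks rather than a
# bare string, e.g. [{"type": "text", "text": "Hi there!"}].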
@@ -33,6 +33,15 @@ if TYPE_CHECKING:
     )
     from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
     from langchain_core.messages.content_blocks import (
+        Base64ContentBlock,
+        ContentBlock,
+        DocumentCitation,
+        NonStandardAnnotation,
+        NonStandardContentBlock,
+        ReasoningContentBlock,
+        TextContentBlock,
+        ToolCallContentBlock,
+        UrlCitation,
         convert_to_openai_data_block,
         convert_to_openai_image_block,
         is_data_content_block,
@@ -66,23 +75,32 @@ __all__ = (
     "AIMessage",
     "AIMessageChunk",
     "AnyMessage",
+    "Base64ContentBlock",
     "BaseMessage",
     "BaseMessageChunk",
     "ChatMessage",
     "ChatMessageChunk",
+    "ContentBlock",
+    "DocumentCitation",
     "FunctionMessage",
     "FunctionMessageChunk",
     "HumanMessage",
     "HumanMessageChunk",
     "InvalidToolCall",
     "MessageLikeRepresentation",
+    "NonStandardAnnotation",
+    "NonStandardContentBlock",
+    "ReasoningContentBlock",
     "RemoveMessage",
     "SystemMessage",
     "SystemMessageChunk",
+    "TextContentBlock",
     "ToolCall",
     "ToolCallChunk",
+    "ToolCallContentBlock",
     "ToolMessage",
     "ToolMessageChunk",
+    "UrlCitation",
     "_message_from_dict",
     "convert_to_messages",
     "convert_to_openai_data_block",
@@ -103,25 +121,34 @@ __all__ = (
 _dynamic_imports = {
     "AIMessage": "ai",
     "AIMessageChunk": "ai",
+    "Base64ContentBlock": "content_blocks",
     "BaseMessage": "base",
     "BaseMessageChunk": "base",
     "merge_content": "base",
     "message_to_dict": "base",
     "messages_to_dict": "base",
+    "ContentBlock": "content_blocks",
     "ChatMessage": "chat",
     "ChatMessageChunk": "chat",
+    "DocumentCitation": "content_blocks",
     "FunctionMessage": "function",
     "FunctionMessageChunk": "function",
     "HumanMessage": "human",
     "HumanMessageChunk": "human",
+    "NonStandardAnnotation": "content_blocks",
+    "NonStandardContentBlock": "content_blocks",
+    "ReasoningContentBlock": "content_blocks",
     "RemoveMessage": "modifier",
     "SystemMessage": "system",
     "SystemMessageChunk": "system",
     "InvalidToolCall": "tool",
+    "TextContentBlock": "content_blocks",
     "ToolCall": "tool",
     "ToolCallChunk": "tool",
+    "ToolCallContentBlock": "content_blocks",
     "ToolMessage": "tool",
     "ToolMessageChunk": "tool",
+    "UrlCitation": "content_blocks",
     "AnyMessage": "utils",
     "MessageLikeRepresentation": "utils",
     "_message_from_dict": "utils",
@@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Optional, Union, cast
 from pydantic import ConfigDict, Field
 
 from langchain_core.load.serializable import Serializable
+from langchain_core.messages import ContentBlock
 from langchain_core.utils import get_bolded_text
 from langchain_core.utils._merge import merge_dicts, merge_lists
 from langchain_core.utils.interactive_env import is_interactive_env
@@ -23,7 +24,7 @@ class BaseMessage(Serializable):
     Messages are the inputs and outputs of ChatModels.
     """
 
-    content: Union[str, list[Union[str, dict]]]
+    content: Union[str, list[Union[str, ContentBlock, dict]]]
     """The string contents of the message."""
 
     additional_kwargs: dict = Field(default_factory=dict)
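With ContentBlock added to the content union, typed blocks can be placed directly on messages; a short sketch (not part of the diff) using only names introduced above:

from langchain_core.messages import AIMessage

# Content may now be a list of typed blocks instead of a plain string.
msg = AIMessage(
    content=[
        {"type": "reasoning", "reasoning": "Thinking it through..."},
        {"type": "text", "text": "The answer is 4."},
    ]
)
assert isinstance(msg.content, list)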
@@ -7,6 +7,93 @@ from pydantic import TypeAdapter, ValidationError
 from typing_extensions import NotRequired, TypedDict
 
 
+# Text and annotations
+class UrlCitation(TypedDict, total=False):
+    """Citation from a URL."""
+
+    type: Literal["url_citation"]
+
+    url: str
+    """Source URL."""
+
+    title: NotRequired[str]
+    """Source title."""
+
+    cited_text: NotRequired[str]
+    """Text from the source that is being cited."""
+
+    start_index: NotRequired[int]
+    """Start index of the response text for which the annotation applies."""
+
+    end_index: NotRequired[int]
+    """End index of the response text for which the annotation applies."""
+
+
+class DocumentCitation(TypedDict, total=False):
+    """Annotation for data from a document."""
+
+    type: Literal["document_citation"]
+
+    title: NotRequired[str]
+    """Source title."""
+
+    cited_text: NotRequired[str]
+    """Text from the source that is being cited."""
+
+    start_index: NotRequired[int]
+    """Start index of the response text for which the annotation applies."""
+
+    end_index: NotRequired[int]
+    """End index of the response text for which the annotation applies."""
+
+
+class NonStandardAnnotation(TypedDict, total=False):
+    """Provider-specific annotation format."""
+
+    type: Literal["non_standard_annotation"]
+    """Type of the content block."""
+    value: dict[str, Any]
+    """Provider-specific annotation data."""
+
+
+class TextContentBlock(TypedDict, total=False):
+    """Content block for text output."""
+
+    type: Literal["text"]
+    """Type of the content block."""
+    text: str
+    """Block text."""
+    annotations: NotRequired[
+        list[Union[UrlCitation, DocumentCitation, NonStandardAnnotation]]
+    ]
+    """Citations and other annotations."""
+
+
+# Tool calls
+class ToolCallContentBlock(TypedDict, total=False):
+    """Content block for tool calls.
+
+    These are references to a :class:`~langchain_core.messages.tool.ToolCall` in the
+    message's ``tool_calls`` attribute.
+    """
+
+    type: Literal["tool_call"]
+    """Type of the content block."""
+    id: str
+    """Tool call ID."""
+
+
+# Reasoning
+class ReasoningContentBlock(TypedDict, total=False):
+    """Content block for reasoning output."""
+
+    type: Literal["reasoning"]
+    """Type of the content block."""
+    reasoning: NotRequired[str]
+    """Reasoning text."""
+
+
+# Multi-modal
 class BaseDataContentBlock(TypedDict, total=False):
     """Base class for data content blocks."""
 
@@ -68,6 +155,28 @@ DataContentBlock = Union[
 _DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)
 
 
+# Non-standard
+class NonStandardContentBlock(TypedDict, total=False):
+    """Content block provider-specific data.
+
+    This block contains data for which there is not yet a standard type.
+    """
+
+    type: Literal["non_standard"]
+    """Type of the content block."""
+    value: dict[str, Any]
+    """Provider-specific data."""
+
+
+ContentBlock = Union[
+    TextContentBlock,
+    ToolCallContentBlock,
+    ReasoningContentBlock,
+    DataContentBlock,
+    NonStandardContentBlock,
+]
+
+
 def is_data_content_block(
     content_block: dict,
 ) -> bool:
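A short sketch (not part of the diff) of building one of the new blocks; since these are TypedDicts, plain dict literals type-check against them:

from langchain_core.messages import TextContentBlock, UrlCitation

citation: UrlCitation = {
    "type": "url_citation",
    "url": "https://example.com",
    "cited_text": "a sentence from the page",  # optional via NotRequired
}
block: TextContentBlock = {
    "type": "text",
    "text": "Grounded answer text",
    "annotations": [citation],
}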
@@ -300,8 +300,9 @@ def test_llm_representation_for_serializable() -> None:
     assert chat._get_llm_string() == (
         '{"id": ["tests", "unit_tests", "language_models", "chat_models", '
         '"test_cache", "CustomChat"], "kwargs": {"messages": {"id": '
-        '["builtins", "list_iterator"], "lc": 1, "type": "not_implemented"}}, "lc": '
-        '1, "name": "CustomChat", "type": "constructor"}---[(\'stop\', None)]'
+        '["builtins", "list_iterator"], "lc": 1, "type": "not_implemented"}, '
+        '"output_version": "v0"}, "lc": 1, "name": "CustomChat", "type": '
+        "\"constructor\"}---[('stop', None)]"
     )
 
 
@@ -1,7 +1,10 @@
 """
-This module converts between AIMessage output formats for the Responses API.
+This module converts between AIMessage output formats, which are governed by the
+``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"``,
+``"responses/v1"``, and ``"v1"``.
 
-ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs:
+``"v0"`` corresponds to the format as of ChatOpenAI v0.3. For the Responses API, it
+stores reasoning and tool outputs in AIMessage.additional_kwargs:
 
 .. code-block:: python
 
|
|||||||
id="msg_123",
|
id="msg_123",
|
||||||
)
|
)
|
||||||
|
|
||||||
To retain information about response item sequencing (and to accommodate multiple
|
``"responses/v1"`` is only applicable to the Responses API. It retains information
|
||||||
reasoning items), ChatOpenAI now stores these items in the content sequence:
|
about response item sequencing and accommodates multiple reasoning items by
|
||||||
|
representing these items in the content sequence:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
@ -52,18 +56,39 @@ reasoning items), ChatOpenAI now stores these items in the content sequence:
|
|||||||
There are other, small improvements as well-- e.g., we store message IDs on text
|
There are other, small improvements as well-- e.g., we store message IDs on text
|
||||||
content blocks, rather than on the AIMessage.id, which now stores the response ID.
|
content blocks, rather than on the AIMessage.id, which now stores the response ID.
|
||||||
|
|
||||||
|
``"v1"`` represents LangChain's cross-provider standard format.
|
||||||
|
|
||||||
For backwards compatibility, this module provides functions to convert between the
|
For backwards compatibility, this module provides functions to convert between the
|
||||||
old and new formats. The functions are used internally by ChatOpenAI.
|
formats. The functions are used internally by ChatOpenAI.
|
||||||
""" # noqa: E501
|
""" # noqa: E501
|
||||||
|
|
||||||
import json
|
import json
|
||||||
from typing import Union
|
from collections.abc import Iterable
|
||||||
|
from typing import TYPE_CHECKING, Any, Union, cast
|
||||||
|
|
||||||
from langchain_core.messages import AIMessage
|
from langchain_core.messages import (
|
||||||
|
AIMessage,
|
||||||
|
AIMessageChunk,
|
||||||
|
DocumentCitation,
|
||||||
|
NonStandardAnnotation,
|
||||||
|
ReasoningContentBlock,
|
||||||
|
UrlCitation,
|
||||||
|
is_data_content_block,
|
||||||
|
)
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from langchain_core.messages import (
|
||||||
|
Base64ContentBlock,
|
||||||
|
NonStandardContentBlock,
|
||||||
|
ReasoningContentBlock,
|
||||||
|
TextContentBlock,
|
||||||
|
ToolCallContentBlock,
|
||||||
|
)
|
||||||
|
|
||||||
_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
|
_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
|
||||||
|
|
||||||
|
|
||||||
|
# v0.3 / Responses
|
||||||
def _convert_to_v03_ai_message(
|
def _convert_to_v03_ai_message(
|
||||||
message: AIMessage, has_reasoning: bool = False
|
message: AIMessage, has_reasoning: bool = False
|
||||||
) -> AIMessage:
|
) -> AIMessage:
|
||||||
@ -248,3 +273,279 @@ def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
|
|||||||
},
|
},
|
||||||
deep=False,
|
deep=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# v1 / Chat Completions
|
||||||
|
def _convert_to_v1_from_chat_completions(message: AIMessage) -> AIMessage:
|
||||||
|
"""Mutate a Chat Completions message to v1 format."""
|
||||||
|
if isinstance(message.content, str):
|
||||||
|
if message.content:
|
||||||
|
block: TextContentBlock = {"type": "text", "text": message.content}
|
||||||
|
message.content = [block]
|
||||||
|
else:
|
||||||
|
message.content = []
|
||||||
|
|
||||||
|
for tool_call in message.tool_calls:
|
||||||
|
if id_ := tool_call.get("id"):
|
||||||
|
tool_callblock: ToolCallContentBlock = {"type": "tool_call", "id": id_}
|
||||||
|
message.content.append(tool_callblock)
|
||||||
|
|
||||||
|
if "tool_calls" in message.additional_kwargs:
|
||||||
|
_ = message.additional_kwargs.pop("tool_calls")
|
||||||
|
|
||||||
|
if "token_usage" in message.response_metadata:
|
||||||
|
_ = message.response_metadata.pop("token_usage")
|
||||||
|
|
||||||
|
return message
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_to_v1_from_chat_completions_chunk(chunk: AIMessageChunk) -> AIMessageChunk:
|
||||||
|
result = _convert_to_v1_from_chat_completions(cast(AIMessage, chunk))
|
||||||
|
return cast(AIMessageChunk, result)
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_from_v1_to_chat_completions(message: AIMessage) -> AIMessage:
|
||||||
|
"""Convert a v1 message to the Chat Completions format."""
|
||||||
|
if isinstance(message.content, list):
|
||||||
|
new_content: list = []
|
||||||
|
for block in message.content:
|
||||||
|
if isinstance(block, dict):
|
||||||
|
block_type = block.get("type")
|
||||||
|
if block_type == "text":
|
||||||
|
# Strip annotations
|
||||||
|
new_content.append({"type": "text", "text": block["text"]})
|
||||||
|
elif block_type in ("reasoning", "tool_call"):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
new_content.append(block)
|
||||||
|
else:
|
||||||
|
new_content.append(block)
|
||||||
|
return message.model_copy(update={"content": new_content})
|
||||||
|
|
||||||
|
return message
|
||||||
|
|
||||||
|
|
||||||
|
# v1 / Responses
|
||||||
|
def _convert_annotation_to_v1(
|
||||||
|
annotation: dict[str, Any],
|
||||||
|
) -> Union[UrlCitation, DocumentCitation, NonStandardAnnotation]:
|
||||||
|
annotation_type = annotation.get("type")
|
||||||
|
|
||||||
|
if annotation_type == "url_citation":
|
||||||
|
new_annotation: UrlCitation = {"type": "url_citation", "url": annotation["url"]}
|
||||||
|
for field in ("title", "start_index", "end_index"):
|
||||||
|
if field in annotation:
|
||||||
|
new_annotation[field] = annotation[field]
|
||||||
|
return new_annotation
|
||||||
|
|
||||||
|
elif annotation_type == "file_citation":
|
||||||
|
new_annotation: DocumentCitation = {"type": "document_citation"}
|
||||||
|
if "filename" in annotation:
|
||||||
|
new_annotation["title"] = annotation["filename"]
|
||||||
|
for field in ("file_id", "index"): # OpenAI-specific
|
||||||
|
if field in annotation:
|
||||||
|
new_annotation[field] = annotation[field]
|
||||||
|
return new_annotation
|
||||||
|
|
||||||
|
# TODO: standardise container_file_citation?
|
||||||
|
else:
|
||||||
|
new_annotation: NonStandardAnnotation = {
|
||||||
|
"type": "non_standard_annotation",
|
||||||
|
"value": annotation,
|
||||||
|
}
|
||||||
|
return new_annotation
|
||||||
|
|
||||||
|
|
||||||
|
def _explode_reasoning(block: dict[str, Any]) -> Iterable[ReasoningContentBlock]:
|
||||||
|
if block.get("type") != "reasoning" or "summary" not in block:
|
||||||
|
yield block
|
||||||
|
return
|
||||||
|
|
||||||
|
if not block["summary"]:
|
||||||
|
_ = block.pop("summary", None)
|
||||||
|
yield block
|
||||||
|
return
|
||||||
|
|
||||||
|
# Common part for every exploded line, except 'summary'
|
||||||
|
common = {k: v for k, v in block.items() if k != "summary"}
|
||||||
|
|
||||||
|
# Optional keys that must appear only in the first exploded item
|
||||||
|
first_only = {
|
||||||
|
k: common.pop(k) for k in ("encrypted_content", "status") if k in common
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, part in enumerate(block["summary"]):
|
||||||
|
new_block = dict(common)
|
||||||
|
new_block["reasoning"] = part.get("text", "")
|
||||||
|
if idx == 0:
|
||||||
|
new_block.update(first_only)
|
||||||
|
yield cast(ReasoningContentBlock, new_block)
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_to_v1_from_responses(message: AIMessage) -> AIMessage:
|
||||||
|
"""Mutate a Responses message to v1 format."""
|
||||||
|
if not isinstance(message.content, list):
|
||||||
|
return message
|
||||||
|
|
||||||
|
def _iter_blocks() -> Iterable[dict[str, Any]]:
|
||||||
|
for block in message.content:
|
||||||
|
block_type = block.get("type")
|
||||||
|
|
||||||
|
if block_type == "text":
|
||||||
|
if "annotations" in block:
|
||||||
|
block["annotations"] = [
|
||||||
|
_convert_annotation_to_v1(a) for a in block["annotations"]
|
||||||
|
]
|
||||||
|
yield block
|
||||||
|
|
||||||
|
elif block_type == "reasoning":
|
||||||
|
yield from _explode_reasoning(block)
|
||||||
|
|
||||||
|
elif block_type == "image_generation_call" and (
|
||||||
|
result := block.get("result")
|
||||||
|
):
|
||||||
|
new_block: Base64ContentBlock = {
|
||||||
|
"type": "image",
|
||||||
|
"source_type": "base64",
|
||||||
|
"data": result,
|
||||||
|
}
|
||||||
|
for extra_key in ("id", "status"):
|
||||||
|
if extra_key in block:
|
||||||
|
new_block[extra_key] = block[extra_key]
|
||||||
|
yield new_block
|
||||||
|
|
||||||
|
elif block_type == "function_call":
|
||||||
|
new_block: ToolCallContentBlock = {
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": block["call_id"],
|
||||||
|
}
|
||||||
|
if "id" in block:
|
||||||
|
new_block["item_id"] = block["id"]
|
||||||
|
for extra_key in ("arguments", "name"):
|
||||||
|
if extra_key in block:
|
||||||
|
new_block[extra_key] = block[extra_key]
|
||||||
|
yield new_block
|
||||||
|
|
||||||
|
else:
|
||||||
|
new_block: NonStandardContentBlock = {
|
||||||
|
"type": "non_standard",
|
||||||
|
"value": block,
|
||||||
|
}
|
||||||
|
if "index" in new_block["value"]:
|
||||||
|
new_block["index"] = new_block["value"].pop("index")
|
||||||
|
yield new_block
|
||||||
|
|
||||||
|
# Replace the list with the fully converted one
|
||||||
|
message.content = list(_iter_blocks())
|
||||||
|
|
||||||
|
return message
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_annotation_from_v1(annotation: dict[str, Any]) -> dict[str, Any]:
|
||||||
|
annotation_type = annotation.get("type")
|
||||||
|
|
||||||
|
if annotation_type == "document_citation":
|
||||||
|
new_ann: dict[str, Any] = {"type": "file_citation"}
|
||||||
|
|
||||||
|
if "title" in annotation:
|
||||||
|
new_ann["filename"] = annotation["title"]
|
||||||
|
|
||||||
|
for fld in ("file_id", "index"):
|
||||||
|
if fld in annotation:
|
||||||
|
new_ann[fld] = annotation[fld]
|
||||||
|
|
||||||
|
return new_ann
|
||||||
|
|
||||||
|
elif annotation_type == "non_standard_annotation":
|
||||||
|
return annotation["value"]
|
||||||
|
|
||||||
|
else:
|
||||||
|
return dict(annotation)
|
||||||
|
|
||||||
|
|
||||||
|
def _implode_reasoning_blocks(blocks: list[dict[str, Any]]) -> Iterable[dict[str, Any]]:
|
||||||
|
i = 0
|
||||||
|
n = len(blocks)
|
||||||
|
|
||||||
|
while i < n:
|
||||||
|
block = blocks[i]
|
||||||
|
|
||||||
|
# Ordinary block – just yield a shallow copy
|
||||||
|
if block.get("type") != "reasoning" or "reasoning" not in block:
|
||||||
|
yield dict(block)
|
||||||
|
i += 1
|
||||||
|
continue
|
||||||
|
|
||||||
|
summary: list[dict[str, str]] = [
|
||||||
|
{"type": "summary_text", "text": block.get("reasoning", "")}
|
||||||
|
]
|
||||||
|
# 'common' is every field except the exploded 'reasoning'
|
||||||
|
common = {k: v for k, v in block.items() if k != "reasoning"}
|
||||||
|
|
||||||
|
i += 1
|
||||||
|
while i < n:
|
||||||
|
next_ = blocks[i]
|
||||||
|
if next_.get("type") == "reasoning" and "reasoning" in next_:
|
||||||
|
summary.append(
|
||||||
|
{"type": "summary_text", "text": next_.get("reasoning", "")}
|
||||||
|
)
|
||||||
|
i += 1
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
merged = dict(common)
|
||||||
|
merged["summary"] = summary
|
||||||
|
yield merged
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_from_v1_to_responses(message: AIMessage) -> AIMessage:
|
||||||
|
if not isinstance(message.content, list):
|
||||||
|
return message
|
||||||
|
|
||||||
|
new_content: list = []
|
||||||
|
for block in message.content:
|
||||||
|
if isinstance(block, dict):
|
||||||
|
block_type = block.get("type")
|
||||||
|
if block_type == "text" and "annotations" in block:
|
||||||
|
# Need a copy because we’re changing the annotations list
|
||||||
|
new_block = dict(block)
|
||||||
|
new_block["annotations"] = [
|
||||||
|
_convert_annotation_from_v1(a) for a in block["annotations"]
|
||||||
|
]
|
||||||
|
new_content.append(new_block)
|
||||||
|
elif block_type == "tool_call":
|
||||||
|
new_block = {"type": "function_call", "call_id": block["id"]}
|
||||||
|
if "item_id" in block:
|
||||||
|
new_block["id"] = block["item_id"]
|
||||||
|
if "name" in block and "arguments" in block:
|
||||||
|
new_block["name"] = block["name"]
|
||||||
|
new_block["arguments"] = block["arguments"]
|
||||||
|
else:
|
||||||
|
tool_call = next(
|
||||||
|
call for call in message.tool_calls if call["id"] == block["id"]
|
||||||
|
)
|
||||||
|
if "name" not in block:
|
||||||
|
new_block["name"] = tool_call["name"]
|
||||||
|
if "arguments" not in block:
|
||||||
|
new_block["arguments"] = json.dumps(tool_call["args"])
|
||||||
|
new_content.append(new_block)
|
||||||
|
elif (
|
||||||
|
is_data_content_block(block)
|
||||||
|
and block["type"] == "image"
|
||||||
|
and block["source_type"] == "base64"
|
||||||
|
):
|
||||||
|
new_block = {"type": "image_generation_call", "result": block["data"]}
|
||||||
|
for extra_key in ("id", "status"):
|
||||||
|
if extra_key in block:
|
||||||
|
new_block[extra_key] = block[extra_key]
|
||||||
|
new_content.append(new_block)
|
||||||
|
elif block_type == "non_standard" and "value" in block:
|
||||||
|
new_content.append(block["value"])
|
||||||
|
else:
|
||||||
|
new_content.append(block)
|
||||||
|
else:
|
||||||
|
new_content.append(block)
|
||||||
|
|
||||||
|
new_content = list(_implode_reasoning_blocks(new_content))
|
||||||
|
|
||||||
|
return message.model_copy(update={"content": new_content})
|
||||||
|
@ -108,7 +108,12 @@ from langchain_openai.chat_models._client_utils import (
|
|||||||
)
|
)
|
||||||
from langchain_openai.chat_models._compat import (
|
from langchain_openai.chat_models._compat import (
|
||||||
_convert_from_v03_ai_message,
|
_convert_from_v03_ai_message,
|
||||||
|
_convert_from_v1_to_chat_completions,
|
||||||
|
_convert_from_v1_to_responses,
|
||||||
_convert_to_v03_ai_message,
|
_convert_to_v03_ai_message,
|
||||||
|
_convert_to_v1_from_chat_completions,
|
||||||
|
_convert_to_v1_from_chat_completions_chunk,
|
||||||
|
_convert_to_v1_from_responses,
|
||||||
)
|
)
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
@ -649,7 +654,7 @@ class BaseChatOpenAI(BaseChatModel):
|
|||||||
.. versionadded:: 0.3.9
|
.. versionadded:: 0.3.9
|
||||||
"""
|
"""
|
||||||
|
|
||||||
output_version: Literal["v0", "responses/v1"] = "v0"
|
output_version: str = "v0"
|
||||||
"""Version of AIMessage output format to use.
|
"""Version of AIMessage output format to use.
|
||||||
|
|
||||||
This field is used to roll-out new output formats for chat model AIMessages
|
This field is used to roll-out new output formats for chat model AIMessages
|
||||||
@ -660,9 +665,9 @@ class BaseChatOpenAI(BaseChatModel):
|
|||||||
- ``"v0"``: AIMessage format as of langchain-openai 0.3.x.
|
- ``"v0"``: AIMessage format as of langchain-openai 0.3.x.
|
||||||
- ``"responses/v1"``: Formats Responses API output
|
- ``"responses/v1"``: Formats Responses API output
|
||||||
items into AIMessage content blocks.
|
items into AIMessage content blocks.
|
||||||
|
- ``"v1"``: v1 of LangChain cross-provider standard.
|
||||||
|
|
||||||
Currently only impacts the Responses API. ``output_version="responses/v1"`` is
|
``output_version="v1"`` is recommended.
|
||||||
recommended.
|
|
||||||
|
|
||||||
.. versionadded:: 0.3.25
|
.. versionadded:: 0.3.25
|
||||||
|
|
||||||
@ -849,6 +854,10 @@ class BaseChatOpenAI(BaseChatModel):
|
|||||||
message=default_chunk_class(content="", usage_metadata=usage_metadata),
|
message=default_chunk_class(content="", usage_metadata=usage_metadata),
|
||||||
generation_info=base_generation_info,
|
generation_info=base_generation_info,
|
||||||
)
|
)
|
||||||
|
if self.output_version == "v1":
|
||||||
|
generation_chunk.message = _convert_to_v1_from_chat_completions_chunk(
|
||||||
|
cast(AIMessageChunk, generation_chunk.message)
|
||||||
|
)
|
||||||
return generation_chunk
|
return generation_chunk
|
||||||
|
|
||||||
choice = choices[0]
|
choice = choices[0]
|
||||||
@ -876,6 +885,20 @@ class BaseChatOpenAI(BaseChatModel):
|
|||||||
if usage_metadata and isinstance(message_chunk, AIMessageChunk):
|
if usage_metadata and isinstance(message_chunk, AIMessageChunk):
|
||||||
message_chunk.usage_metadata = usage_metadata
|
message_chunk.usage_metadata = usage_metadata
|
||||||
|
|
||||||
|
if self.output_version == "v1":
|
||||||
|
message_chunk = cast(AIMessageChunk, message_chunk)
|
||||||
|
# Convert to v1 format
|
||||||
|
if isinstance(message_chunk.content, str):
|
||||||
|
message_chunk = _convert_to_v1_from_chat_completions_chunk(
|
||||||
|
message_chunk
|
||||||
|
)
|
||||||
|
if message_chunk.content:
|
||||||
|
message_chunk.content[0]["index"] = 0 # type: ignore[index]
|
||||||
|
else:
|
||||||
|
message_chunk = _convert_to_v1_from_chat_completions_chunk(
|
||||||
|
message_chunk
|
||||||
|
)
|
||||||
|
|
||||||
generation_chunk = ChatGenerationChunk(
|
generation_chunk = ChatGenerationChunk(
|
||||||
message=message_chunk, generation_info=generation_info or None
|
message=message_chunk, generation_info=generation_info or None
|
||||||
)
|
)
|
||||||
@ -1168,7 +1191,12 @@ class BaseChatOpenAI(BaseChatModel):
|
|||||||
else:
|
else:
|
||||||
payload = _construct_responses_api_payload(messages, payload)
|
payload = _construct_responses_api_payload(messages, payload)
|
||||||
else:
|
else:
|
||||||
payload["messages"] = [_convert_message_to_dict(m) for m in messages]
|
payload["messages"] = [
|
||||||
|
_convert_message_to_dict(_convert_from_v1_to_chat_completions(m))
|
||||||
|
if isinstance(m, AIMessage)
|
||||||
|
else _convert_message_to_dict(m)
|
||||||
|
for m in messages
|
||||||
|
]
|
||||||
return payload
|
return payload
|
||||||
|
|
||||||
def _create_chat_result(
|
def _create_chat_result(
|
||||||
@ -1234,6 +1262,11 @@ class BaseChatOpenAI(BaseChatModel):
|
|||||||
if hasattr(message, "refusal"):
|
if hasattr(message, "refusal"):
|
||||||
generations[0].message.additional_kwargs["refusal"] = message.refusal
|
generations[0].message.additional_kwargs["refusal"] = message.refusal
|
||||||
|
|
||||||
|
if self.output_version == "v1":
|
||||||
|
_ = llm_output.pop("token_usage", None)
|
||||||
|
generations[0].message = _convert_to_v1_from_chat_completions(
|
||||||
|
cast(AIMessage, generations[0].message)
|
||||||
|
)
|
||||||
return ChatResult(generations=generations, llm_output=llm_output)
|
return ChatResult(generations=generations, llm_output=llm_output)
|
||||||
|
|
||||||
async def _astream(
|
async def _astream(
|
||||||
@ -3464,6 +3497,7 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
|
|||||||
for lc_msg in messages:
|
for lc_msg in messages:
|
||||||
if isinstance(lc_msg, AIMessage):
|
if isinstance(lc_msg, AIMessage):
|
||||||
lc_msg = _convert_from_v03_ai_message(lc_msg)
|
lc_msg = _convert_from_v03_ai_message(lc_msg)
|
||||||
|
lc_msg = _convert_from_v1_to_responses(lc_msg)
|
||||||
msg = _convert_message_to_dict(lc_msg)
|
msg = _convert_message_to_dict(lc_msg)
|
||||||
# "name" parameter unsupported
|
# "name" parameter unsupported
|
||||||
if "name" in msg:
|
if "name" in msg:
|
||||||
@ -3607,7 +3641,7 @@ def _construct_lc_result_from_responses_api(
|
|||||||
response: Response,
|
response: Response,
|
||||||
schema: Optional[type[_BM]] = None,
|
schema: Optional[type[_BM]] = None,
|
||||||
metadata: Optional[dict] = None,
|
metadata: Optional[dict] = None,
|
||||||
output_version: Literal["v0", "responses/v1"] = "v0",
|
output_version: str = "v0",
|
||||||
) -> ChatResult:
|
) -> ChatResult:
|
||||||
"""Construct ChatResponse from OpenAI Response API response."""
|
"""Construct ChatResponse from OpenAI Response API response."""
|
||||||
if response.error:
|
if response.error:
|
||||||
@ -3746,6 +3780,8 @@ def _construct_lc_result_from_responses_api(
|
|||||||
)
|
)
|
||||||
if output_version == "v0":
|
if output_version == "v0":
|
||||||
message = _convert_to_v03_ai_message(message)
|
message = _convert_to_v03_ai_message(message)
|
||||||
|
elif output_version == "v1":
|
||||||
|
message = _convert_to_v1_from_responses(message)
|
||||||
else:
|
else:
|
||||||
pass
|
pass
|
||||||
return ChatResult(generations=[ChatGeneration(message=message)])
|
return ChatResult(generations=[ChatGeneration(message=message)])
|
||||||
@ -3759,7 +3795,7 @@ def _convert_responses_chunk_to_generation_chunk(
|
|||||||
schema: Optional[type[_BM]] = None,
|
schema: Optional[type[_BM]] = None,
|
||||||
metadata: Optional[dict] = None,
|
metadata: Optional[dict] = None,
|
||||||
has_reasoning: bool = False,
|
has_reasoning: bool = False,
|
||||||
output_version: Literal["v0", "responses/v1"] = "v0",
|
output_version: str = "v0",
|
||||||
) -> tuple[int, int, int, Optional[ChatGenerationChunk]]:
|
) -> tuple[int, int, int, Optional[ChatGenerationChunk]]:
|
||||||
def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
|
def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
|
||||||
"""Advance indexes tracked during streaming.
|
"""Advance indexes tracked during streaming.
|
||||||
@ -3826,6 +3862,16 @@ def _convert_responses_chunk_to_generation_chunk(
|
|||||||
annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
|
annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
|
||||||
content.append({"annotations": [annotation], "index": current_index})
|
content.append({"annotations": [annotation], "index": current_index})
|
||||||
elif chunk.type == "response.output_text.done":
|
elif chunk.type == "response.output_text.done":
|
||||||
|
if output_version == "v1":
|
||||||
|
content.append(
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "",
|
||||||
|
"id": chunk.item_id,
|
||||||
|
"index": current_index,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
else:
|
||||||
content.append({"id": chunk.item_id, "index": current_index})
|
content.append({"id": chunk.item_id, "index": current_index})
|
||||||
elif chunk.type == "response.created":
|
elif chunk.type == "response.created":
|
||||||
id = chunk.response.id
|
id = chunk.response.id
|
||||||
@ -3902,21 +3948,34 @@ def _convert_responses_chunk_to_generation_chunk(
|
|||||||
content.append({"type": "refusal", "refusal": chunk.refusal})
|
content.append({"type": "refusal", "refusal": chunk.refusal})
|
||||||
elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
|
elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
|
||||||
_advance(chunk.output_index)
|
_advance(chunk.output_index)
|
||||||
|
current_sub_index = 0
|
||||||
reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
|
reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
|
||||||
reasoning["index"] = current_index
|
reasoning["index"] = current_index
|
||||||
content.append(reasoning)
|
content.append(reasoning)
|
||||||
elif chunk.type == "response.reasoning_summary_part.added":
|
elif chunk.type == "response.reasoning_summary_part.added":
|
||||||
|
if output_version in ("v0", "responses/v1"):
|
||||||
_advance(chunk.output_index)
|
_advance(chunk.output_index)
|
||||||
content.append(
|
content.append(
|
||||||
{
|
{
|
||||||
# langchain-core uses the `index` key to aggregate text blocks.
|
# langchain-core uses the `index` key to aggregate text blocks.
|
||||||
"summary": [
|
"summary": [
|
||||||
{"index": chunk.summary_index, "type": "summary_text", "text": ""}
|
{
|
||||||
|
"index": chunk.summary_index,
|
||||||
|
"type": "summary_text",
|
||||||
|
"text": "",
|
||||||
|
}
|
||||||
],
|
],
|
||||||
"index": current_index,
|
"index": current_index,
|
||||||
"type": "reasoning",
|
"type": "reasoning",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
else:
|
||||||
|
block = {"type": "reasoning", "reasoning": ""}
|
||||||
|
if chunk.summary_index > 0:
|
||||||
|
_advance(chunk.output_index, chunk.summary_index)
|
||||||
|
block["id"] = chunk.item_id
|
||||||
|
block["index"] = current_index
|
||||||
|
content.append(block)
|
||||||
elif chunk.type == "response.image_generation_call.partial_image":
|
elif chunk.type == "response.image_generation_call.partial_image":
|
||||||
# Partial images are not supported yet.
|
# Partial images are not supported yet.
|
||||||
pass
|
pass
|
||||||
@ -3951,6 +4010,8 @@ def _convert_responses_chunk_to_generation_chunk(
|
|||||||
AIMessageChunk,
|
AIMessageChunk,
|
||||||
_convert_to_v03_ai_message(message, has_reasoning=has_reasoning),
|
_convert_to_v03_ai_message(message, has_reasoning=has_reasoning),
|
||||||
)
|
)
|
||||||
|
elif output_version == "v1":
|
||||||
|
message = _convert_to_v1_from_responses(message)
|
||||||
else:
|
else:
|
||||||
pass
|
pass
|
||||||
return (
|
return (
|
||||||
|
Binary file not shown.
Binary file not shown.
@@ -52,9 +52,11 @@ def _check_response(response: Optional[BaseMessage]) -> None:
     assert response.response_metadata["service_tier"]
 
 
+@pytest.mark.default_cassette("test_web_search.yaml.gz")
 @pytest.mark.vcr
-def test_web_search() -> None:
-    llm = ChatOpenAI(model=MODEL_NAME, output_version="responses/v1")
+@pytest.mark.parametrize("output_version", ["responses/v1", "v1"])
+def test_web_search(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
+    llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
     first_response = llm.invoke(
         "What was a positive news story from today?",
         tools=[{"type": "web_search_preview"}],
@@ -141,13 +143,15 @@ async def test_web_search_async() -> None:
     assert tool_output["type"] == "web_search_call"
 
 
-@pytest.mark.flaky(retries=3, delay=1)
-def test_function_calling() -> None:
+@pytest.mark.default_cassette("test_function_calling.yaml.gz")
+@pytest.mark.vcr
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_function_calling(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
     def multiply(x: int, y: int) -> int:
         """return x * y"""
         return x * y
 
-    llm = ChatOpenAI(model=MODEL_NAME)
+    llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
     bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
     ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
     assert len(ai_msg.tool_calls) == 1
|
|||||||
|
|
||||||
@pytest.mark.default_cassette("test_reasoning.yaml.gz")
|
@pytest.mark.default_cassette("test_reasoning.yaml.gz")
|
||||||
@pytest.mark.vcr
|
@pytest.mark.vcr
|
||||||
@pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
|
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
|
||||||
def test_reasoning(output_version: Literal["v0", "responses/v1"]) -> None:
|
def test_reasoning(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
|
||||||
llm = ChatOpenAI(
|
llm = ChatOpenAI(
|
||||||
model="o4-mini", use_responses_api=True, output_version=output_version
|
model="o4-mini", use_responses_api=True, output_version=output_version
|
||||||
)
|
)
|
||||||
@ -376,9 +380,9 @@ def test_file_search() -> None:
|
|||||||
|
|
||||||
@pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz")
|
@pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz")
|
||||||
@pytest.mark.vcr
|
@pytest.mark.vcr
|
||||||
@pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
|
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
|
||||||
def test_stream_reasoning_summary(
|
def test_stream_reasoning_summary(
|
||||||
output_version: Literal["v0", "responses/v1"],
|
output_version: Literal["v0", "responses/v1", "v1"],
|
||||||
) -> None:
|
) -> None:
|
||||||
llm = ChatOpenAI(
|
llm = ChatOpenAI(
|
||||||
model="o4-mini",
|
model="o4-mini",
|
||||||
@ -398,7 +402,14 @@ def test_stream_reasoning_summary(
|
|||||||
if output_version == "v0":
|
if output_version == "v0":
|
||||||
reasoning = response_1.additional_kwargs["reasoning"]
|
reasoning = response_1.additional_kwargs["reasoning"]
|
||||||
assert set(reasoning.keys()) == {"id", "type", "summary"}
|
assert set(reasoning.keys()) == {"id", "type", "summary"}
|
||||||
else:
|
summary = reasoning["summary"]
|
||||||
|
assert isinstance(summary, list)
|
||||||
|
for block in summary:
|
||||||
|
assert isinstance(block, dict)
|
||||||
|
assert isinstance(block["type"], str)
|
||||||
|
assert isinstance(block["text"], str)
|
||||||
|
assert block["text"]
|
||||||
|
elif output_version == "responses/v1":
|
||||||
reasoning = next(
|
reasoning = next(
|
||||||
block
|
block
|
||||||
for block in response_1.content
|
for block in response_1.content
|
||||||
@ -412,6 +423,18 @@ def test_stream_reasoning_summary(
|
|||||||
assert isinstance(block["type"], str)
|
assert isinstance(block["type"], str)
|
||||||
assert isinstance(block["text"], str)
|
assert isinstance(block["text"], str)
|
||||||
assert block["text"]
|
assert block["text"]
|
||||||
|
else:
|
||||||
|
# v1
|
||||||
|
total_reasoning_blocks = 0
|
||||||
|
for block in response_1.content:
|
||||||
|
if block["type"] == "reasoning":
|
||||||
|
total_reasoning_blocks += 1
|
||||||
|
assert isinstance(block["id"], str) and block["id"].startswith("rs_")
|
||||||
|
assert isinstance(block["reasoning"], str)
|
||||||
|
assert isinstance(block["index"], int)
|
||||||
|
assert (
|
||||||
|
total_reasoning_blocks > 1
|
||||||
|
) # This query typically generates multiple reasoning blocks
|
||||||
|
|
||||||
# Check we can pass back summaries
|
# Check we can pass back summaries
|
||||||
message_2 = {"role": "user", "content": "Thank you."}
|
message_2 = {"role": "user", "content": "Thank you."}
|
||||||
|
@@ -51,7 +51,11 @@ from langchain_openai import ChatOpenAI
 from langchain_openai.chat_models._compat import (
     _FUNCTION_CALL_IDS_MAP_KEY,
     _convert_from_v03_ai_message,
+    _convert_from_v1_to_chat_completions,
+    _convert_from_v1_to_responses,
     _convert_to_v03_ai_message,
+    _convert_to_v1_from_chat_completions,
+    _convert_to_v1_from_responses,
 )
 from langchain_openai.chat_models.base import (
     _construct_lc_result_from_responses_api,
|
|||||||
assert payload["tools"][0]["headers"]["Authorization"] == "Bearer PLACEHOLDER"
|
assert payload["tools"][0]["headers"]["Authorization"] == "Bearer PLACEHOLDER"
|
||||||
|
|
||||||
|
|
||||||
def test_compat() -> None:
|
def test_compat_responses_v1() -> None:
|
||||||
# Check compatibility with v0.3 message format
|
# Check compatibility with v0.3 message format
|
||||||
message_v03 = AIMessage(
|
message_v03 = AIMessage(
|
||||||
content=[
|
content=[
|
||||||
@ -2357,6 +2361,421 @@ def test_compat() -> None:
|
|||||||
assert message_v03_output is not message_v03
|
assert message_v03_output is not message_v03
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"message_v1, expected",
|
||||||
|
[
|
||||||
|
(
|
||||||
|
AIMessage(
|
||||||
|
[
|
||||||
|
{"type": "reasoning", "reasoning": "Reasoning text"},
|
||||||
|
{"type": "tool_call", "id": "call_123"},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "Hello, world!",
|
||||||
|
"annotations": [
|
||||||
|
{"type": "url_citation", "url": "https://example.com"}
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
AIMessage(
|
||||||
|
[{"type": "text", "text": "Hello, world!"}],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
)
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_convert_from_v1_to_chat_completions(
|
||||||
|
message_v1: AIMessage, expected: AIMessage
|
||||||
|
) -> None:
|
||||||
|
result = _convert_from_v1_to_chat_completions(message_v1)
|
||||||
|
assert result == expected
|
||||||
|
|
||||||
|
# Check no mutation
|
||||||
|
assert message_v1 != result
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"message_chat_completions, expected",
|
||||||
|
[
|
||||||
|
(
|
||||||
|
AIMessage(
|
||||||
|
"Hello, world!", id="chatcmpl-123", response_metadata={"foo": "bar"}
|
||||||
|
),
|
||||||
|
AIMessage(
|
||||||
|
[{"type": "text", "text": "Hello, world!"}],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
AIMessage(
|
||||||
|
[{"type": "text", "text": "Hello, world!"}],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
AIMessage(
|
||||||
|
[
|
||||||
|
{"type": "text", "text": "Hello, world!"},
|
||||||
|
{"type": "tool_call", "id": "call_123"},
|
||||||
|
],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
AIMessage(
|
||||||
|
"",
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
additional_kwargs={"tool_calls": [{"foo": "bar"}]},
|
||||||
|
),
|
||||||
|
AIMessage(
|
||||||
|
[{"type": "tool_call", "id": "call_123"}],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
id="chatcmpl-123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_convert_to_v1_from_chat_completions(
|
||||||
|
message_chat_completions: AIMessage, expected: AIMessage
|
||||||
|
) -> None:
|
||||||
|
result = _convert_to_v1_from_chat_completions(message_chat_completions)
|
||||||
|
assert result == expected
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"message_v1, expected",
|
||||||
|
[
|
||||||
|
(
|
||||||
|
AIMessage(
|
||||||
|
[
|
||||||
|
{"type": "reasoning", "id": "abc123"},
|
||||||
|
{"type": "reasoning", "id": "abc234", "reasoning": "foo "},
|
||||||
|
{"type": "reasoning", "id": "abc234", "reasoning": "bar"},
|
||||||
|
{"type": "tool_call", "id": "call_123"},
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_234",
|
||||||
|
"name": "get_weather_2",
|
||||||
|
"arguments": '{"location": "New York"}',
|
||||||
|
"item_id": "fc_123",
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "Hello "},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "world",
|
||||||
|
"annotations": [
|
||||||
|
{"type": "url_citation", "url": "https://example.com"},
|
||||||
|
{
|
||||||
|
"type": "document_citation",
|
||||||
|
"title": "my doc",
|
||||||
|
"index": 1,
|
||||||
|
"file_id": "file_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "non_standard_annotation",
|
||||||
|
"value": {"bar": "baz"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "image",
|
||||||
|
"source_type": "base64",
|
||||||
|
"data": "...",
|
||||||
|
"id": "img_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "non_standard",
|
||||||
|
"value": {"type": "something_else", "foo": "bar"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
# Make values different to check we pull from content when
|
||||||
|
# available
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_234",
|
||||||
|
"name": "get_weather_3",
|
||||||
|
"args": {"location": "Boston"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
id="resp123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
AIMessage(
|
||||||
|
[
|
||||||
|
{"type": "reasoning", "id": "abc123"},
|
||||||
|
{
|
||||||
|
"type": "reasoning",
|
||||||
|
"id": "abc234",
|
||||||
|
"summary": [
|
||||||
|
{"type": "summary_text", "text": "foo "},
|
||||||
|
{"type": "summary_text", "text": "bar"},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function_call",
|
||||||
|
"call_id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"arguments": '{"location": "San Francisco"}',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function_call",
|
||||||
|
"call_id": "call_234",
|
||||||
|
"name": "get_weather_2",
|
||||||
|
"arguments": '{"location": "New York"}',
|
||||||
|
"id": "fc_123",
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "Hello "},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "world",
|
||||||
|
"annotations": [
|
||||||
|
{"type": "url_citation", "url": "https://example.com"},
|
||||||
|
{
|
||||||
|
"type": "file_citation",
|
||||||
|
"filename": "my doc",
|
||||||
|
"index": 1,
|
||||||
|
"file_id": "file_123",
|
||||||
|
},
|
||||||
|
{"bar": "baz"},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{"type": "image_generation_call", "id": "img_123", "result": "..."},
|
||||||
|
{"type": "something_else", "foo": "bar"},
|
||||||
|
],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
# Make values different to check we pull from content when
|
||||||
|
# available
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_234",
|
||||||
|
"name": "get_weather_3",
|
||||||
|
"args": {"location": "Boston"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
id="resp123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
)
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_convert_from_v1_to_responses(
|
||||||
|
message_v1: AIMessage, expected: AIMessage
|
||||||
|
) -> None:
|
||||||
|
result = _convert_from_v1_to_responses(message_v1)
|
||||||
|
assert result == expected
|
||||||
|
|
||||||
|
# Check no mutation
|
||||||
|
assert message_v1 != result
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"message_responses, expected",
|
||||||
|
[
|
||||||
|
(
|
||||||
|
AIMessage(
|
||||||
|
[
|
||||||
|
{"type": "reasoning", "id": "abc123"},
|
||||||
|
{
|
||||||
|
"type": "reasoning",
|
||||||
|
"id": "abc234",
|
||||||
|
"summary": [
|
||||||
|
{"type": "summary_text", "text": "foo "},
|
||||||
|
{"type": "summary_text", "text": "bar"},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function_call",
|
||||||
|
"call_id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"arguments": '{"location": "San Francisco"}',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function_call",
|
||||||
|
"call_id": "call_234",
|
||||||
|
"name": "get_weather_2",
|
||||||
|
"arguments": '{"location": "New York"}',
|
||||||
|
"id": "fc_123",
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "Hello "},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "world",
|
||||||
|
"annotations": [
|
||||||
|
{"type": "url_citation", "url": "https://example.com"},
|
||||||
|
{
|
||||||
|
"type": "file_citation",
|
||||||
|
"filename": "my doc",
|
||||||
|
"index": 1,
|
||||||
|
"file_id": "file_123",
|
||||||
|
},
|
||||||
|
{"bar": "baz"},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{"type": "image_generation_call", "id": "img_123", "result": "..."},
|
||||||
|
{"type": "something_else", "foo": "bar"},
|
||||||
|
],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
# Make values different to check we pull from content when
|
||||||
|
# available
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_234",
|
||||||
|
"name": "get_weather_3",
|
||||||
|
"args": {"location": "Boston"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
id="resp123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
AIMessage(
|
||||||
|
[
|
||||||
|
{"type": "reasoning", "id": "abc123"},
|
||||||
|
{"type": "reasoning", "id": "abc234", "reasoning": "foo "},
|
||||||
|
{"type": "reasoning", "id": "abc234", "reasoning": "bar"},
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"arguments": '{"location": "San Francisco"}',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_234",
|
||||||
|
"name": "get_weather_2",
|
||||||
|
"arguments": '{"location": "New York"}',
|
||||||
|
"item_id": "fc_123",
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "Hello "},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "world",
|
||||||
|
"annotations": [
|
||||||
|
{"type": "url_citation", "url": "https://example.com"},
|
||||||
|
{
|
||||||
|
"type": "document_citation",
|
||||||
|
"title": "my doc",
|
||||||
|
"index": 1,
|
||||||
|
"file_id": "file_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "non_standard_annotation",
|
||||||
|
"value": {"bar": "baz"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "image",
|
||||||
|
"source_type": "base64",
|
||||||
|
"data": "...",
|
||||||
|
"id": "img_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "non_standard",
|
||||||
|
"value": {"type": "something_else", "foo": "bar"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
tool_calls=[
|
||||||
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
# Make values different to check we pull from content when
|
||||||
|
# available
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_234",
|
||||||
|
"name": "get_weather_3",
|
||||||
|
"args": {"location": "Boston"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
id="resp123",
|
||||||
|
response_metadata={"foo": "bar"},
|
||||||
|
),
|
||||||
|
)
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_convert_to_v1_from_responses(
|
||||||
|
message_responses: AIMessage, expected: AIMessage
|
||||||
|
) -> None:
|
||||||
|
result = _convert_to_v1_from_responses(message_responses)
|
||||||
|
assert result == expected
|
||||||
|
|
||||||
|
|
||||||
def test_get_last_messages() -> None:
|
def test_get_last_messages() -> None:
|
||||||
messages: list[BaseMessage] = [HumanMessage("Hello")]
|
messages: list[BaseMessage] = [HumanMessage("Hello")]
|
||||||
last_messages, previous_response_id = _get_last_messages(messages)
|
last_messages, previous_response_id = _get_last_messages(messages)
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
from unittest.mock import MagicMock, patch
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
|
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
|
||||||
from openai.types.responses import (
|
from openai.types.responses import (
|
||||||
ResponseCompletedEvent,
|
ResponseCompletedEvent,
|
||||||
@ -610,8 +611,97 @@ def _strip_none(obj: Any) -> Any:
|
|||||||
return obj
|
return obj
|
||||||
|
|
||||||
|
|
||||||
def test_responses_stream() -> None:
|
@pytest.mark.parametrize(
|
||||||
llm = ChatOpenAI(model="o4-mini", output_version="responses/v1")
|
"output_version, expected_content",
|
||||||
|
[
|
||||||
|
(
|
||||||
|
"responses/v1",
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "rs_123",
|
||||||
|
"summary": [
|
||||||
|
{
|
||||||
|
"index": 0,
|
||||||
|
"type": "summary_text",
|
||||||
|
"text": "reasoning block one",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"index": 1,
|
||||||
|
"type": "summary_text",
|
||||||
|
"text": "another reasoning block",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"type": "reasoning",
|
||||||
|
"index": 0,
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "text block one", "index": 1, "id": "msg_123"},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "another text block",
|
||||||
|
"index": 2,
|
||||||
|
"id": "msg_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "rs_234",
|
||||||
|
"summary": [
|
||||||
|
{"index": 0, "type": "summary_text", "text": "more reasoning"},
|
||||||
|
{
|
||||||
|
"index": 1,
|
||||||
|
"type": "summary_text",
|
||||||
|
"text": "still more reasoning",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"type": "reasoning",
|
||||||
|
"index": 3,
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "more", "index": 4, "id": "msg_234"},
|
||||||
|
{"type": "text", "text": "text", "index": 5, "id": "msg_234"},
|
||||||
|
],
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"v1",
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"type": "reasoning",
|
||||||
|
"reasoning": "reasoning block one",
|
||||||
|
"id": "rs_123",
|
||||||
|
"index": 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "reasoning",
|
||||||
|
"reasoning": "another reasoning block",
|
||||||
|
"id": "rs_123",
|
||||||
|
"index": 1,
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "text block one", "index": 2, "id": "msg_123"},
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "another text block",
|
||||||
|
"index": 3,
|
||||||
|
"id": "msg_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "reasoning",
|
||||||
|
"reasoning": "more reasoning",
|
||||||
|
"id": "rs_234",
|
||||||
|
"index": 4,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "reasoning",
|
||||||
|
"reasoning": "still more reasoning",
|
||||||
|
"id": "rs_234",
|
||||||
|
"index": 5,
|
||||||
|
},
|
||||||
|
{"type": "text", "text": "more", "index": 6, "id": "msg_234"},
|
||||||
|
{"type": "text", "text": "text", "index": 7, "id": "msg_234"},
|
||||||
|
],
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_responses_stream(output_version: str, expected_content: list[dict]) -> None:
|
||||||
|
llm = ChatOpenAI(
|
||||||
|
model="o4-mini", use_responses_api=True, output_version=output_version
|
||||||
|
)
|
||||||
mock_client = MagicMock()
|
mock_client = MagicMock()
|
||||||
|
|
||||||
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
|
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
|
||||||
@ -620,36 +710,14 @@ def test_responses_stream() -> None:
|
|||||||
mock_client.responses.create = mock_create
|
mock_client.responses.create = mock_create
|
||||||
|
|
||||||
full: Optional[BaseMessageChunk] = None
|
full: Optional[BaseMessageChunk] = None
|
||||||
|
chunks = []
|
||||||
with patch.object(llm, "root_client", mock_client):
|
with patch.object(llm, "root_client", mock_client):
|
||||||
for chunk in llm.stream("test"):
|
for chunk in llm.stream("test"):
|
||||||
assert isinstance(chunk, AIMessageChunk)
|
assert isinstance(chunk, AIMessageChunk)
|
||||||
full = chunk if full is None else full + chunk
|
full = chunk if full is None else full + chunk
|
||||||
assert isinstance(full, AIMessageChunk)
|
chunks.append(chunk)
|
||||||
|
|
||||||
expected_content = [
|
assert isinstance(full, AIMessageChunk)
|
||||||
{
|
|
||||||
"id": "rs_123",
|
|
||||||
"summary": [
|
|
||||||
{"index": 0, "type": "summary_text", "text": "reasoning block one"},
|
|
||||||
{"index": 1, "type": "summary_text", "text": "another reasoning block"},
|
|
||||||
],
|
|
||||||
"type": "reasoning",
|
|
||||||
"index": 0,
|
|
||||||
},
|
|
||||||
{"type": "text", "text": "text block one", "index": 1, "id": "msg_123"},
|
|
||||||
{"type": "text", "text": "another text block", "index": 2, "id": "msg_123"},
|
|
||||||
{
|
|
||||||
"id": "rs_234",
|
|
||||||
"summary": [
|
|
||||||
{"index": 0, "type": "summary_text", "text": "more reasoning"},
|
|
||||||
{"index": 1, "type": "summary_text", "text": "still more reasoning"},
|
|
||||||
],
|
|
||||||
"type": "reasoning",
|
|
||||||
"index": 3,
|
|
||||||
},
|
|
||||||
{"type": "text", "text": "more", "index": 4, "id": "msg_234"},
|
|
||||||
{"type": "text", "text": "text", "index": 5, "id": "msg_234"},
|
|
||||||
]
|
|
||||||
assert full.content == expected_content
|
assert full.content == expected_content
|
||||||
assert full.additional_kwargs == {}
|
assert full.additional_kwargs == {}
|
||||||
assert full.id == "resp_123"
|
assert full.id == "resp_123"
|
||||||
|