mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-13 14:21:27 +00:00

Compare commits: jk/30jan/i ... eugene/add (12 commits)

Commits:

2276392fae
5de65de99a
20b80a1ef8
20a6bdf510
36dbb20da7
3768bb1d58
8d85a25f87
81a4a051ab
bc5e8e0c17
93c1aeebb6
ac23607a61
59b12f7e46
@@ -311,6 +311,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        does not properly support streaming.
    """

    output_version: str = "v0"
    """Version of AIMessage output format to use.

    This field is used to roll out new output formats for chat model AIMessages
    in a backwards-compatible way.

    All chat models currently support the default of ``"v0"``. Chat model subclasses
    can override with (customizable) supported values.

    .. versionadded:: 0.3.68
    """

    @model_validator(mode="before")
    @classmethod
    def raise_deprecation(cls, values: dict) -> Any:
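For example, an integration that supports the newer formats can be pinned explicitly. A minimal sketch (``ChatOpenAI`` and the model name are illustrative, and ``"v1"`` is honored only by integrations that implement it):

    from langchain_openai import ChatOpenAI

    llm_v0 = ChatOpenAI(model="gpt-4.1", output_version="v0")  # default, legacy format
    llm_v1 = ChatOpenAI(model="gpt-4.1", output_version="v1")  # cross-provider standard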
@@ -33,6 +33,15 @@ if TYPE_CHECKING:
    )
    from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
    from langchain_core.messages.content_blocks import (
        Base64ContentBlock,
        ContentBlock,
        DocumentCitation,
        NonStandardAnnotation,
        NonStandardContentBlock,
        ReasoningContentBlock,
        TextContentBlock,
        ToolCallContentBlock,
        UrlCitation,
        convert_to_openai_data_block,
        convert_to_openai_image_block,
        is_data_content_block,
@@ -66,23 +75,32 @@ __all__ = (
    "AIMessage",
    "AIMessageChunk",
    "AnyMessage",
    "Base64ContentBlock",
    "BaseMessage",
    "BaseMessageChunk",
    "ChatMessage",
    "ChatMessageChunk",
    "ContentBlock",
    "DocumentCitation",
    "FunctionMessage",
    "FunctionMessageChunk",
    "HumanMessage",
    "HumanMessageChunk",
    "InvalidToolCall",
    "MessageLikeRepresentation",
    "NonStandardAnnotation",
    "NonStandardContentBlock",
    "ReasoningContentBlock",
    "RemoveMessage",
    "SystemMessage",
    "SystemMessageChunk",
    "TextContentBlock",
    "ToolCall",
    "ToolCallChunk",
    "ToolCallContentBlock",
    "ToolMessage",
    "ToolMessageChunk",
    "UrlCitation",
    "_message_from_dict",
    "convert_to_messages",
    "convert_to_openai_data_block",
@@ -103,25 +121,34 @@ __all__ = (
_dynamic_imports = {
    "AIMessage": "ai",
    "AIMessageChunk": "ai",
    "Base64ContentBlock": "content_blocks",
    "BaseMessage": "base",
    "BaseMessageChunk": "base",
    "merge_content": "base",
    "message_to_dict": "base",
    "messages_to_dict": "base",
    "ContentBlock": "content_blocks",
    "ChatMessage": "chat",
    "ChatMessageChunk": "chat",
    "DocumentCitation": "content_blocks",
    "FunctionMessage": "function",
    "FunctionMessageChunk": "function",
    "HumanMessage": "human",
    "HumanMessageChunk": "human",
    "NonStandardAnnotation": "content_blocks",
    "NonStandardContentBlock": "content_blocks",
    "ReasoningContentBlock": "content_blocks",
    "RemoveMessage": "modifier",
    "SystemMessage": "system",
    "SystemMessageChunk": "system",
    "InvalidToolCall": "tool",
    "TextContentBlock": "content_blocks",
    "ToolCall": "tool",
    "ToolCallChunk": "tool",
    "ToolCallContentBlock": "content_blocks",
    "ToolMessage": "tool",
    "ToolMessageChunk": "tool",
    "UrlCitation": "content_blocks",
    "AnyMessage": "utils",
    "MessageLikeRepresentation": "utils",
    "_message_from_dict": "utils",
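With the ``_dynamic_imports`` entries above, the new block types resolve lazily from the package root. A minimal sketch of the imports this enables:

    from langchain_core.messages import (
        ReasoningContentBlock,
        TextContentBlock,
        ToolCallContentBlock,
    )

    reasoning: ReasoningContentBlock = {"type": "reasoning", "reasoning": "thinking..."}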
@@ -8,6 +8,7 @@ from typing import Any, Literal, Optional, Union, cast
from pydantic import model_validator
from typing_extensions import NotRequired, Self, TypedDict, override

from langchain_core.messages import ContentBlock
from langchain_core.messages.base import (
    BaseMessage,
    BaseMessageChunk,
@@ -178,7 +179,7 @@ class AIMessage(BaseMessage):
    """The type of the message (used for deserialization). Defaults to "ai"."""

    def __init__(
        self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
        self, content: Union[str, list[Union[str, ContentBlock, dict]]], **kwargs: Any
    ) -> None:
        """Pass in content as positional arg.

@@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Optional, Union, cast
from pydantic import ConfigDict, Field

from langchain_core.load.serializable import Serializable
from langchain_core.messages import ContentBlock
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts, merge_lists
from langchain_core.utils.interactive_env import is_interactive_env
@@ -23,7 +24,7 @@ class BaseMessage(Serializable):
    Messages are the inputs and outputs of ChatModels.
    """

    content: Union[str, list[Union[str, dict]]]
    content: Union[str, list[Union[str, ContentBlock, dict]]]
    """The string contents of the message."""

    additional_kwargs: dict = Field(default_factory=dict)
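With the widened annotation above, typed content blocks pass straight through. A sketch, assuming the new union:

    from langchain_core.messages import AIMessage

    blocks = [
        {"type": "reasoning", "reasoning": "The user greeted me."},
        {"type": "text", "text": "Hello!"},
    ]
    msg = AIMessage(content=blocks)  # list[ContentBlock] now type-checks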
@@ -7,7 +7,94 @@ from pydantic import TypeAdapter, ValidationError
from typing_extensions import NotRequired, TypedDict


class BaseDataContentBlock(TypedDict, total=False):
# Text and annotations
class UrlCitation(TypedDict):
    """Citation from a URL."""

    type: Literal["url_citation"]

    url: str
    """Source URL."""

    title: NotRequired[str]
    """Source title."""

    cited_text: NotRequired[str]
    """Text from the source that is being cited."""

    start_index: NotRequired[int]
    """Start index of the response text for which the annotation applies."""

    end_index: NotRequired[int]
    """End index of the response text for which the annotation applies."""


class DocumentCitation(TypedDict):
    """Annotation for data from a document."""

    type: Literal["document_citation"]

    title: NotRequired[str]
    """Source title."""

    cited_text: NotRequired[str]
    """Text from the source that is being cited."""

    start_index: NotRequired[int]
    """Start index of the response text for which the annotation applies."""

    end_index: NotRequired[int]
    """End index of the response text for which the annotation applies."""


class NonStandardAnnotation(TypedDict):
    """Provider-specific annotation format."""

    type: Literal["non_standard_annotation"]
    """Type of the content block."""
    value: dict[str, Any]
    """Provider-specific annotation data."""


class TextContentBlock(TypedDict):
    """Content block for text output."""

    type: Literal["text"]
    """Type of the content block."""
    text: str
    """Block text."""
    annotations: NotRequired[
        list[Union[UrlCitation, DocumentCitation, NonStandardAnnotation]]
    ]
    """Citations and other annotations."""


# Tool calls
class ToolCallContentBlock(TypedDict):
    """Content block for tool calls.

    These are references to a :class:`~langchain_core.messages.tool.ToolCall` in the
    message's ``tool_calls`` attribute.
    """

    type: Literal["tool_call"]
    """Type of the content block."""
    id: str
    """Tool call ID."""


# Reasoning
class ReasoningContentBlock(TypedDict):
    """Content block for reasoning output."""

    type: Literal["reasoning"]
    """Type of the content block."""
    reasoning: NotRequired[str]
    """Reasoning text."""


# Multi-modal
class BaseDataContentBlock(TypedDict):
    """Base class for data content blocks."""

    mime_type: NotRequired[str]
@@ -47,7 +134,7 @@ class PlainTextContentBlock(BaseDataContentBlock):
    """Text data."""


class IDContentBlock(TypedDict):
class IDContentBlock(BaseDataContentBlock):
    """Content block for data specified by an identifier."""

    type: Literal["image", "audio", "file"]
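A sketch of a fully populated text block under these definitions (values are illustrative):

    from langchain_core.messages import TextContentBlock, UrlCitation

    citation: UrlCitation = {
        "type": "url_citation",
        "url": "https://example.com/article",
        "title": "Example article",
        "cited_text": "the cited passage",
        "start_index": 0,
        "end_index": 17,
    }
    block: TextContentBlock = {
        "type": "text",
        "text": "the cited passage, paraphrased in the response",
        "annotations": [citation],
    }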
@@ -68,6 +155,28 @@ DataContentBlock = Union[
_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)


# Non-standard
class NonStandardContentBlock(TypedDict, total=False):
    """Content block provider-specific data.

    This block contains data for which there is not yet a standard type.
    """

    type: Literal["non_standard"]
    """Type of the content block."""
    value: dict[str, Any]
    """Provider-specific data."""


ContentBlock = Union[
    TextContentBlock,
    ToolCallContentBlock,
    ReasoningContentBlock,
    DataContentBlock,
    NonStandardContentBlock,
]


def is_data_content_block(
    content_block: dict,
) -> bool:
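The ``ContentBlock`` union plus ``is_data_content_block`` lets callers branch on standard multimodal data. A minimal sketch (the base64 payload is a placeholder, not a real image):

    from langchain_core.messages import is_data_content_block

    image_block = {
        "type": "image",
        "source_type": "base64",
        "data": "iVBORw0KGgo=",  # placeholder bytes
        "mime_type": "image/png",
    }
    assert is_data_content_block(image_block)
    assert not is_data_content_block({"type": "text", "text": "hi"})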
@@ -2,6 +2,7 @@

from typing import Any, Literal, Union

from langchain_core.messages import ContentBlock
from langchain_core.messages.base import BaseMessage, BaseMessageChunk


@@ -41,7 +42,7 @@ class HumanMessage(BaseMessage):
    """The type of the message (used for serialization). Defaults to "human"."""

    def __init__(
        self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
        self, content: Union[str, list[Union[str, ContentBlock, dict]]], **kwargs: Any
    ) -> None:
        """Pass in content as positional arg.


@@ -31,7 +31,10 @@ from typing import (
from pydantic import Discriminator, Field, Tag

from langchain_core.exceptions import ErrorCode, create_message
from langchain_core.messages import convert_to_openai_data_block, is_data_content_block
from langchain_core.messages import (
    convert_to_openai_data_block,
    is_data_content_block,
)
from langchain_core.messages.ai import AIMessage, AIMessageChunk
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
@@ -1011,8 +1014,6 @@ def convert_to_openai_messages(

    for i, message in enumerate(messages):
        oai_msg: dict = {"role": _get_message_openai_role(message)}
        tool_messages: list = []
        content: Union[str, list[dict]]

        if message.name:
            oai_msg["name"] = message.name
@@ -1023,257 +1024,7 @@ def convert_to_openai_messages(
        if isinstance(message, ToolMessage):
            oai_msg["tool_call_id"] = message.tool_call_id

        if not message.content:
            content = "" if text_format == "string" else []
        elif isinstance(message.content, str):
            if text_format == "string":
                content = message.content
            else:
                content = [{"type": "text", "text": message.content}]
        elif text_format == "string" and all(
            isinstance(block, str) or block.get("type") == "text"
            for block in message.content
        ):
            content = "\n".join(
                block if isinstance(block, str) else block["text"]
                for block in message.content
            )
        else:
            content = []
            for j, block in enumerate(message.content):
                # OpenAI format
                if isinstance(block, str):
                    content.append({"type": "text", "text": block})
                elif block.get("type") == "text":
                    if missing := [k for k in ("text",) if k not in block]:
                        err = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': 'text' "
                            f"but is missing expected key(s) "
                            f"{missing}. Full content block:\n\n{block}"
                        )
                        raise ValueError(err)
                    content.append({"type": block["type"], "text": block["text"]})
                elif block.get("type") == "image_url":
                    if missing := [k for k in ("image_url",) if k not in block]:
                        err = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': 'image_url' "
                            f"but is missing expected key(s) "
                            f"{missing}. Full content block:\n\n{block}"
                        )
                        raise ValueError(err)
                    content.append(
                        {
                            "type": "image_url",
                            "image_url": block["image_url"],
                        }
                    )
                # Standard multi-modal content block
                elif is_data_content_block(block):
                    formatted_block = convert_to_openai_data_block(block)
                    if (
                        formatted_block.get("type") == "file"
                        and "file" in formatted_block
                        and "filename" not in formatted_block["file"]
                    ):
                        logger.info("Generating a fallback filename.")
                        formatted_block["file"]["filename"] = "LC_AUTOGENERATED"
                    content.append(formatted_block)
                # Anthropic and Bedrock converse format
                elif (block.get("type") == "image") or "image" in block:
                    # Anthropic
                    if source := block.get("source"):
                        if missing := [
                            k for k in ("media_type", "type", "data") if k not in source
                        ]:
                            err = (
                                f"Unrecognized content block at "
                                f"messages[{i}].content[{j}] has 'type': 'image' "
                                f"but 'source' is missing expected key(s) "
                                f"{missing}. Full content block:\n\n{block}"
                            )
                            raise ValueError(err)
                        content.append(
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": (
                                        f"data:{source['media_type']};"
                                        f"{source['type']},{source['data']}"
                                    )
                                },
                            }
                        )
                    # Bedrock converse
                    elif image := block.get("image"):
                        if missing := [
                            k for k in ("source", "format") if k not in image
                        ]:
                            err = (
                                f"Unrecognized content block at "
                                f"messages[{i}].content[{j}] has key 'image', "
                                f"but 'image' is missing expected key(s) "
                                f"{missing}. Full content block:\n\n{block}"
                            )
                            raise ValueError(err)
                        b64_image = _bytes_to_b64_str(image["source"]["bytes"])
                        content.append(
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": (
                                        f"data:image/{image['format']};base64,{b64_image}"
                                    )
                                },
                            }
                        )
                    else:
                        err = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': 'image' "
                            f"but does not have a 'source' or 'image' key. Full "
                            f"content block:\n\n{block}"
                        )
                        raise ValueError(err)
                # OpenAI file format
                elif (
                    block.get("type") == "file"
                    and isinstance(block.get("file"), dict)
                    and isinstance(block.get("file", {}).get("file_data"), str)
                ):
                    if block.get("file", {}).get("filename") is None:
                        logger.info("Generating a fallback filename.")
                        block["file"]["filename"] = "LC_AUTOGENERATED"
                    content.append(block)
                # OpenAI audio format
                elif (
                    block.get("type") == "input_audio"
                    and isinstance(block.get("input_audio"), dict)
                    and isinstance(block.get("input_audio", {}).get("data"), str)
                    and isinstance(block.get("input_audio", {}).get("format"), str)
                ):
                    content.append(block)
                elif block.get("type") == "tool_use":
                    if missing := [
                        k for k in ("id", "name", "input") if k not in block
                    ]:
                        err = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': "
                            f"'tool_use', but is missing expected key(s) "
                            f"{missing}. Full content block:\n\n{block}"
                        )
                        raise ValueError(err)
                    if not any(
                        tool_call["id"] == block["id"]
                        for tool_call in cast("AIMessage", message).tool_calls
                    ):
                        oai_msg["tool_calls"] = oai_msg.get("tool_calls", [])
                        oai_msg["tool_calls"].append(
                            {
                                "type": "function",
                                "id": block["id"],
                                "function": {
                                    "name": block["name"],
                                    "arguments": json.dumps(block["input"]),
                                },
                            }
                        )
                elif block.get("type") == "tool_result":
                    if missing := [
                        k for k in ("content", "tool_use_id") if k not in block
                    ]:
                        msg = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': "
                            f"'tool_result', but is missing expected key(s) "
                            f"{missing}. Full content block:\n\n{block}"
                        )
                        raise ValueError(msg)
                    tool_message = ToolMessage(
                        block["content"],
                        tool_call_id=block["tool_use_id"],
                        status="error" if block.get("is_error") else "success",
                    )
                    # Recurse to make sure tool message contents are OpenAI format.
                    tool_messages.extend(
                        convert_to_openai_messages(
                            [tool_message], text_format=text_format
                        )
                    )
                elif (block.get("type") == "json") or "json" in block:
                    if "json" not in block:
                        msg = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': 'json' "
                            f"but does not have a 'json' key. Full "
                            f"content block:\n\n{block}"
                        )
                        raise ValueError(msg)
                    content.append(
                        {
                            "type": "text",
                            "text": json.dumps(block["json"]),
                        }
                    )
                elif (block.get("type") == "guard_content") or "guard_content" in block:
                    if (
                        "guard_content" not in block
                        or "text" not in block["guard_content"]
                    ):
                        msg = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': "
                            f"'guard_content' but does not have a "
                            f"messages[{i}].content[{j}]['guard_content']['text'] "
                            f"key. Full content block:\n\n{block}"
                        )
                        raise ValueError(msg)
                    text = block["guard_content"]["text"]
                    if isinstance(text, dict):
                        text = text["text"]
                    content.append({"type": "text", "text": text})
                # VertexAI format
                elif block.get("type") == "media":
                    if missing := [k for k in ("mime_type", "data") if k not in block]:
                        err = (
                            f"Unrecognized content block at "
                            f"messages[{i}].content[{j}] has 'type': "
                            f"'media' but does not have key(s) {missing}. Full "
                            f"content block:\n\n{block}"
                        )
                        raise ValueError(err)
                    if "image" not in block["mime_type"]:
                        err = (
                            f"OpenAI messages can only support text and image data."
                            f" Received content block with media of type:"
                            f" {block['mime_type']}"
                        )
                        raise ValueError(err)
                    b64_image = _bytes_to_b64_str(block["data"])
                    content.append(
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": (f"data:{block['mime_type']};base64,{b64_image}")
                            },
                        }
                    )
                elif block.get("type") == "thinking":
                    content.append(block)
                else:
                    err = (
                        f"Unrecognized content block at "
                        f"messages[{i}].content[{j}] does not match OpenAI, "
                        f"Anthropic, Bedrock Converse, or VertexAI format. Full "
                        f"content block:\n\n{block}"
                    )
                    raise ValueError(err)
            if text_format == "string" and not any(
                block["type"] != "text" for block in content
            ):
                content = "\n".join(block["text"] for block in content)
        content, tool_messages = _extract_content(i, message, oai_msg, text_format)
        oai_msg["content"] = content
        if message.content and not oai_msg["content"] and tool_messages:
            oai_messages.extend(tool_messages)
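The refactor moves the block-handling logic into a helper without changing behavior; a call like the following still flattens text content (a sketch; the output shape is approximate):

    from langchain_core.messages import HumanMessage, convert_to_openai_messages

    oai_msgs = convert_to_openai_messages(
        [HumanMessage(content=[{"type": "text", "text": "hi"}])],
        text_format="string",
    )
    # Roughly: [{"role": "user", "content": "hi"}]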
@@ -1285,6 +1036,263 @@ def convert_to_openai_messages(
    return oai_messages


def _extract_content(
    idx: int,
    message: BaseMessage,
    oai_msg: dict,
    text_format: Literal["string", "block"],
) -> tuple[Union[str, list[dict]], list]:
    """Extract content from a message and format it according to OpenAI standards."""
    content: Union[str, list[dict]]
    tool_messages: list = []
    if not message.content:
        content = "" if text_format == "string" else []
        return content, tool_messages
    if isinstance(message.content, str):
        if text_format == "string":
            content = message.content
        else:
            content = [{"type": "text", "text": message.content}]
        return content, tool_messages
    if text_format == "string" and all(
        isinstance(block, str) or block.get("type") == "text"
        for block in message.content
    ):
        content = "\n".join(
            block if isinstance(block, str) else block["text"]
            for block in message.content
        )
        return content, tool_messages

    content = []
    for block_idx, block in enumerate(message.content):
        # OpenAI format
        if isinstance(block, str):
            content.append({"type": "text", "text": block})
            continue

        block = cast("dict", block)

        if block.get("type") == "text":
            if missing := [k for k in ("text",) if k not in block]:
                err = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': 'text' "
                    f"but is missing expected key(s) "
                    f"{missing}. Full content block:\n\n{block}"
                )
                raise ValueError(err)
            content.append({"type": block["type"], "text": block["text"]})
        elif block.get("type") == "image_url":
            if missing := [k for k in ("image_url",) if k not in block]:
                err = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': 'image_url' "
                    f"but is missing expected key(s) "
                    f"{missing}. Full content block:\n\n{block}"
                )
                raise ValueError(err)
            content.append(
                {
                    "type": "image_url",
                    "image_url": block["image_url"],
                }
            )
        # Standard multi-modal content block
        elif is_data_content_block(block):
            formatted_block = convert_to_openai_data_block(block)
            if (
                formatted_block.get("type") == "file"
                and "file" in formatted_block
                and "filename" not in formatted_block["file"]
            ):
                logger.info("Generating a fallback filename.")
                formatted_block["file"]["filename"] = "LC_AUTOGENERATED"
            content.append(formatted_block)
        # Anthropic and Bedrock converse format
        elif (block.get("type") == "image") or "image" in block:
            # Anthropic
            if source := block.get("source"):
                if missing := [
                    k for k in ("media_type", "type", "data") if k not in source
                ]:
                    err = (
                        f"Unrecognized content block at "
                        f"messages[{idx}].content[{block_idx}] has 'type': 'image' "
                        f"but 'source' is missing expected key(s) "
                        f"{missing}. Full content block:\n\n{block}"
                    )
                    raise ValueError(err)
                content.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": (
                                f"data:{source['media_type']};"
                                f"{source['type']},{source['data']}"
                            )
                        },
                    }
                )
            # Bedrock converse
            elif image := block.get("image"):
                if missing := [k for k in ("source", "format") if k not in image]:
                    err = (
                        f"Unrecognized content block at "
                        f"messages[{idx}].content[{block_idx}] has key 'image', "
                        f"but 'image' is missing expected key(s) "
                        f"{missing}. Full content block:\n\n{block}"
                    )
                    raise ValueError(err)
                b64_image = _bytes_to_b64_str(image["source"]["bytes"])
                content.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": (f"data:image/{image['format']};base64,{b64_image}")
                        },
                    }
                )
            else:
                err = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': 'image' "
                    f"but does not have a 'source' or 'image' key. Full "
                    f"content block:\n\n{block}"
                )
                raise ValueError(err)
        # OpenAI file format
        elif (
            block.get("type") == "file"
            and isinstance(block.get("file"), dict)
            and isinstance(block.get("file", {}).get("file_data"), str)
        ):
            if block.get("file", {}).get("filename") is None:
                logger.info("Generating a fallback filename.")
                block["file"]["filename"] = "LC_AUTOGENERATED"
            content.append(block)
        # OpenAI audio format
        elif (
            block.get("type") == "input_audio"
            and isinstance(block.get("input_audio"), dict)
            and isinstance(block.get("input_audio", {}).get("data"), str)
            and isinstance(block.get("input_audio", {}).get("format"), str)
        ):
            content.append(block)
        elif block.get("type") == "tool_use":
            if missing := [k for k in ("id", "name", "input") if k not in block]:
                err = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': "
                    f"'tool_use', but is missing expected key(s) "
                    f"{missing}. Full content block:\n\n{block}"
                )
                raise ValueError(err)
            if not any(
                tool_call["id"] == block["id"]
                for tool_call in cast("AIMessage", message).tool_calls
            ):
                oai_msg["tool_calls"] = oai_msg.get("tool_calls", [])
                oai_msg["tool_calls"].append(
                    {
                        "type": "function",
                        "id": block["id"],
                        "function": {
                            "name": block["name"],
                            "arguments": json.dumps(block["input"]),
                        },
                    }
                )
        elif block.get("type") == "tool_result":
            if missing := [k for k in ("content", "tool_use_id") if k not in block]:
                msg = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': "
                    f"'tool_result', but is missing expected key(s) "
                    f"{missing}. Full content block:\n\n{block}"
                )
                raise ValueError(msg)
            tool_message = ToolMessage(
                block["content"],
                tool_call_id=block["tool_use_id"],
                status="error" if block.get("is_error") else "success",
            )
            # Recurse to make sure tool message contents are OpenAI format.
            tool_messages.extend(
                convert_to_openai_messages([tool_message], text_format=text_format)
            )
        elif (block.get("type") == "json") or "json" in block:
            if "json" not in block:
                msg = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': 'json' "
                    f"but does not have a 'json' key. Full "
                    f"content block:\n\n{block}"
                )
                raise ValueError(msg)
            content.append(
                {
                    "type": "text",
                    "text": json.dumps(block["json"]),
                }
            )
        elif (block.get("type") == "guard_content") or "guard_content" in block:
            if "guard_content" not in block or "text" not in block["guard_content"]:
                msg = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': "
                    f"'guard_content' but does not have a "
                    f"messages[{idx}].content[{block_idx}]['guard_content']['text'] "
                    f"key. Full content block:\n\n{block}"
                )
                raise ValueError(msg)
            text = block["guard_content"]["text"]
            if isinstance(text, dict):
                text = text["text"]
            content.append({"type": "text", "text": text})
        # VertexAI format
        elif block.get("type") == "media":
            if missing := [k for k in ("mime_type", "data") if k not in block]:
                err = (
                    f"Unrecognized content block at "
                    f"messages[{idx}].content[{block_idx}] has 'type': "
                    f"'media' but does not have key(s) {missing}. Full "
                    f"content block:\n\n{block}"
                )
                raise ValueError(err)
            if "image" not in block["mime_type"]:
                err = (
                    f"OpenAI messages can only support text and image data."
                    f" Received content block with media of type:"
                    f" {block['mime_type']}"
                )
                raise ValueError(err)
            b64_image = _bytes_to_b64_str(block["data"])
            content.append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": (f"data:{block['mime_type']};base64,{b64_image}")
                    },
                }
            )
        elif block.get("type") == "thinking":
            content.append(block)
        else:
            err = (
                f"Unrecognized content block at "
                f"messages[{idx}].content[{block_idx}] does not match OpenAI, "
                f"Anthropic, Bedrock Converse, or VertexAI format. Full "
                f"content block:\n\n{block}"
            )
            raise ValueError(err)
    if text_format == "string" and not any(
        block["type"] != "text" for block in content
    ):
        content = "\n".join(block["text"] for block in content)
    return content, tool_messages
def _first_max_tokens(
    messages: Sequence[BaseMessage],
    *,

@@ -300,8 +300,9 @@ def test_llm_representation_for_serializable() -> None:
    assert chat._get_llm_string() == (
        '{"id": ["tests", "unit_tests", "language_models", "chat_models", '
        '"test_cache", "CustomChat"], "kwargs": {"messages": {"id": '
        '["builtins", "list_iterator"], "lc": 1, "type": "not_implemented"}}, "lc": '
        '1, "name": "CustomChat", "type": "constructor"}---[(\'stop\', None)]'
        '["builtins", "list_iterator"], "lc": 1, "type": "not_implemented"}, '
        '"output_version": "v0"}, "lc": 1, "name": "CustomChat", "type": '
        "\"constructor\"}---[('stop', None)]"
    )
@@ -1,18 +1,20 @@
import base64
import json
import re
from collections.abc import Sequence
from typing import Any, Callable, Optional, Union
from collections.abc import Mapping, Sequence
from typing import Any, Callable, Optional, Union, cast

import pytest
from typing_extensions import override
from typing_extensions import TypeGuard, override

from langchain_core.language_models.fake_chat_models import FakeChatModel
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    ReasoningContentBlock,
    SystemMessage,
    TextContentBlock,
    ToolCall,
    ToolMessage,
)
@@ -1457,3 +1459,32 @@ def test_get_buffer_string_with_empty_content() -> None:
    expected = "Human: \nAI: \nSystem: "
    actual = get_buffer_string(messages)
    assert actual == expected


def is_reasoning_block(block: Mapping[str, Any]) -> TypeGuard[ReasoningContentBlock]:
    """Check if a block is a ReasoningContentBlock."""
    return block.get("type") == "reasoning"


def is_text_block(block: Mapping[str, Any]) -> TypeGuard[TextContentBlock]:
    """Check if a block is a TextContentBlock."""
    return block.get("type") == "text"


def test_typing() -> None:
    """Test typing on things"""
    message = AIMessage(
        content="Hello",
    )
    if isinstance(message.content, str):
        # This should not raise an error
        message.content = message.content + " world"
    elif isinstance(message.content, list):
        all_contents = []
        for block in message.content:
            if isinstance(block, dict):
                block = cast("dict", block)
                if is_text_block(block):
                    all_contents.append(block["text"])
                if is_reasoning_block(block):
                    all_contents.append(block.get("reasoning", "foo"))
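The ``TypeGuard`` helpers above let a static checker narrow untyped dict blocks; a minimal sketch of the same pattern outside the test:

    from collections.abc import Mapping
    from typing import Any

    from typing_extensions import TypeGuard

    from langchain_core.messages import TextContentBlock

    def is_text_block(block: Mapping[str, Any]) -> TypeGuard[TextContentBlock]:
        return block.get("type") == "text"

    def collect_text(blocks: list[Mapping[str, Any]]) -> list[str]:
        # After the guard, a checker narrows `block` to TextContentBlock,
        # so block["text"] is known to be a str.
        return [block["text"] for block in blocks if is_text_block(block)]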
@@ -1,7 +1,10 @@
"""
This module converts between AIMessage output formats for the Responses API.
This module converts between AIMessage output formats, which are governed by the
``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"``,
``"responses/v1"``, and ``"v1"``.

ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs:
``"v0"`` corresponds to the format as of ChatOpenAI v0.3. For the Responses API, it
stores reasoning and tool outputs in AIMessage.additional_kwargs:

.. code-block:: python

@@ -24,8 +27,9 @@ ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs
        id="msg_123",
    )

To retain information about response item sequencing (and to accommodate multiple
reasoning items), ChatOpenAI now stores these items in the content sequence:
``"responses/v1"`` is only applicable to the Responses API. It retains information
about response item sequencing and accommodates multiple reasoning items by
representing these items in the content sequence:

.. code-block:: python

@@ -52,18 +56,39 @@ reasoning items), ChatOpenAI now stores these items in the content sequence:
There are other, small improvements as well; e.g., we store message IDs on text
content blocks, rather than on the AIMessage.id, which now stores the response ID.

``"v1"`` represents LangChain's cross-provider standard format.

For backwards compatibility, this module provides functions to convert between the
old and new formats. The functions are used internally by ChatOpenAI.
formats. The functions are used internally by ChatOpenAI.
"""  # noqa: E501

import json
from typing import Union
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, Union, cast

from langchain_core.messages import AIMessage
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    DocumentCitation,
    NonStandardAnnotation,
    ReasoningContentBlock,
    UrlCitation,
    is_data_content_block,
)

if TYPE_CHECKING:
    from langchain_core.messages import (
        Base64ContentBlock,
        NonStandardContentBlock,
        ReasoningContentBlock,
        TextContentBlock,
        ToolCallContentBlock,
    )

_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"


# v0.3 / Responses
def _convert_to_v03_ai_message(
    message: AIMessage, has_reasoning: bool = False
) -> AIMessage:
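To make the contrast concrete, a sketch of the same assistant turn in the two extremes (field values are illustrative):

    from langchain_core.messages import AIMessage

    # "v0": reasoning/tool items live in additional_kwargs
    v0_msg = AIMessage(
        content="Done.",
        additional_kwargs={"reasoning": {"id": "rs_123", "summary": [...]}},
    )

    # "v1": everything is an ordered content block
    v1_msg = AIMessage(
        content=[
            {"type": "reasoning", "reasoning": "Thinking it through."},
            {"type": "text", "text": "Done."},
            {"type": "tool_call", "id": "call_123"},
        ],
    )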
@@ -248,3 +273,296 @@ def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
        },
        deep=False,
    )


# v1 / Chat Completions
def _convert_to_v1_from_chat_completions(message: AIMessage) -> AIMessage:
    """Mutate a Chat Completions message to v1 format."""
    if isinstance(message.content, str):
        if message.content:
            block: TextContentBlock = {"type": "text", "text": message.content}
            message.content = [block]
        else:
            message.content = []

    for tool_call in message.tool_calls:
        if id_ := tool_call.get("id"):
            tool_call_block: ToolCallContentBlock = {"type": "tool_call", "id": id_}
            message.content.append(tool_call_block)

    if "tool_calls" in message.additional_kwargs:
        _ = message.additional_kwargs.pop("tool_calls")

    if "token_usage" in message.response_metadata:
        _ = message.response_metadata.pop("token_usage")

    return message


def _convert_to_v1_from_chat_completions_chunk(chunk: AIMessageChunk) -> AIMessageChunk:
    result = _convert_to_v1_from_chat_completions(cast(AIMessage, chunk))
    return cast(AIMessageChunk, result)


def _convert_from_v1_to_chat_completions(message: AIMessage) -> AIMessage:
    """Convert a v1 message to the Chat Completions format."""
    if isinstance(message.content, list):
        new_content: list = []
        for block in message.content:
            if isinstance(block, dict):
                block_type = block.get("type")
                if block_type == "text":
                    # Strip annotations
                    new_content.append({"type": "text", "text": block["text"]})
                elif block_type in ("reasoning", "tool_call"):
                    pass
                else:
                    new_content.append(block)
            else:
                new_content.append(block)
        return message.model_copy(update={"content": new_content})

    return message


# v1 / Responses
def _convert_annotation_to_v1(
    annotation: dict[str, Any],
) -> Union[UrlCitation, DocumentCitation, NonStandardAnnotation]:
    annotation_type = annotation.get("type")

    if annotation_type == "url_citation":
        new_annotation: UrlCitation = {"type": "url_citation", "url": annotation["url"]}
        for field in ("title", "start_index", "end_index"):
            if field in annotation:
                new_annotation[field] = annotation[field]
        return new_annotation

    elif annotation_type == "file_citation":
        new_annotation: DocumentCitation = {"type": "document_citation"}
        if "filename" in annotation:
            new_annotation["title"] = annotation["filename"]
        for field in ("file_id", "index"):  # OpenAI-specific
            if field in annotation:
                new_annotation[field] = annotation[field]
        return new_annotation

    # TODO: standardise container_file_citation?
    else:
        new_annotation: NonStandardAnnotation = {
            "type": "non_standard_annotation",
            "value": annotation,
        }
        return new_annotation
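For instance, under the mapping above an OpenAI ``file_citation`` becomes a standard document citation (values are illustrative):

    raw = {"type": "file_citation", "file_id": "file_abc", "filename": "report.pdf", "index": 3}
    converted = _convert_annotation_to_v1(raw)
    # -> {"type": "document_citation", "title": "report.pdf", "file_id": "file_abc", "index": 3}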
def _explode_reasoning(block: dict[str, Any]) -> Iterable[ReasoningContentBlock]:
    if block.get("type") != "reasoning" or "summary" not in block:
        yield block
        return

    if not block["summary"]:
        _ = block.pop("summary", None)
        yield block
        return

    # Common part for every exploded line, except 'summary'
    common = {k: v for k, v in block.items() if k != "summary"}

    # Optional keys that must appear only in the first exploded item
    first_only = {
        k: common.pop(k) for k in ("encrypted_content", "status") if k in common
    }

    for idx, part in enumerate(block["summary"]):
        new_block = dict(common)
        new_block["reasoning"] = part.get("text", "")
        if idx == 0:
            new_block.update(first_only)
        yield cast(ReasoningContentBlock, new_block)
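A sketch of the explosion (illustrative input): each summary part becomes its own block, and first-only keys such as ``status`` stay on the first.

    block = {
        "type": "reasoning",
        "id": "rs_123",
        "status": "completed",
        "summary": [
            {"type": "summary_text", "text": "Step one."},
            {"type": "summary_text", "text": "Step two."},
        ],
    }
    first, second = list(_explode_reasoning(block))
    assert first == {"type": "reasoning", "id": "rs_123", "reasoning": "Step one.", "status": "completed"}
    assert second == {"type": "reasoning", "id": "rs_123", "reasoning": "Step two."}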
def _convert_to_v1_from_responses(message: AIMessage) -> AIMessage:
    """Mutate a Responses message to v1 format."""
    if not isinstance(message.content, list):
        return message

    def _iter_blocks() -> Iterable[dict[str, Any]]:
        for block in message.content:
            block_type = block.get("type")

            if block_type == "text":
                if "annotations" in block:
                    block["annotations"] = [
                        _convert_annotation_to_v1(a) for a in block["annotations"]
                    ]
                yield block

            elif block_type == "reasoning":
                yield from _explode_reasoning(block)

            elif block_type == "image_generation_call" and (
                result := block.get("result")
            ):
                new_block: Base64ContentBlock = {
                    "type": "image",
                    "source_type": "base64",
                    "data": result,
                }
                if output_format := block.get("output_format"):
                    new_block["mime_type"] = f"image/{output_format}"
                for extra_key in (
                    "id",
                    "index",
                    "status",
                    "background",
                    "output_format",
                    "quality",
                    "revised_prompt",
                    "size",
                ):
                    if extra_key in block:
                        new_block[extra_key] = block[extra_key]
                yield new_block

            elif block_type == "function_call":
                new_block: ToolCallContentBlock = {
                    "type": "tool_call",
                    "id": block.get("call_id", ""),
                }
                if "id" in block:
                    new_block["item_id"] = block["id"]
                for extra_key in ("arguments", "name", "index"):
                    if extra_key in block:
                        new_block[extra_key] = block[extra_key]
                yield new_block

            else:
                new_block: NonStandardContentBlock = {
                    "type": "non_standard",
                    "value": block,
                }
                if "index" in new_block["value"]:
                    new_block["index"] = new_block["value"].pop("index")
                yield new_block

    # Replace the list with the fully converted one
    message.content = list(_iter_blocks())

    return message


def _convert_annotation_from_v1(annotation: dict[str, Any]) -> dict[str, Any]:
    annotation_type = annotation.get("type")

    if annotation_type == "document_citation":
        new_ann: dict[str, Any] = {"type": "file_citation"}

        if "title" in annotation:
            new_ann["filename"] = annotation["title"]

        for fld in ("file_id", "index"):
            if fld in annotation:
                new_ann[fld] = annotation[fld]

        return new_ann

    elif annotation_type == "non_standard_annotation":
        return annotation["value"]

    else:
        return dict(annotation)
def _implode_reasoning_blocks(blocks: list[dict[str, Any]]) -> Iterable[dict[str, Any]]:
    i = 0
    n = len(blocks)

    while i < n:
        block = blocks[i]

        # Ordinary block – just yield a shallow copy
        if block.get("type") != "reasoning":
            yield dict(block)
            i += 1
            continue
        elif "reasoning" not in block:
            yield {**block, "summary": []}
            i += 1
            continue
        else:
            pass

        summary: list[dict[str, str]] = [
            {"type": "summary_text", "text": block.get("reasoning", "")}
        ]
        # 'common' is every field except the exploded 'reasoning'
        common = {k: v for k, v in block.items() if k != "reasoning"}

        i += 1
        while i < n:
            next_ = blocks[i]
            if next_.get("type") == "reasoning" and "reasoning" in next_:
                summary.append(
                    {"type": "summary_text", "text": next_.get("reasoning", "")}
                )
                i += 1
            else:
                break

        merged = dict(common)
        merged["summary"] = summary
        yield merged
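And the inverse: consecutive v1 reasoning blocks implode back into one Responses-style block (a sketch with illustrative values):

    blocks = [
        {"type": "reasoning", "id": "rs_123", "reasoning": "Step one."},
        {"type": "reasoning", "id": "rs_123", "reasoning": "Step two."},
        {"type": "text", "text": "Done."},
    ]
    merged, text = list(_implode_reasoning_blocks(blocks))
    assert merged["summary"] == [
        {"type": "summary_text", "text": "Step one."},
        {"type": "summary_text", "text": "Step two."},
    ]
    assert text == {"type": "text", "text": "Done."}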
def _convert_from_v1_to_responses(message: AIMessage) -> AIMessage:
    if not isinstance(message.content, list):
        return message

    new_content: list = []
    for block in message.content:
        if isinstance(block, dict):
            block_type = block.get("type")
            if block_type == "text" and "annotations" in block:
                # Need a copy because we’re changing the annotations list
                new_block = dict(block)
                new_block["annotations"] = [
                    _convert_annotation_from_v1(a) for a in block["annotations"]
                ]
                new_content.append(new_block)
            elif block_type == "tool_call":
                new_block = {"type": "function_call", "call_id": block["id"]}
                if "item_id" in block:
                    new_block["id"] = block["item_id"]
                if "name" in block and "arguments" in block:
                    new_block["name"] = block["name"]
                    new_block["arguments"] = block["arguments"]
                else:
                    tool_call = next(
                        call for call in message.tool_calls if call["id"] == block["id"]
                    )
                    if "name" not in block:
                        new_block["name"] = tool_call["name"]
                    if "arguments" not in block:
                        new_block["arguments"] = json.dumps(tool_call["args"])
                new_content.append(new_block)
            elif (
                is_data_content_block(block)
                and block["type"] == "image"
                and block["source_type"] == "base64"
            ):
                new_block = {"type": "image_generation_call", "result": block["data"]}
                for extra_key in ("id", "status"):
                    if extra_key in block:
                        new_block[extra_key] = block[extra_key]
                new_content.append(new_block)
            elif block_type == "non_standard" and "value" in block:
                new_content.append(block["value"])
            else:
                new_content.append(block)
        else:
            new_content.append(block)

    new_content = list(_implode_reasoning_blocks(new_content))

    return message.model_copy(update={"content": new_content})
@@ -108,7 +108,12 @@ from langchain_openai.chat_models._client_utils import (
)
from langchain_openai.chat_models._compat import (
    _convert_from_v03_ai_message,
    _convert_from_v1_to_chat_completions,
    _convert_from_v1_to_responses,
    _convert_to_v03_ai_message,
    _convert_to_v1_from_chat_completions,
    _convert_to_v1_from_chat_completions_chunk,
    _convert_to_v1_from_responses,
)

if TYPE_CHECKING:
@@ -650,7 +655,7 @@ class BaseChatOpenAI(BaseChatModel):
    .. versionadded:: 0.3.9
    """

    output_version: Literal["v0", "responses/v1"] = "v0"
    output_version: str = "v0"
    """Version of AIMessage output format to use.

    This field is used to roll out new output formats for chat model AIMessages
@@ -661,9 +666,9 @@ class BaseChatOpenAI(BaseChatModel):
    - ``"v0"``: AIMessage format as of langchain-openai 0.3.x.
    - ``"responses/v1"``: Formats Responses API output
      items into AIMessage content blocks.
    - ``"v1"``: v1 of LangChain cross-provider standard.

    Currently only impacts the Responses API. ``output_version="responses/v1"`` is
    recommended.
    ``output_version="v1"`` is recommended.

    .. versionadded:: 0.3.25

@@ -850,6 +855,10 @@ class BaseChatOpenAI(BaseChatModel):
                message=default_chunk_class(content="", usage_metadata=usage_metadata),
                generation_info=base_generation_info,
            )
            if self.output_version == "v1":
                generation_chunk.message = _convert_to_v1_from_chat_completions_chunk(
                    cast(AIMessageChunk, generation_chunk.message)
                )
            return generation_chunk

        choice = choices[0]
@@ -877,6 +886,20 @@ class BaseChatOpenAI(BaseChatModel):
        if usage_metadata and isinstance(message_chunk, AIMessageChunk):
            message_chunk.usage_metadata = usage_metadata

        if self.output_version == "v1":
            message_chunk = cast(AIMessageChunk, message_chunk)
            # Convert to v1 format
            if isinstance(message_chunk.content, str):
                message_chunk = _convert_to_v1_from_chat_completions_chunk(
                    message_chunk
                )
                if message_chunk.content:
                    message_chunk.content[0]["index"] = 0  # type: ignore[index]
            else:
                message_chunk = _convert_to_v1_from_chat_completions_chunk(
                    message_chunk
                )

        generation_chunk = ChatGenerationChunk(
            message=message_chunk, generation_info=generation_info or None
        )
@@ -1169,7 +1192,12 @@ class BaseChatOpenAI(BaseChatModel):
            else:
                payload = _construct_responses_api_payload(messages, payload)
        else:
            payload["messages"] = [_convert_message_to_dict(m) for m in messages]
            payload["messages"] = [
                _convert_message_to_dict(_convert_from_v1_to_chat_completions(m))
                if isinstance(m, AIMessage)
                else _convert_message_to_dict(m)
                for m in messages
            ]
        return payload

    def _create_chat_result(
@@ -1235,6 +1263,11 @@ class BaseChatOpenAI(BaseChatModel):
        if hasattr(message, "refusal"):
            generations[0].message.additional_kwargs["refusal"] = message.refusal

        if self.output_version == "v1":
            _ = llm_output.pop("token_usage", None)
            generations[0].message = _convert_to_v1_from_chat_completions(
                cast(AIMessage, generations[0].message)
            )
        return ChatResult(generations=generations, llm_output=llm_output)

    async def _astream(
@@ -3464,6 +3497,7 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
    for lc_msg in messages:
        if isinstance(lc_msg, AIMessage):
            lc_msg = _convert_from_v03_ai_message(lc_msg)
            lc_msg = _convert_from_v1_to_responses(lc_msg)
        msg = _convert_message_to_dict(lc_msg)
        # "name" parameter unsupported
        if "name" in msg:
@@ -3607,7 +3641,7 @@ def _construct_lc_result_from_responses_api(
    response: Response,
    schema: Optional[type[_BM]] = None,
    metadata: Optional[dict] = None,
    output_version: Literal["v0", "responses/v1"] = "v0",
    output_version: str = "v0",
) -> ChatResult:
    """Construct ChatResponse from OpenAI Response API response."""
    if response.error:
@@ -3746,6 +3780,26 @@ def _construct_lc_result_from_responses_api(
        )
    if output_version == "v0":
        message = _convert_to_v03_ai_message(message)
    elif output_version == "v1":
        message = _convert_to_v1_from_responses(message)
        if response.tools and any(
            tool.type == "image_generation" for tool in response.tools
        ):
            # Get mime_type from tool definition and add to image generations
            # if missing (primarily for tracing purposes).
            image_generation_call = next(
                tool for tool in response.tools if tool.type == "image_generation"
            )
            if image_generation_call.output_format:
                mime_type = f"image/{image_generation_call.output_format}"
                for block in message.content:
                    # OK to mutate output message
                    if (
                        block.get("type") == "image"
                        and block["source_type"] == "base64"
                        and "mime_type" not in block
                    ):
                        block["mime_type"] = mime_type
    else:
        pass
    return ChatResult(generations=[ChatGeneration(message=message)])
@@ -3759,7 +3813,7 @@ def _convert_responses_chunk_to_generation_chunk(
    schema: Optional[type[_BM]] = None,
    metadata: Optional[dict] = None,
    has_reasoning: bool = False,
    output_version: Literal["v0", "responses/v1"] = "v0",
    output_version: str = "v0",
) -> tuple[int, int, int, Optional[ChatGenerationChunk]]:
    def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
        """Advance indexes tracked during streaming.
@@ -3824,9 +3878,29 @@ def _convert_responses_chunk_to_generation_chunk(
            annotation = chunk.annotation
        else:
            annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
        content.append({"annotations": [annotation], "index": current_index})
        if output_version == "v1":
            content.append(
                {
                    "type": "text",
                    "text": "",
                    "annotations": [annotation],
                    "index": current_index,
                }
            )
        else:
            content.append({"annotations": [annotation], "index": current_index})
    elif chunk.type == "response.output_text.done":
        content.append({"id": chunk.item_id, "index": current_index})
        if output_version == "v1":
            content.append(
                {
                    "type": "text",
                    "text": "",
                    "id": chunk.item_id,
                    "index": current_index,
                }
            )
        else:
            content.append({"id": chunk.item_id, "index": current_index})
    elif chunk.type == "response.created":
        id = chunk.response.id
        response_metadata["id"] = chunk.response.id  # Backwards compatibility
@@ -3902,21 +3976,34 @@ def _convert_responses_chunk_to_generation_chunk(
        content.append({"type": "refusal", "refusal": chunk.refusal})
    elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
        _advance(chunk.output_index)
        current_sub_index = 0
        reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
        reasoning["index"] = current_index
        content.append(reasoning)
    elif chunk.type == "response.reasoning_summary_part.added":
        _advance(chunk.output_index)
        content.append(
            {
                # langchain-core uses the `index` key to aggregate text blocks.
                "summary": [
                    {"index": chunk.summary_index, "type": "summary_text", "text": ""}
                ],
                "index": current_index,
                "type": "reasoning",
            }
        )
        if output_version in ("v0", "responses/v1"):
            _advance(chunk.output_index)
            content.append(
                {
                    # langchain-core uses the `index` key to aggregate text blocks.
                    "summary": [
                        {
                            "index": chunk.summary_index,
                            "type": "summary_text",
                            "text": "",
                        }
                    ],
                    "index": current_index,
                    "type": "reasoning",
                }
            )
        else:
            block = {"type": "reasoning", "reasoning": ""}
            if chunk.summary_index > 0:
                _advance(chunk.output_index, chunk.summary_index)
            block["id"] = chunk.item_id
            block["index"] = current_index
            content.append(block)
    elif chunk.type == "response.image_generation_call.partial_image":
        # Partial images are not supported yet.
        pass
@@ -3951,6 +4038,8 @@ def _convert_responses_chunk_to_generation_chunk(
            AIMessageChunk,
            _convert_to_v03_ai_message(message, has_reasoning=has_reasoning),
        )
    elif output_version == "v1":
        message = _convert_to_v1_from_responses(message)
    else:
        pass
    return (
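Taken together, the changes above support a flow like the following minimal sketch (assumes an OPENAI_API_KEY is set; the model name is illustrative):

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="o4-mini", use_responses_api=True, output_version="v1")
    response = llm.invoke("What was a positive news story from today?")
    for block in response.content:
        # With output_version="v1", content is a list of standard blocks
        # ("reasoning", "text", "tool_call", "non_standard", ...).
        print(block["type"])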
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -52,9 +52,11 @@ def _check_response(response: Optional[BaseMessage]) -> None:
|
||||
assert response.response_metadata["service_tier"]
|
||||
|
||||
|
||||
@pytest.mark.default_cassette("test_web_search.yaml.gz")
|
||||
@pytest.mark.vcr
|
||||
def test_web_search() -> None:
|
||||
llm = ChatOpenAI(model=MODEL_NAME, output_version="responses/v1")
|
||||
@pytest.mark.parametrize("output_version", ["responses/v1", "v1"])
|
||||
def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None:
|
||||
llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
|
||||
first_response = llm.invoke(
|
||||
"What was a positive news story from today?",
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
@@ -110,7 +112,10 @@ def test_web_search() -> None:
|
||||
for msg in [first_response, full, response]:
|
||||
assert isinstance(msg, AIMessage)
|
||||
block_types = [block["type"] for block in msg.content] # type: ignore[index]
|
||||
assert block_types == ["web_search_call", "text"]
|
||||
if output_version == "responses/v1":
|
||||
assert block_types == ["web_search_call", "text"]
|
||||
else:
|
||||
assert block_types == ["non_standard", "text"]
|
||||
|
||||
|
||||
@pytest.mark.flaky(retries=3, delay=1)
|
||||
@@ -141,13 +146,15 @@ async def test_web_search_async() -> None:
|
||||
assert tool_output["type"] == "web_search_call"
|
||||
|
||||
|
||||
@pytest.mark.flaky(retries=3, delay=1)
|
||||
def test_function_calling() -> None:
|
||||
@pytest.mark.default_cassette("test_function_calling.yaml.gz")
|
||||
@pytest.mark.vcr
|
||||
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
|
||||
def test_function_calling(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
|
||||
def multiply(x: int, y: int) -> int:
|
||||
"""return x * y"""
|
||||
return x * y
|
||||
|
||||
llm = ChatOpenAI(model=MODEL_NAME)
|
||||
llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
|
||||
bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
|
||||
ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
|
||||
assert len(ai_msg.tool_calls) == 1
|
||||
@@ -174,8 +181,13 @@ class FooDict(TypedDict):
|
||||
response: str
|
||||
|
||||
|
||||
def test_parsed_pydantic_schema() -> None:
|
||||
llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)
|
||||
@pytest.mark.default_cassette("test_parsed_pydantic_schema.yaml.gz")
|
||||
@pytest.mark.vcr
|
||||
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
|
||||
def test_parsed_pydantic_schema(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
|
||||
llm = ChatOpenAI(
|
||||
model=MODEL_NAME, use_responses_api=True, output_version=output_version
|
||||
)
|
||||
response = llm.invoke("how are ya", response_format=Foo)
|
||||
parsed = Foo(**json.loads(response.text()))
|
||||
assert parsed == response.additional_kwargs["parsed"]
|
||||
@@ -297,8 +309,8 @@ def test_function_calling_and_structured_output() -> None:
|
||||
|
||||
@pytest.mark.default_cassette("test_reasoning.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
def test_reasoning(output_version: Literal["v0", "responses/v1"]) -> None:
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
def test_reasoning(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
llm = ChatOpenAI(
model="o4-mini", use_responses_api=True, output_version=output_version
)
@@ -358,27 +370,32 @@ def test_computer_calls() -> None:

def test_file_search() -> None:
pytest.skip() # TODO: set up infra
llm = ChatOpenAI(model=MODEL_NAME)
llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)
tool = {
"type": "file_search",
"vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]],
}
response = llm.invoke("What is deep research by OpenAI?", tools=[tool])

input_message = {"role": "user", "content": "What is deep research by OpenAI?"}
response = llm.invoke([input_message], tools=[tool])
_check_response(response)

full: Optional[BaseMessageChunk] = None
for chunk in llm.stream("What is deep research by OpenAI?", tools=[tool]):
for chunk in llm.stream([input_message], tools=[tool]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
_check_response(full)

next_message = {"role": "user", "content": "Thank you."}
_ = llm.invoke([input_message, full, next_message])


@pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
def test_stream_reasoning_summary(
output_version: Literal["v0", "responses/v1"],
output_version: Literal["v0", "responses/v1", "v1"],
) -> None:
llm = ChatOpenAI(
model="o4-mini",
@@ -398,20 +415,39 @@ def test_stream_reasoning_summary(
if output_version == "v0":
reasoning = response_1.additional_kwargs["reasoning"]
assert set(reasoning.keys()) == {"id", "type", "summary"}
else:
summary = reasoning["summary"]
assert isinstance(summary, list)
for block in summary:
assert isinstance(block, dict)
assert isinstance(block["type"], str)
assert isinstance(block["text"], str)
assert block["text"]
elif output_version == "responses/v1":
reasoning = next(
block
for block in response_1.content
if block["type"] == "reasoning" # type: ignore[index]
)
assert set(reasoning.keys()) == {"id", "type", "summary", "index"}
summary = reasoning["summary"]
assert isinstance(summary, list)
for block in summary:
assert isinstance(block, dict)
assert isinstance(block["type"], str)
assert isinstance(block["text"], str)
assert block["text"]
summary = reasoning["summary"]
assert isinstance(summary, list)
for block in summary:
assert isinstance(block, dict)
assert isinstance(block["type"], str)
assert isinstance(block["text"], str)
assert block["text"]
else:
# v1
total_reasoning_blocks = 0
for block in response_1.content:
if block["type"] == "reasoning":
total_reasoning_blocks += 1
assert isinstance(block["id"], str) and block["id"].startswith("rs_")
assert isinstance(block["reasoning"], str)
assert isinstance(block["index"], int)
assert (
total_reasoning_blocks > 1
) # This query typically generates multiple reasoning blocks

# Check we can pass back summaries
message_2 = {"role": "user", "content": "Thank you."}
@@ -419,9 +455,15 @@ def test_stream_reasoning_summary(
assert isinstance(response_2, AIMessage)


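The v1 branch above asserts that each reasoning block carries an "rs_"-prefixed
id, a "reasoning" string, and an "index". A small sketch, assuming exactly that
block shape, for stitching the streamed reasoning text back together (the
helper name is illustrative, not part of this diff):

def collect_reasoning_text(content: list) -> str:
    # Gather v1 reasoning blocks and join their text in index order.
    blocks = [
        b for b in content if isinstance(b, dict) and b.get("type") == "reasoning"
    ]
    blocks.sort(key=lambda b: b.get("index", 0))
    return "".join(b.get("reasoning", "") for b in blocks)
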
@pytest.mark.default_cassette("test_code_interpreter.yaml.gz")
@pytest.mark.vcr
def test_code_interpreter() -> None:
llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
def test_code_interpreter(
output_version: Literal["v0", "responses/v1", "v1"],
) -> None:
llm = ChatOpenAI(
model="o4-mini", use_responses_api=True, output_version=output_version
)
llm_with_tools = llm.bind_tools(
[{"type": "code_interpreter", "container": {"type": "auto"}}]
)
@@ -431,14 +473,26 @@ def test_code_interpreter() -> None:
}
response = llm_with_tools.invoke([input_message])
_check_response(response)
tool_outputs = response.additional_kwargs["tool_outputs"]
assert tool_outputs
assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)
if output_version == "v0":
tool_outputs = [
item
for item in response.additional_kwargs["tool_outputs"]
if item["type"] == "code_interpreter_call"
]
elif output_version == "responses/v1":
tool_outputs = [
item for item in response.content if item["type"] == "code_interpreter_call"
]
else:
# v1
tool_outputs = [
item["value"] for item in response.content if item["type"] == "non_standard"
]
assert tool_outputs[0]["type"] == "code_interpreter_call"
assert len(tool_outputs) == 1

# Test streaming
# Use same container
tool_outputs = response.additional_kwargs["tool_outputs"]
assert len(tool_outputs) == 1
container_id = tool_outputs[0]["container_id"]
llm_with_tools = llm.bind_tools(
[{"type": "code_interpreter", "container": container_id}]
@@ -449,9 +503,22 @@ def test_code_interpreter() -> None:
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
tool_outputs = full.additional_kwargs["tool_outputs"]
if output_version == "v0":
tool_outputs = [
item
for item in response.additional_kwargs["tool_outputs"]
if item["type"] == "code_interpreter_call"
]
elif output_version == "responses/v1":
tool_outputs = [
item for item in response.content if item["type"] == "code_interpreter_call"
]
else:
tool_outputs = [
item["value"] for item in response.content if item["type"] == "non_standard"
]
assert tool_outputs[0]["type"] == "code_interpreter_call"
assert tool_outputs
assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)

# Test we can pass back in
next_message = {"role": "user", "content": "Please add more comments to the code."}
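Note that the container-reuse step above still reads container_id from the
v0-style additional_kwargs["tool_outputs"]. A version-agnostic sketch, assuming
only the three block shapes asserted in this test (the helper name is
illustrative, not part of this diff):

def get_container_id(response) -> str:
    # v0: tool outputs live in additional_kwargs.
    for item in response.additional_kwargs.get("tool_outputs", []):
        if item.get("type") == "code_interpreter_call":
            return item["container_id"]
    # responses/v1 and v1: scan content blocks, unwrapping non_standard.
    for block in response.content:
        if not isinstance(block, dict):
            continue
        if block.get("type") == "non_standard":
            block = block.get("value", {})
        if block.get("type") == "code_interpreter_call":
            return block["container_id"]
    msg = "no code_interpreter_call output found"
    raise ValueError(msg)
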
@@ -546,10 +613,14 @@ def test_mcp_builtin_zdr() -> None:
_ = llm_with_tools.invoke([input_message, full, approval_message])


@pytest.mark.vcr()
def test_image_generation_streaming() -> None:
@pytest.mark.default_cassette("test_image_generation_streaming.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
def test_image_generation_streaming(output_version: str) -> None:
"""Test image generation streaming."""
llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True)
llm = ChatOpenAI(
model="gpt-4.1", use_responses_api=True, output_version=output_version
)
tool = {
"type": "image_generation",
# For testing purposes let's keep the quality low, so the test runs faster.
@@ -596,15 +667,35 @@ def test_image_generation_streaming() -> None:
# At the moment, the streaming API does not pick up annotations fully.
# So the following check is commented out.
# _check_response(complete_ai_message)
tool_output = complete_ai_message.additional_kwargs["tool_outputs"][0]
assert set(tool_output.keys()).issubset(expected_keys)
if output_version == "v0":
assert complete_ai_message.additional_kwargs["tool_outputs"]
tool_output = complete_ai_message.additional_kwargs["tool_outputs"][0]
assert set(tool_output.keys()).issubset(expected_keys)
elif output_version == "responses/v1":
tool_output = next(
block
for block in complete_ai_message.content
if block["type"] == "image_generation_call"
)
assert set(tool_output.keys()).issubset(expected_keys)
else:
# v1
standard_keys = {"type", "source_type", "data", "id", "status", "index"}
tool_output = next(
block for block in complete_ai_message.content if block["type"] == "image"
)
assert set(standard_keys).issubset(tool_output.keys())


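Under v1 the image tool output becomes a standard image content block with
inline base64 data, as asserted above. A minimal decoding sketch, assuming that
block shape (the function name is illustrative, not part of this diff):

import base64

def decode_v1_image(block: dict) -> bytes:
    # Expects {"type": "image", "source_type": "base64", "data": ...}.
    if block.get("type") != "image" or block.get("source_type") != "base64":
        msg = "not a base64 image block"
        raise ValueError(msg)
    return base64.b64decode(block["data"])
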
@pytest.mark.vcr()
def test_image_generation_multi_turn() -> None:
@pytest.mark.default_cassette("test_image_generation_multi_turn.yaml.gz")
@pytest.mark.vcr
@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
def test_image_generation_multi_turn(output_version: str) -> None:
"""Test multi-turn editing of image generation by passing in history."""
# Test multi-turn
llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True)
llm = ChatOpenAI(
model="gpt-4.1", use_responses_api=True, output_version=output_version
)
# Test invocation
tool = {
"type": "image_generation",
@@ -621,9 +712,37 @@ def test_image_generation_multi_turn() -> None:
]
ai_message = llm_with_tools.invoke(chat_history)
_check_response(ai_message)
tool_output = ai_message.additional_kwargs["tool_outputs"][0]

# Example tool output for an image
expected_keys = {
"id",
"background",
"output_format",
"quality",
"result",
"revised_prompt",
"size",
"status",
"type",
}

if output_version == "v0":
tool_output = ai_message.additional_kwargs["tool_outputs"][0]
assert set(tool_output.keys()).issubset(expected_keys)
elif output_version == "responses/v1":
tool_output = next(
block
for block in ai_message.content
if block["type"] == "image_generation_call"
)
assert set(tool_output.keys()).issubset(expected_keys)
else:
standard_keys = {"type", "source_type", "data", "id", "status"}
tool_output = next(
block for block in ai_message.content if block["type"] == "image"
)
assert set(standard_keys).issubset(tool_output.keys())

# Example tool output for an image (v0)
# {
# "background": "opaque",
# "id": "ig_683716a8ddf0819888572b20621c7ae4029ec8c11f8dacf8",
@@ -639,20 +758,6 @@ def test_image_generation_multi_turn() -> None:
# "result": # base64 encode image data
# }

expected_keys = {
"id",
"background",
"output_format",
"quality",
"result",
"revised_prompt",
"size",
"status",
"type",
}

assert set(tool_output.keys()).issubset(expected_keys)

chat_history.extend(
[
# AI message with tool output
@@ -670,5 +775,20 @@ def test_image_generation_multi_turn() -> None:

ai_message2 = llm_with_tools.invoke(chat_history)
_check_response(ai_message2)
tool_output2 = ai_message2.additional_kwargs["tool_outputs"][0]
assert set(tool_output2.keys()).issubset(expected_keys)

if output_version == "v0":
tool_output = ai_message2.additional_kwargs["tool_outputs"][0]
assert set(tool_output.keys()).issubset(expected_keys)
elif output_version == "responses/v1":
tool_output = next(
block
for block in ai_message2.content
if block["type"] == "image_generation_call"
)
assert set(tool_output.keys()).issubset(expected_keys)
else:
standard_keys = {"type", "source_type", "data", "id", "status"}
tool_output = next(
block for block in ai_message2.content if block["type"] == "image"
)
assert set(standard_keys).issubset(tool_output.keys())

@@ -51,7 +51,11 @@ from langchain_openai import ChatOpenAI
from langchain_openai.chat_models._compat import (
_FUNCTION_CALL_IDS_MAP_KEY,
_convert_from_v03_ai_message,
_convert_from_v1_to_chat_completions,
_convert_from_v1_to_responses,
_convert_to_v03_ai_message,
_convert_to_v1_from_chat_completions,
_convert_to_v1_from_responses,
)
from langchain_openai.chat_models.base import (
_construct_lc_result_from_responses_api,
@@ -2296,7 +2300,7 @@ def test_mcp_tracing() -> None:
assert payload["tools"][0]["headers"]["Authorization"] == "Bearer PLACEHOLDER"


def test_compat() -> None:
def test_compat_responses_v1() -> None:
# Check compatibility with v0.3 message format
message_v03 = AIMessage(
content=[
@@ -2357,6 +2361,421 @@ def test_compat() -> None:
assert message_v03_output is not message_v03


@pytest.mark.parametrize(
"message_v1, expected",
[
(
AIMessage(
[
{"type": "reasoning", "reasoning": "Reasoning text"},
{"type": "tool_call", "id": "call_123"},
{
"type": "text",
"text": "Hello, world!",
"annotations": [
{"type": "url_citation", "url": "https://example.com"}
],
},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
),
AIMessage(
[{"type": "text", "text": "Hello, world!"}],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
),
)
],
)
def test_convert_from_v1_to_chat_completions(
message_v1: AIMessage, expected: AIMessage
) -> None:
result = _convert_from_v1_to_chat_completions(message_v1)
assert result == expected

# Check no mutation
assert message_v1 != result


@pytest.mark.parametrize(
"message_chat_completions, expected",
[
(
AIMessage(
"Hello, world!", id="chatcmpl-123", response_metadata={"foo": "bar"}
),
AIMessage(
[{"type": "text", "text": "Hello, world!"}],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
),
),
(
AIMessage(
[{"type": "text", "text": "Hello, world!"}],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
),
AIMessage(
[
{"type": "text", "text": "Hello, world!"},
{"type": "tool_call", "id": "call_123"},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
),
),
(
AIMessage(
"",
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
additional_kwargs={"tool_calls": [{"foo": "bar"}]},
),
AIMessage(
[{"type": "tool_call", "id": "call_123"}],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
id="chatcmpl-123",
response_metadata={"foo": "bar"},
),
),
],
)
def test_convert_to_v1_from_chat_completions(
message_chat_completions: AIMessage, expected: AIMessage
) -> None:
result = _convert_to_v1_from_chat_completions(message_chat_completions)
assert result == expected


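Read together, the two conversions above are near-inverses for simple messages.
A quick round-trip sketch using the helpers imported earlier (behavior inferred
from the expected values in these tests, so treat it as an assumption):

from langchain_core.messages import AIMessage
from langchain_openai.chat_models._compat import (
    _convert_from_v1_to_chat_completions,
    _convert_to_v1_from_chat_completions,
)

msg = AIMessage([{"type": "text", "text": "hi"}], id="chatcmpl-1")
down = _convert_from_v1_to_chat_completions(msg)  # drop v1-only blocks
back = _convert_to_v1_from_chat_completions(down)  # re-standardize content
assert back.content == [{"type": "text", "text": "hi"}]
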
@pytest.mark.parametrize(
"message_v1, expected",
[
(
AIMessage(
[
{"type": "reasoning", "id": "abc123"},
{"type": "reasoning", "id": "abc234", "reasoning": "foo "},
{"type": "reasoning", "id": "abc234", "reasoning": "bar"},
{"type": "tool_call", "id": "call_123"},
{
"type": "tool_call",
"id": "call_234",
"name": "get_weather_2",
"arguments": '{"location": "New York"}',
"item_id": "fc_123",
},
{"type": "text", "text": "Hello "},
{
"type": "text",
"text": "world",
"annotations": [
{"type": "url_citation", "url": "https://example.com"},
{
"type": "document_citation",
"title": "my doc",
"index": 1,
"file_id": "file_123",
},
{
"type": "non_standard_annotation",
"value": {"bar": "baz"},
},
],
},
{
"type": "image",
"source_type": "base64",
"data": "...",
"id": "img_123",
},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
# Make values different to check we pull from content when
# available
"type": "tool_call",
"id": "call_234",
"name": "get_weather_3",
"args": {"location": "Boston"},
},
],
id="resp123",
response_metadata={"foo": "bar"},
),
AIMessage(
[
{"type": "reasoning", "id": "abc123"},
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "foo "},
{"type": "summary_text", "text": "bar"},
],
},
{
"type": "function_call",
"call_id": "call_123",
"name": "get_weather",
"arguments": '{"location": "San Francisco"}',
},
{
"type": "function_call",
"call_id": "call_234",
"name": "get_weather_2",
"arguments": '{"location": "New York"}',
"id": "fc_123",
},
{"type": "text", "text": "Hello "},
{
"type": "text",
"text": "world",
"annotations": [
{"type": "url_citation", "url": "https://example.com"},
{
"type": "file_citation",
"filename": "my doc",
"index": 1,
"file_id": "file_123",
},
{"bar": "baz"},
],
},
{"type": "image_generation_call", "id": "img_123", "result": "..."},
{"type": "something_else", "foo": "bar"},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
# Make values different to check we pull from content when
# available
"type": "tool_call",
"id": "call_234",
"name": "get_weather_3",
"args": {"location": "Boston"},
},
],
id="resp123",
response_metadata={"foo": "bar"},
),
)
],
)
def test_convert_from_v1_to_responses(
message_v1: AIMessage, expected: AIMessage
) -> None:
result = _convert_from_v1_to_responses(message_v1)
assert result == expected

# Check no mutation
assert message_v1 != result


@pytest.mark.parametrize(
"message_responses, expected",
[
(
AIMessage(
[
{"type": "reasoning", "id": "abc123"},
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "foo "},
{"type": "summary_text", "text": "bar"},
],
},
{
"type": "function_call",
"call_id": "call_123",
"name": "get_weather",
"arguments": '{"location": "San Francisco"}',
},
{
"type": "function_call",
"call_id": "call_234",
"name": "get_weather_2",
"arguments": '{"location": "New York"}',
"id": "fc_123",
},
{"type": "text", "text": "Hello "},
{
"type": "text",
"text": "world",
"annotations": [
{"type": "url_citation", "url": "https://example.com"},
{
"type": "file_citation",
"filename": "my doc",
"index": 1,
"file_id": "file_123",
},
{"bar": "baz"},
],
},
{"type": "image_generation_call", "id": "img_123", "result": "..."},
{"type": "something_else", "foo": "bar"},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
# Make values different to check we pull from content when
# available
"type": "tool_call",
"id": "call_234",
"name": "get_weather_3",
"args": {"location": "Boston"},
},
],
id="resp123",
response_metadata={"foo": "bar"},
),
AIMessage(
[
{"type": "reasoning", "id": "abc123"},
{"type": "reasoning", "id": "abc234", "reasoning": "foo "},
{"type": "reasoning", "id": "abc234", "reasoning": "bar"},
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"arguments": '{"location": "San Francisco"}',
},
{
"type": "tool_call",
"id": "call_234",
"name": "get_weather_2",
"arguments": '{"location": "New York"}',
"item_id": "fc_123",
},
{"type": "text", "text": "Hello "},
{
"type": "text",
"text": "world",
"annotations": [
{"type": "url_citation", "url": "https://example.com"},
{
"type": "document_citation",
"title": "my doc",
"index": 1,
"file_id": "file_123",
},
{
"type": "non_standard_annotation",
"value": {"bar": "baz"},
},
],
},
{
"type": "image",
"source_type": "base64",
"data": "...",
"id": "img_123",
},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
# Make values different to check we pull from content when
# available
"type": "tool_call",
"id": "call_234",
"name": "get_weather_3",
"args": {"location": "Boston"},
},
],
id="resp123",
response_metadata={"foo": "bar"},
),
)
],
)
def test_convert_to_v1_from_responses(
message_responses: AIMessage, expected: AIMessage
) -> None:
result = _convert_to_v1_from_responses(message_responses)
assert result == expected


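The two parametrizations above exercise inverse mappings between Responses API
items and v1 content blocks. An informal summary, derived only from the
expected values in these tests:

# Responses item                     ->  v1 block
# {"type": "function_call", ...}     ->  {"type": "tool_call", ...}
#                                        (call_id -> id, id -> item_id)
# reasoning "summary" parts          ->  one {"type": "reasoning"} block per part
# {"type": "file_citation", ...}     ->  {"type": "document_citation", ...}
#                                        (filename -> title)
# {"type": "image_generation_call"}  ->  {"type": "image", "source_type": "base64"}
# anything unrecognized              ->  {"type": "non_standard", "value": item}
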
def test_get_last_messages() -> None:
messages: list[BaseMessage] = [HumanMessage("Hello")]
last_messages, previous_response_id = _get_last_messages(messages)

@@ -1,6 +1,7 @@
from typing import Any, Optional
from unittest.mock import MagicMock, patch

import pytest
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from openai.types.responses import (
ResponseCompletedEvent,
@@ -610,8 +611,97 @@ def _strip_none(obj: Any) -> Any:
return obj


def test_responses_stream() -> None:
llm = ChatOpenAI(model="o4-mini", output_version="responses/v1")
@pytest.mark.parametrize(
"output_version, expected_content",
[
(
"responses/v1",
[
{
"id": "rs_123",
"summary": [
{
"index": 0,
"type": "summary_text",
"text": "reasoning block one",
},
{
"index": 1,
"type": "summary_text",
"text": "another reasoning block",
},
],
"type": "reasoning",
"index": 0,
},
{"type": "text", "text": "text block one", "index": 1, "id": "msg_123"},
{
"type": "text",
"text": "another text block",
"index": 2,
"id": "msg_123",
},
{
"id": "rs_234",
"summary": [
{"index": 0, "type": "summary_text", "text": "more reasoning"},
{
"index": 1,
"type": "summary_text",
"text": "still more reasoning",
},
],
"type": "reasoning",
"index": 3,
},
{"type": "text", "text": "more", "index": 4, "id": "msg_234"},
{"type": "text", "text": "text", "index": 5, "id": "msg_234"},
],
),
(
"v1",
[
{
"type": "reasoning",
"reasoning": "reasoning block one",
"id": "rs_123",
"index": 0,
},
{
"type": "reasoning",
"reasoning": "another reasoning block",
"id": "rs_123",
"index": 1,
},
{"type": "text", "text": "text block one", "index": 2, "id": "msg_123"},
{
"type": "text",
"text": "another text block",
"index": 3,
"id": "msg_123",
},
{
"type": "reasoning",
"reasoning": "more reasoning",
"id": "rs_234",
"index": 4,
},
{
"type": "reasoning",
"reasoning": "still more reasoning",
"id": "rs_234",
"index": 5,
},
{"type": "text", "text": "more", "index": 6, "id": "msg_234"},
{"type": "text", "text": "text", "index": 7, "id": "msg_234"},
],
),
],
)
def test_responses_stream(output_version: str, expected_content: list[dict]) -> None:
llm = ChatOpenAI(
model="o4-mini", use_responses_api=True, output_version=output_version
)
mock_client = MagicMock()

def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
@@ -620,36 +710,14 @@ def test_responses_stream() -> None:
mock_client.responses.create = mock_create

full: Optional[BaseMessageChunk] = None
chunks = []
with patch.object(llm, "root_client", mock_client):
for chunk in llm.stream("test"):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
chunks.append(chunk)

expected_content = [
{
"id": "rs_123",
"summary": [
{"index": 0, "type": "summary_text", "text": "reasoning block one"},
{"index": 1, "type": "summary_text", "text": "another reasoning block"},
],
"type": "reasoning",
"index": 0,
},
{"type": "text", "text": "text block one", "index": 1, "id": "msg_123"},
{"type": "text", "text": "another text block", "index": 2, "id": "msg_123"},
{
"id": "rs_234",
"summary": [
{"index": 0, "type": "summary_text", "text": "more reasoning"},
{"index": 1, "type": "summary_text", "text": "still more reasoning"},
],
"type": "reasoning",
"index": 3,
},
{"type": "text", "text": "more", "index": 4, "id": "msg_234"},
{"type": "text", "text": "text", "index": 5, "id": "msg_234"},
]
assert isinstance(full, AIMessageChunk)
assert full.content == expected_content
assert full.additional_kwargs == {}
assert full.id == "resp_123"
