Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-17 00:17:47 +00:00)

fix(openai): fix tracing and typing on standard outputs branch (#32326)

commit 309d1a232a (parent 8cf97e838c)
@@ -253,19 +253,20 @@ def shielded(func: Func) -> Func:
 
 def _convert_llm_events(
     event_name: str, args: tuple[Any, ...], kwargs: dict[str, Any]
-) -> None:
+) -> tuple[tuple[Any, ...], dict[str, Any]]:
+    args_list = list(args)
     if (
         event_name == "on_chat_model_start"
-        and isinstance(args[1], list)
-        and args[1]
-        and isinstance(args[1][0], MessageV1Types)
+        and isinstance(args_list[1], list)
+        and args_list[1]
+        and isinstance(args_list[1][0], MessageV1Types)
     ):
         batch = [
             convert_from_v1_message(item)
-            for item in args[1]
+            for item in args_list[1]
             if isinstance(item, MessageV1Types)
         ]
-        args[1] = [batch]  # type: ignore[index]
+        args_list[1] = [batch]
     elif (
         event_name == "on_llm_new_token"
        and "chunk" in kwargs
@@ -273,12 +274,21 @@ def _convert_llm_events(
     ):
         chunk = kwargs["chunk"]
         kwargs["chunk"] = ChatGenerationChunk(text=chunk.text, message=chunk)
-    elif event_name == "on_llm_end" and isinstance(args[0], MessageV1Types):
-        args[0] = LLMResult(  # type: ignore[index]
-            generations=[[ChatGeneration(text=args[0].text, message=args[0])]]
+    elif event_name == "on_llm_end" and isinstance(args_list[0], MessageV1Types):
+        args_list[0] = LLMResult(
+            generations=[
+                [
+                    ChatGeneration(
+                        text=args_list[0].text,
+                        message=convert_from_v1_message(args_list[0]),
+                    )
+                ]
+            ]
         )
     else:
-        return
+        pass
+
+    return tuple(args_list), kwargs
 
 
 def handle_event(
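The signature change above is the core of the runtime fix: `args` arrives as a tuple, so the old in-place writes (`args[1] = ...`, silenced with `type: ignore[index]`) would raise `TypeError` the moment a conversion actually fired. A minimal sketch of the copy-and-return contract the new code adopts, with an illustrative conversion rather than the real message converters:

from typing import Any


def convert_events(
    event_name: str, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> tuple[tuple[Any, ...], dict[str, Any]]:
    # Tuples are immutable, so rewrite a list copy and hand back a new tuple.
    args_list = list(args)
    if event_name == "on_llm_end" and args_list:
        args_list[0] = f"converted:{args_list[0]}"
    return tuple(args_list), kwargs


# Callers must rebind the result, mirroring the handle_event changes below:
args, kwargs = convert_events("on_llm_end", ("payload",), {})
assert args == ("converted:payload",)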
@@ -310,7 +320,7 @@ def handle_event(
                 handler, ignore_condition_name
             ):
                 if not handler.accepts_new_messages:
-                    _convert_llm_events(event_name, args, kwargs)
+                    args, kwargs = _convert_llm_events(event_name, args, kwargs)
                 event = getattr(handler, event_name)(*args, **kwargs)
                 if asyncio.iscoroutine(event):
                     coros.append(event)
@@ -406,7 +416,7 @@ async def _ahandle_event_for_handler(
     try:
         if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
             if not handler.accepts_new_messages:
-                _convert_llm_events(event_name, args, kwargs)
+                args, kwargs = _convert_llm_events(event_name, args, kwargs)
             event = getattr(handler, event_name)
             if asyncio.iscoroutinefunction(event):
                 await event(*args, **kwargs)
@@ -328,20 +328,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         does not properly support streaming.
     """
 
-    output_version: str = "v0"
-    """Version of AIMessage output format to use.
-
-    This field is used to roll-out new output formats for chat model AIMessages
-    in a backwards-compatible way.
-
-    ``'v1'`` standardizes output format using a list of typed ContentBlock dicts. We
-    recommend this for new applications.
-
-    All chat models currently support the default of ``"v0"``.
-
-    .. versionadded:: 0.4
-    """
-
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
@@ -7,6 +7,7 @@ import typing
 import warnings
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
+from functools import cached_property
 from operator import itemgetter
 from typing import (
     TYPE_CHECKING,
@@ -41,6 +42,7 @@ from langchain_core.language_models.base import (
     _get_token_ids_default_method,
     _get_verbosity,
 )
+from langchain_core.load import dumpd
 from langchain_core.messages import (
     convert_to_openai_image_block,
     get_buffer_string,
@@ -312,6 +314,10 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
         arbitrary_types_allowed=True,
     )
 
+    @cached_property
+    def _serialized(self) -> dict[str, Any]:
+        return dumpd(self)
+
     # --- Runnable methods ---
 
     @field_validator("verbose", mode="before")
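`_serialized` caches `dumpd(self)` on the instance, so serialization happens once rather than on every callback dispatch; the four `on_chat_model_start` call sites below reuse it. A standalone sketch of the `cached_property` behavior being relied on (the real body is just `return dumpd(self)`):

from functools import cached_property


class Model:
    def __init__(self, name: str) -> None:
        self.name = name
        self.serialize_count = 0

    @cached_property
    def _serialized(self) -> dict[str, str]:
        # Stand-in for `return dumpd(self)`.
        self.serialize_count += 1
        return {"name": self.name}


m = Model("gpt")
m._serialized
m._serialized
assert m.serialize_count == 1  # computed once, then cached on the instance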
@@ -434,7 +440,7 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
             self.metadata,
         )
         (run_manager,) = callback_manager.on_chat_model_start(
-            {},
+            self._serialized,
             _format_for_tracing(messages),
             invocation_params=params,
             options=options,
@@ -500,7 +506,7 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
             self.metadata,
         )
         (run_manager,) = await callback_manager.on_chat_model_start(
-            {},
+            self._serialized,
             _format_for_tracing(messages),
             invocation_params=params,
             options=options,
@@ -578,7 +584,7 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
             self.metadata,
         )
         (run_manager,) = callback_manager.on_chat_model_start(
-            {},
+            self._serialized,
             _format_for_tracing(messages),
             invocation_params=params,
             options=options,
@@ -647,7 +653,7 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
             self.metadata,
         )
         (run_manager,) = await callback_manager.on_chat_model_start(
-            {},
+            self._serialized,
             _format_for_tracing(messages),
             invocation_params=params,
             options=options,
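Each of these four call sites previously passed a literal `{}` as the `serialized` argument, so tracers recorded an empty description of the model; passing the cached `self._serialized` payload restores the model's identity in traces. A toy illustration, assuming a dict-shaped handler argument:

def on_chat_model_start(serialized: dict, messages: list) -> str:
    # A tracer can only name the model if the payload carries one.
    return serialized.get("name", "<unknown model>")


assert on_chat_model_start({}, []) == "<unknown model>"  # old: empty payload
assert on_chat_model_start({"name": "CustomChat"}, []) == "CustomChat"  # new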
@@ -233,6 +233,8 @@ class InvalidToolCall(TypedDict):
     """An identifier associated with the tool call."""
     error: Optional[str]
     """An error message associated with the tool call."""
+    index: NotRequired[int]
+    """Index of block in aggregate response. Used during streaming."""
     type: Literal["invalid_tool_call"]
 
 
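`NotRequired` lets streaming code attach an aggregation index without making the key mandatory when invalid tool calls are constructed outside of streaming. A simplified sketch of the same TypedDict idiom (not the full class):

from typing import Literal, Optional
from typing_extensions import NotRequired, TypedDict


class InvalidToolCallSketch(TypedDict):
    id: Optional[str]
    error: Optional[str]
    type: Literal["invalid_tool_call"]
    index: NotRequired[int]  # only present while aggregating stream chunks


complete: InvalidToolCallSketch = {
    "id": "call_1", "error": None, "type": "invalid_tool_call"
}
streamed: InvalidToolCallSketch = {**complete, "index": 0}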
@@ -300,9 +300,8 @@ def test_llm_representation_for_serializable() -> None:
     assert chat._get_llm_string() == (
         '{"id": ["tests", "unit_tests", "language_models", "chat_models", '
         '"test_cache", "CustomChat"], "kwargs": {"messages": {"id": '
-        '["builtins", "list_iterator"], "lc": 1, "type": "not_implemented"}, '
-        '"output_version": "v0"}, "lc": 1, "name": "CustomChat", "type": '
-        "\"constructor\"}---[('stop', None)]"
+        '["builtins", "list_iterator"], "lc": 1, "type": "not_implemented"}}, "lc": '
+        '1, "name": "CustomChat", "type": "constructor"}---[(\'stop\', None)]'
     )
 
 
@@ -763,6 +763,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -2201,6 +2205,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -1166,6 +1166,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -2711,6 +2711,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -4193,6 +4197,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -5706,6 +5714,10 @@
             ]),
             'title': 'Id',
          }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -7094,6 +7106,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -8618,6 +8634,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -10051,6 +10071,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -11483,6 +11507,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -12957,6 +12985,10 @@
             ]),
             'title': 'Id',
           }),
+          'index': dict({
+            'title': 'Index',
+            'type': 'integer',
+          }),
           'name': dict({
             'anyOf': list([
               dict({
@@ -205,7 +205,6 @@ def test_configurable_with_default() -> None:
         "name": None,
         "bound": {
             "name": None,
-            "output_version": "v0",
             "disable_streaming": False,
             "model": "claude-3-sonnet-20240229",
             "mcp_servers": None,
@@ -71,7 +71,7 @@ import json
 from collections.abc import Iterable, Iterator
 from typing import Any, Literal, Optional, Union, cast
 
-from langchain_core.messages import AIMessage, AIMessageChunk, is_data_content_block
+from langchain_core.messages import AIMessage, is_data_content_block
 from langchain_core.messages import content_blocks as types
 from langchain_core.messages.v1 import AIMessage as AIMessageV1
 
|
|||||||
|
|
||||||
|
|
||||||
# v1 / Chat Completions
|
# v1 / Chat Completions
|
||||||
def _convert_to_v1_from_chat_completions(message: AIMessage) -> AIMessage:
|
|
||||||
"""Mutate a Chat Completions message to v1 format."""
|
|
||||||
if isinstance(message.content, str):
|
|
||||||
if message.content:
|
|
||||||
message.content = [{"type": "text", "text": message.content}]
|
|
||||||
else:
|
|
||||||
message.content = []
|
|
||||||
|
|
||||||
for tool_call in message.tool_calls:
|
|
||||||
if id_ := tool_call.get("id"):
|
|
||||||
message.content.append({"type": "tool_call", "id": id_})
|
|
||||||
|
|
||||||
if "tool_calls" in message.additional_kwargs:
|
|
||||||
_ = message.additional_kwargs.pop("tool_calls")
|
|
||||||
|
|
||||||
if "token_usage" in message.response_metadata:
|
|
||||||
_ = message.response_metadata.pop("token_usage")
|
|
||||||
|
|
||||||
return message
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_to_v1_from_chat_completions_chunk(chunk: AIMessageChunk) -> AIMessageChunk:
|
|
||||||
result = _convert_to_v1_from_chat_completions(cast(AIMessage, chunk))
|
|
||||||
return cast(AIMessageChunk, result)
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_from_v1_to_chat_completions(message: AIMessageV1) -> AIMessageV1:
|
def _convert_from_v1_to_chat_completions(message: AIMessageV1) -> AIMessageV1:
|
||||||
"""Convert a v1 message to the Chat Completions format."""
|
"""Convert a v1 message to the Chat Completions format."""
|
||||||
new_content: list[types.ContentBlock] = []
|
new_content: list[types.ContentBlock] = []
|
||||||
@ -341,14 +315,14 @@ def _convert_annotation_to_v1(annotation: dict[str, Any]) -> dict[str, Any]:
|
|||||||
return non_standard_annotation
|
return non_standard_annotation
|
||||||
|
|
||||||
|
|
||||||
def _explode_reasoning(block: dict[str, Any]) -> Iterable[dict[str, Any]]:
|
def _explode_reasoning(block: dict[str, Any]) -> Iterable[types.ReasoningContentBlock]:
|
||||||
if block.get("type") != "reasoning" or "summary" not in block:
|
if "summary" not in block:
|
||||||
yield block
|
yield cast(types.ReasoningContentBlock, block)
|
||||||
return
|
return
|
||||||
|
|
||||||
if not block["summary"]:
|
if not block["summary"]:
|
||||||
_ = block.pop("summary", None)
|
_ = block.pop("summary", None)
|
||||||
yield block
|
yield cast(types.ReasoningContentBlock, block)
|
||||||
return
|
return
|
||||||
|
|
||||||
# Common part for every exploded line, except 'summary'
|
# Common part for every exploded line, except 'summary'
|
||||||
@ -364,7 +338,7 @@ def _explode_reasoning(block: dict[str, Any]) -> Iterable[dict[str, Any]]:
|
|||||||
new_block["reasoning"] = part.get("text", "")
|
new_block["reasoning"] = part.get("text", "")
|
||||||
if idx == 0:
|
if idx == 0:
|
||||||
new_block.update(first_only)
|
new_block.update(first_only)
|
||||||
yield new_block
|
yield cast(types.ReasoningContentBlock, new_block)
|
||||||
|
|
||||||
|
|
||||||
def _convert_to_v1_from_responses(
|
def _convert_to_v1_from_responses(
|
||||||
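The reworked `_explode_reasoning` fans a reasoning block with an N-part summary out into N `reasoning` blocks, the first of which keeps the shared keys. An approximate standalone rendering of that behavior (simplified from the code above):

from collections.abc import Iterable
from typing import Any


def explode_reasoning(block: dict[str, Any]) -> Iterable[dict[str, Any]]:
    common = {k: v for k, v in block.items() if k != "summary"}
    for idx, part in enumerate(block["summary"]):
        new_block: dict[str, Any] = {"type": "reasoning"}
        if idx == 0:
            new_block.update(common)  # first exploded block keeps the shared keys
        new_block["reasoning"] = part.get("text", "")
        yield new_block


blocks = list(
    explode_reasoning(
        {"type": "reasoning", "id": "r1", "summary": [{"text": "a"}, {"text": "b"}]}
    )
)
assert [b.get("reasoning") for b in blocks] == ["a", "b"]
assert "id" in blocks[0] and "id" not in blocks[1]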
@@ -374,7 +348,7 @@ def _convert_to_v1_from_responses(
 ) -> list[types.ContentBlock]:
     """Mutate a Responses message to v1 format."""
 
-    def _iter_blocks() -> Iterable[dict[str, Any]]:
+    def _iter_blocks() -> Iterable[types.ContentBlock]:
         for block in content:
             if not isinstance(block, dict):
                 continue
@@ -385,7 +359,7 @@ def _convert_to_v1_from_responses(
                 block["annotations"] = [
                     _convert_annotation_to_v1(a) for a in block["annotations"]
                 ]
-                yield block
+                yield cast(types.TextContentBlock, block)
 
             elif block_type == "reasoning":
                 yield from _explode_reasoning(block)
@@ -408,27 +382,29 @@ def _convert_to_v1_from_responses(
                 ):
                     if extra_key in block:
                         new_block[extra_key] = block[extra_key]
-                yield new_block
+                yield cast(types.ImageContentBlock, new_block)
 
             elif block_type == "function_call":
-                new_block = None
+                tool_call_block: Optional[types.ContentBlock] = None
                 call_id = block.get("call_id", "")
                 if call_id:
                     for tool_call in tool_calls or []:
                         if tool_call.get("id") == call_id:
-                            new_block = tool_call.copy()
+                            tool_call_block = cast(types.ToolCall, tool_call.copy())
                             break
                     else:
                         for invalid_tool_call in invalid_tool_calls or []:
                             if invalid_tool_call.get("id") == call_id:
-                                new_block = invalid_tool_call.copy()
+                                tool_call_block = cast(
+                                    types.InvalidToolCall, invalid_tool_call.copy()
+                                )
                                 break
-                if new_block:
+                if tool_call_block:
                     if "id" in block:
-                        new_block["item_id"] = block["id"]
+                        tool_call_block["item_id"] = block["id"]
                     if "index" in block:
-                        new_block["index"] = block["index"]
-                    yield new_block
+                        tool_call_block["index"] = block["index"]
+                    yield tool_call_block
 
             elif block_type == "web_search_call":
                 web_search_call = {"type": "web_search_call", "id": block["id"]}
@@ -448,8 +424,8 @@ def _convert_to_v1_from_responses(
                 web_search_result = {"type": "web_search_result", "id": block["id"]}
                 if "index" in block:
                     web_search_result["index"] = block["index"] + 1
-                yield web_search_call
-                yield web_search_result
+                yield cast(types.WebSearchCall, web_search_call)
+                yield cast(types.WebSearchResult, web_search_result)
 
             elif block_type == "code_interpreter_call":
                 code_interpreter_call = {
@@ -489,14 +465,14 @@ def _convert_to_v1_from_responses(
                 if "index" in block:
                     code_interpreter_result["index"] = block["index"] + 1
 
-                yield code_interpreter_call
-                yield code_interpreter_result
+                yield cast(types.CodeInterpreterCall, code_interpreter_call)
+                yield cast(types.CodeInterpreterResult, code_interpreter_result)
 
             else:
                 new_block = {"type": "non_standard", "value": block}
                 if "index" in new_block["value"]:
                     new_block["index"] = new_block["value"].pop("index")
-                yield new_block
+                yield cast(types.NonStandardContentBlock, new_block)
 
     return list(_iter_blocks())
 
@@ -511,9 +487,9 @@ def _convert_annotation_from_v1(annotation: types.Annotation) -> dict[str, Any]:
     if "title" in annotation:
         new_ann["filename"] = annotation["title"]
     if "file_id" in annotation:
-        new_ann["file_id"] = annotation["file_id"]
+        new_ann["file_id"] = annotation["file_id"]  # type: ignore[typeddict-item]
     if "file_index" in annotation:
-        new_ann["index"] = annotation["file_index"]
+        new_ann["index"] = annotation["file_index"]  # type: ignore[typeddict-item]
 
     return new_ann
 
@@ -649,10 +625,11 @@ def _convert_from_v1_to_responses(
             new_block = {"type": "function_call", "call_id": block["id"]}
             if "item_id" in block:
                 new_block["id"] = block["item_id"]  # type: ignore[typeddict-item]
-            if "name" in block and "arguments" in block:
+            if "name" in block:
                 new_block["name"] = block["name"]
+            if "arguments" in block:
                 new_block["arguments"] = block["arguments"]  # type: ignore[typeddict-item]
-            else:
+            if any(key not in block for key in ("name", "arguments")):
                 matching_tool_calls = [
                     call for call in tool_calls if call["id"] == block["id"]
                 ]
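The old combined check skipped both keys unless the block carried name *and* arguments; the split checks copy whatever is present and fall back to `tool_calls` only for what is still missing. A condensed before/after on hypothetical data:

block = {"type": "tool_call", "id": "call_1", "name": "get_weather"}  # no arguments yet

old: dict = {}
if "name" in block and "arguments" in block:  # old: all-or-nothing
    old["name"] = block["name"]
    old["arguments"] = block["arguments"]

new: dict = {}
if "name" in block:  # new: copy each key independently
    new["name"] = block["name"]
if "arguments" in block:
    new["arguments"] = block["arguments"]

assert old == {} and new == {"name": "get_weather"}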
@@ -108,12 +108,7 @@ from langchain_openai.chat_models._client_utils import (
 )
 from langchain_openai.chat_models._compat import (
     _convert_from_v03_ai_message,
-    _convert_from_v1_to_chat_completions,
-    _convert_from_v1_to_responses,
     _convert_to_v03_ai_message,
-    _convert_to_v1_from_chat_completions,
-    _convert_to_v1_from_chat_completions_chunk,
-    _convert_to_v1_from_responses,
 )
 
 if TYPE_CHECKING:
@@ -674,7 +669,7 @@ class BaseChatOpenAI(BaseChatModel):
     .. versionadded:: 0.3.9
     """
 
-    output_version: str = "v0"
+    output_version: Literal["v0", "responses/v1"] = "v0"
     """Version of AIMessage output format to use.
 
     This field is used to roll-out new output formats for chat model AIMessages
@@ -685,7 +680,6 @@ class BaseChatOpenAI(BaseChatModel):
     - ``'v0'``: AIMessage format as of langchain-openai 0.3.x.
     - ``'responses/v1'``: Formats Responses API output
       items into AIMessage content blocks.
-    - ``"v1"``: v1 of LangChain cross-provider standard.
 
     Currently only impacts the Responses API. ``output_version='responses/v1'`` is
     recommended.
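Narrowing the field from `str` to a `Literal` (and dropping `'v1'` from both the type and the docstring) turns an unsupported value into a validation error instead of a silently ignored setting. A sketch of the effect, assuming a pydantic model like `BaseChatOpenAI`:

from typing import Literal

from pydantic import BaseModel, ValidationError


class Config(BaseModel):
    output_version: Literal["v0", "responses/v1"] = "v0"


Config(output_version="responses/v1")  # accepted
try:
    Config(output_version="v1")  # now rejected at construction time
except ValidationError:
    print("v1 is no longer a valid output_version here")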
@@ -875,10 +869,6 @@ class BaseChatOpenAI(BaseChatModel):
                 message=default_chunk_class(content="", usage_metadata=usage_metadata),
                 generation_info=base_generation_info,
             )
-            if self.output_version == "v1":
-                generation_chunk.message = _convert_to_v1_from_chat_completions_chunk(
-                    cast(AIMessageChunk, generation_chunk.message)
-                )
             return generation_chunk
 
         choice = choices[0]
@@ -906,20 +896,6 @@ class BaseChatOpenAI(BaseChatModel):
         if usage_metadata and isinstance(message_chunk, AIMessageChunk):
             message_chunk.usage_metadata = usage_metadata
 
-        if self.output_version == "v1":
-            message_chunk = cast(AIMessageChunk, message_chunk)
-            # Convert to v1 format
-            if isinstance(message_chunk.content, str):
-                message_chunk = _convert_to_v1_from_chat_completions_chunk(
-                    message_chunk
-                )
-                if message_chunk.content:
-                    message_chunk.content[0]["index"] = 0  # type: ignore[index]
-            else:
-                message_chunk = _convert_to_v1_from_chat_completions_chunk(
-                    message_chunk
-                )
-
         generation_chunk = ChatGenerationChunk(
             message=message_chunk, generation_info=generation_info or None
         )
@@ -1212,12 +1188,7 @@ class BaseChatOpenAI(BaseChatModel):
             else:
                 payload = _construct_responses_api_payload(messages, payload)
         else:
-            payload["messages"] = [
-                _convert_message_to_dict(_convert_from_v1_to_chat_completions(m))
-                if isinstance(m, AIMessage)
-                else _convert_message_to_dict(m)
-                for m in messages
-            ]
+            payload["messages"] = [_convert_message_to_dict(m) for m in messages]
         return payload
 
     def _create_chat_result(
@@ -1283,11 +1254,6 @@ class BaseChatOpenAI(BaseChatModel):
         if hasattr(message, "refusal"):
             generations[0].message.additional_kwargs["refusal"] = message.refusal
 
-        if self.output_version == "v1":
-            _ = llm_output.pop("token_usage", None)
-            generations[0].message = _convert_to_v1_from_chat_completions(
-                cast(AIMessage, generations[0].message)
-            )
         return ChatResult(generations=generations, llm_output=llm_output)
 
     async def _astream(
@@ -3611,7 +3577,6 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
     for lc_msg in messages:
         if isinstance(lc_msg, AIMessage):
             lc_msg = _convert_from_v03_ai_message(lc_msg)
-            lc_msg = _convert_from_v1_to_responses(lc_msg)
         msg = _convert_message_to_dict(lc_msg)
         # "name" parameter unsupported
         if "name" in msg:
@@ -3755,7 +3720,7 @@ def _construct_lc_result_from_responses_api(
     response: Response,
     schema: Optional[type[_BM]] = None,
     metadata: Optional[dict] = None,
-    output_version: str = "v0",
+    output_version: Literal["v0", "responses/v1"] = "v0",
 ) -> ChatResult:
     """Construct ChatResponse from OpenAI Response API response."""
     if response.error:
@@ -3894,27 +3859,6 @@ def _construct_lc_result_from_responses_api(
         )
     if output_version == "v0":
         message = _convert_to_v03_ai_message(message)
-    elif output_version == "v1":
-        message = _convert_to_v1_from_responses(message)
-        if response.tools and any(
-            tool.type == "image_generation" for tool in response.tools
-        ):
-            # Get mime_time from tool definition and add to image generations
-            # if missing (primarily for tracing purposes).
-            image_generation_call = next(
-                tool for tool in response.tools if tool.type == "image_generation"
-            )
-            if image_generation_call.output_format:
-                mime_type = f"image/{image_generation_call.output_format}"
-                for content_block in message.content:
-                    # OK to mutate output message
-                    if (
-                        isinstance(content_block, dict)
-                        and content_block.get("type") == "image"
-                        and "base64" in content_block
-                        and "mime_type" not in block
-                    ):
-                        block["mime_type"] = mime_type
     else:
         pass
     return ChatResult(generations=[ChatGeneration(message=message)])
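Beyond rolling back the `'v1'` output path, the deleted branch iterated `content_block` but then tested and mutated `block` (`"mime_type" not in block`, `block["mime_type"] = ...`), so it patched the wrong variable. For reference, a hypothetical mime-type backfill with the loop variable used consistently:

mime_type = "image/png"  # e.g. derived from the image_generation tool's output_format
content = [{"type": "image", "base64": "..."}]
for content_block in content:
    if (
        isinstance(content_block, dict)
        and content_block.get("type") == "image"
        and "base64" in content_block
        and "mime_type" not in content_block
    ):
        content_block["mime_type"] = mime_type
assert content[0]["mime_type"] == "image/png"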
@@ -3928,7 +3872,7 @@ def _convert_responses_chunk_to_generation_chunk(
     schema: Optional[type[_BM]] = None,
     metadata: Optional[dict] = None,
     has_reasoning: bool = False,
-    output_version: str = "v0",
+    output_version: Literal["v0", "responses/v1"] = "v0",
 ) -> tuple[int, int, int, Optional[ChatGenerationChunk]]:
     def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
         """Advance indexes tracked during streaming.
@@ -3994,28 +3938,8 @@ def _convert_responses_chunk_to_generation_chunk(
             annotation = chunk.annotation
         else:
             annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
-        if output_version == "v1":
-            content.append(
-                {
-                    "type": "text",
-                    "text": "",
-                    "annotations": [annotation],
-                    "index": current_index,
-                }
-            )
-        else:
-            content.append({"annotations": [annotation], "index": current_index})
+        content.append({"annotations": [annotation], "index": current_index})
     elif chunk.type == "response.output_text.done":
-        if output_version == "v1":
-            content.append(
-                {
-                    "type": "text",
-                    "text": "",
-                    "id": chunk.item_id,
-                    "index": current_index,
-                }
-            )
-        else:
-            content.append({"id": chunk.item_id, "index": current_index})
+        content.append({"id": chunk.item_id, "index": current_index})
     elif chunk.type == "response.created":
         id = chunk.response.id
@@ -4092,34 +4016,21 @@ def _convert_responses_chunk_to_generation_chunk(
             content.append({"type": "refusal", "refusal": chunk.refusal})
     elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
         _advance(chunk.output_index)
-        current_sub_index = 0
         reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
         reasoning["index"] = current_index
         content.append(reasoning)
     elif chunk.type == "response.reasoning_summary_part.added":
-        if output_version in ("v0", "responses/v1"):
-            _advance(chunk.output_index)
-            content.append(
-                {
-                    # langchain-core uses the `index` key to aggregate text blocks.
-                    "summary": [
-                        {
-                            "index": chunk.summary_index,
-                            "type": "summary_text",
-                            "text": "",
-                        }
-                    ],
-                    "index": current_index,
-                    "type": "reasoning",
-                }
-            )
-        else:
-            block: dict = {"type": "reasoning", "reasoning": ""}
-            if chunk.summary_index > 0:
-                _advance(chunk.output_index, chunk.summary_index)
-            block["id"] = chunk.item_id
-            block["index"] = current_index
-            content.append(block)
+        _advance(chunk.output_index)
+        content.append(
+            {
+                # langchain-core uses the `index` key to aggregate text blocks.
+                "summary": [
+                    {"index": chunk.summary_index, "type": "summary_text", "text": ""}
+                ],
+                "index": current_index,
+                "type": "reasoning",
+            }
+        )
     elif chunk.type == "response.image_generation_call.partial_image":
         # Partial images are not supported yet.
         pass
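As the retained comment says, langchain-core merges streamed blocks by their `index` key, which is why the single remaining code path always emits one. A compressed illustration of index-based aggregation (not the library's actual merge code):

from collections import defaultdict

chunks = [
    {"index": 0, "text": "Hel"},
    {"index": 0, "text": "lo"},
    {"index": 1, "text": "!"},
]
merged: dict[int, str] = defaultdict(str)
for c in chunks:
    merged[c["index"]] += c["text"]
assert dict(merged) == {0: "Hello", 1: "!"}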
@@ -4154,15 +4065,6 @@ def _convert_responses_chunk_to_generation_chunk(
             AIMessageChunk,
             _convert_to_v03_ai_message(message, has_reasoning=has_reasoning),
         )
-    elif output_version == "v1":
-        message = cast(AIMessageChunk, _convert_to_v1_from_responses(message))
-        for content_block in message.content:
-            if (
-                isinstance(content_block, dict)
-                and content_block.get("index", -1) > current_index
-            ):
-                # blocks were added for v1
-                current_index = content_block["index"]
     else:
         pass
     return (
@@ -154,7 +154,7 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> MessageV1:
             invalid_tool_calls.append(
                 make_invalid_tool_call(raw_tool_call, str(e))
             )
-    content.extend(tool_calls)
+    content.extend(cast(list[ToolCall], tool_calls))
     if audio := _dict.get("audio"):
         # TODO: populate standard fields
         content.append(
@@ -3796,7 +3796,7 @@ def _convert_responses_chunk_to_generation_chunk(
         for content_block in content_v1:
             if (
                 isinstance(content_block, dict)
-                and content_block.get("index", -1) > current_index
+                and (content_block.get("index") or -1) > current_index  # type: ignore[operator]
             ):
                 # blocks were added for v1
                 current_index = content_block["index"]
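`dict.get("index", -1)` only substitutes the default when the key is absent; a key that is present with value `None` still comes through and breaks the `>` comparison. The `or -1` form coalesces `None` too, at the cost of also coalescing an explicit `0`:

block = {"index": None}

assert block.get("index", -1) is None           # default unused: the key exists
assert (block.get("index") or -1) == -1         # None coalesced
assert ({"index": 0}.get("index") or -1) == -1  # caveat: 0 coalesced as well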
@@ -2,7 +2,7 @@
 
 import json
 import os
-from typing import Annotated, Any, Literal, Optional, cast
+from typing import Annotated, Any, Literal, Optional, Union, cast
 
 import openai
 import pytest
@@ -25,7 +25,9 @@ from langchain_openai import ChatOpenAI, ChatOpenAIV1
 MODEL_NAME = "gpt-4o-mini"
 
 
-def _check_response(response: Optional[BaseMessage], output_version) -> None:
+def _check_response(
+    response: Optional[Union[BaseMessage, AIMessageV1]], output_version: str
+) -> None:
     if output_version == "v1":
         assert isinstance(response, AIMessageV1) or isinstance(
             response, AIMessageChunkV1
@@ -36,8 +38,8 @@ def _check_response(response: Optional[BaseMessage], output_version) -> None:
     for block in response.content:
         assert isinstance(block, dict)
         if block["type"] == "text":
-            assert isinstance(block["text"], str)
-            for annotation in block["annotations"]:
+            assert isinstance(block["text"], str)  # type: ignore[typeddict-item]
+            for annotation in block["annotations"]:  # type: ignore[typeddict-item]
                 if annotation["type"] == "file_citation":
                     assert all(
                         key in annotation
@@ -52,7 +54,7 @@ def _check_response(response: Optional[BaseMessage], output_version) -> None:
     if output_version == "v1":
         text_content = response.text
     else:
-        text_content = response.text()
+        text_content = response.text()  # type: ignore[operator,misc]
     assert isinstance(text_content, str)
     assert text_content
     assert response.usage_metadata
@@ -60,7 +62,7 @@ def _check_response(response: Optional[BaseMessage], output_version) -> None:
     assert response.usage_metadata["output_tokens"] > 0
     assert response.usage_metadata["total_tokens"] > 0
     assert response.response_metadata["model_name"]
-    assert response.response_metadata["service_tier"]
+    assert response.response_metadata["service_tier"]  # type: ignore[typeddict-item]
 
 
 @pytest.mark.default_cassette("test_web_search.yaml.gz")
@@ -70,7 +72,7 @@ def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None:
     if output_version == "v1":
         llm = ChatOpenAIV1(model=MODEL_NAME)
     else:
-        llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
+        llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)  # type: ignore[assignment]
     first_response = llm.invoke(
         "What was a positive news story from today?",
         tools=[{"type": "web_search_preview"}],
@@ -87,7 +89,7 @@ def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None:
             assert isinstance(chunk, AIMessageChunkV1)
             full = chunk if full is None else full + chunk
     else:
-        full: Optional[BaseMessageChunk] = None
+        full: Optional[BaseMessageChunk] = None  # type: ignore[no-redef]
         for chunk in llm.stream(
             "What was a positive news story from today?",
             tools=[{"type": "web_search_preview"}],
@@ -100,7 +102,7 @@ def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None:
     response = llm.invoke(
         "what about a negative one",
         tools=[{"type": "web_search_preview"}],
-        previous_response_id=first_response.response_metadata["id"],
+        previous_response_id=first_response.response_metadata["id"],  # type: ignore[typeddict-item]
     )
     _check_response(response, output_version)
 
@@ -122,6 +124,7 @@ def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None:
     _check_response(response, output_version)
 
     for msg in [first_response, full, response]:
+        assert msg is not None
         block_types = [block["type"] for block in msg.content]  # type: ignore[index]
         if output_version == "responses/v1":
             assert block_types == ["web_search_call", "text"]
@@ -246,6 +249,7 @@ def test_parsed_pydantic_schema(output_version: Literal["v0", "responses/v1"]) -
 def test_parsed_pydantic_schema_v1() -> None:
     llm = ChatOpenAIV1(model=MODEL_NAME, use_responses_api=True)
     response = llm.invoke("how are ya", response_format=Foo)
+    assert response.text
     parsed = Foo(**json.loads(response.text))
     assert parsed == response.parsed
     assert parsed.response
@@ -258,6 +262,7 @@ def test_parsed_pydantic_schema_v1() -> None:
         full = chunk if full is None else full + chunk
         chunks.append(chunk)
     assert isinstance(full, AIMessageChunkV1)
+    assert full.text
     parsed = Foo(**json.loads(full.text))
     assert parsed == full.parsed
     assert parsed.response
@@ -649,7 +654,7 @@ def test_code_interpreter_v1() -> None:
 
     # Test streaming
     # Use same container
-    container_id = tool_outputs[0]["container_id"]
+    container_id = tool_outputs[0]["container_id"]  # type: ignore[typeddict-item]
     llm_with_tools = llm.bind_tools(
         [{"type": "code_interpreter", "container": container_id}]
     )
@@ -815,7 +820,9 @@ def test_mcp_builtin_zdr_v1() -> None:
 @pytest.mark.default_cassette("test_image_generation_streaming.yaml.gz")
 @pytest.mark.vcr
 @pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
-def test_image_generation_streaming(output_version: str) -> None:
+def test_image_generation_streaming(
+    output_version: Literal["v0", "responses/v1"],
+) -> None:
     """Test image generation streaming."""
     llm = ChatOpenAI(
         model="gpt-4.1", use_responses_api=True, output_version=output_version
@@ -934,7 +941,9 @@ def test_image_generation_streaming_v1() -> None:
 @pytest.mark.default_cassette("test_image_generation_multi_turn.yaml.gz")
 @pytest.mark.vcr
 @pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
-def test_image_generation_multi_turn(output_version: str) -> None:
+def test_image_generation_multi_turn(
+    output_version: Literal["v0", "responses/v1"],
+) -> None:
     """Test multi-turn editing of image generation by passing in history."""
     # Test multi-turn
     llm = ChatOpenAI(
@@ -20,7 +20,9 @@ from langchain_core.messages import (
     ToolCall,
     ToolMessage,
 )
+from langchain_core.messages import content_blocks as types
 from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.v1 import AIMessage as AIMessageV1
 from langchain_core.outputs import ChatGeneration, ChatResult
 from langchain_core.runnables import RunnableLambda
 from langchain_core.tracers.base import BaseTracer
@@ -54,7 +56,6 @@ from langchain_openai.chat_models._compat import (
     _convert_from_v1_to_chat_completions,
     _convert_from_v1_to_responses,
     _convert_to_v03_ai_message,
-    _convert_to_v1_from_chat_completions,
     _convert_to_v1_from_responses,
 )
 from langchain_openai.chat_models.base import (
@@ -2301,7 +2302,7 @@ def test_mcp_tracing() -> None:
     assert payload["tools"][0]["headers"]["Authorization"] == "Bearer PLACEHOLDER"
 
 
-def test_compat_responses_v1() -> None:
+def test_compat_responses_v03() -> None:
     # Check compatibility with v0.3 message format
     message_v03 = AIMessage(
         content=[
|
|||||||
"message_v1, expected",
|
"message_v1, expected",
|
||||||
[
|
[
|
||||||
(
|
(
|
||||||
AIMessage(
|
AIMessageV1(
|
||||||
[
|
[
|
||||||
{"type": "reasoning", "reasoning": "Reasoning text"},
|
{"type": "reasoning", "reasoning": "Reasoning text"},
|
||||||
{"type": "tool_call", "id": "call_123"},
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"type": "text",
|
"type": "text",
|
||||||
"text": "Hello, world!",
|
"text": "Hello, world!",
|
||||||
"annotations": [
|
"annotations": [
|
||||||
{"type": "url_citation", "url": "https://example.com"}
|
{"type": "citation", "url": "https://example.com"}
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
id="chatcmpl-123",
|
id="chatcmpl-123",
|
||||||
response_metadata={"foo": "bar"},
|
response_metadata={"model_provider": "openai", "model_name": "gpt-4.1"},
|
||||||
),
|
),
|
||||||
AIMessage(
|
AIMessageV1(
|
||||||
[{"type": "text", "text": "Hello, world!"}],
|
[{"type": "text", "text": "Hello, world!"}],
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
id="chatcmpl-123",
|
id="chatcmpl-123",
|
||||||
response_metadata={"foo": "bar"},
|
response_metadata={"model_provider": "openai", "model_name": "gpt-4.1"},
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
def test_convert_from_v1_to_chat_completions(
|
def test_convert_from_v1_to_chat_completions(
|
||||||
message_v1: AIMessage, expected: AIMessage
|
message_v1: AIMessageV1, expected: AIMessageV1
|
||||||
) -> None:
|
) -> None:
|
||||||
result = _convert_from_v1_to_chat_completions(message_v1)
|
result = _convert_from_v1_to_chat_completions(message_v1)
|
||||||
assert result == expected
|
assert result == expected
|
||||||
|
assert result.tool_calls == message_v1.tool_calls # tool calls remain cached
|
||||||
|
|
||||||
# Check no mutation
|
# Check no mutation
|
||||||
assert message_v1 != result
|
assert message_v1 != result
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
|
||||||
"message_chat_completions, expected",
|
|
||||||
[
|
|
||||||
(
|
|
||||||
AIMessage(
|
|
||||||
"Hello, world!", id="chatcmpl-123", response_metadata={"foo": "bar"}
|
|
||||||
),
|
|
||||||
AIMessage(
|
|
||||||
[{"type": "text", "text": "Hello, world!"}],
|
|
||||||
id="chatcmpl-123",
|
|
||||||
response_metadata={"foo": "bar"},
|
|
||||||
),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
AIMessage(
|
|
||||||
[{"type": "text", "text": "Hello, world!"}],
|
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
id="chatcmpl-123",
|
|
||||||
response_metadata={"foo": "bar"},
|
|
||||||
),
|
|
||||||
AIMessage(
|
|
||||||
[
|
|
||||||
{"type": "text", "text": "Hello, world!"},
|
|
||||||
{"type": "tool_call", "id": "call_123"},
|
|
||||||
],
|
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
id="chatcmpl-123",
|
|
||||||
response_metadata={"foo": "bar"},
|
|
||||||
),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
AIMessage(
|
|
||||||
"",
|
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
id="chatcmpl-123",
|
|
||||||
response_metadata={"foo": "bar"},
|
|
||||||
additional_kwargs={"tool_calls": [{"foo": "bar"}]},
|
|
||||||
),
|
|
||||||
AIMessage(
|
|
||||||
[{"type": "tool_call", "id": "call_123"}],
|
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
id="chatcmpl-123",
|
|
||||||
response_metadata={"foo": "bar"},
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
def test_convert_to_v1_from_chat_completions(
|
|
||||||
message_chat_completions: AIMessage, expected: AIMessage
|
|
||||||
) -> None:
|
|
||||||
result = _convert_to_v1_from_chat_completions(message_chat_completions)
|
|
||||||
assert result == expected
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
"message_v1, expected",
|
"message_v1, expected",
|
||||||
[
|
[
|
||||||
(
|
(
|
||||||
AIMessage(
|
AIMessageV1(
|
||||||
[
|
[
|
||||||
{"type": "reasoning", "id": "abc123"},
|
{"type": "reasoning", "id": "abc123"},
|
||||||
{"type": "reasoning", "id": "abc234", "reasoning": "foo "},
|
{"type": "reasoning", "id": "abc234", "reasoning": "foo "},
|
||||||
{"type": "reasoning", "id": "abc234", "reasoning": "bar"},
|
{"type": "reasoning", "id": "abc234", "reasoning": "bar"},
|
||||||
{"type": "tool_call", "id": "call_123"},
|
{
|
||||||
|
"type": "tool_call",
|
||||||
|
"id": "call_123",
|
||||||
|
"name": "get_weather",
|
||||||
|
"args": {"location": "San Francisco"},
|
||||||
|
},
|
||||||
|
cast(
|
||||||
|
ToolCall,
|
||||||
{
|
{
|
||||||
"type": "tool_call",
|
"type": "tool_call",
|
||||||
"id": "call_234",
|
"id": "call_234",
|
||||||
"name": "get_weather_2",
|
"name": "get_weather_2",
|
||||||
"arguments": '{"location": "New York"}',
|
"args": {"location": "New York"},
|
||||||
"item_id": "fc_123",
|
"item_id": "fc_123",
|
||||||
},
|
},
|
||||||
|
),
|
||||||
{"type": "text", "text": "Hello "},
|
{"type": "text", "text": "Hello "},
|
||||||
{
|
{
|
||||||
"type": "text",
|
"type": "text",
|
||||||
"text": "world",
|
"text": "world",
|
||||||
"annotations": [
|
"annotations": [
|
||||||
{"type": "url_citation", "url": "https://example.com"},
|
{"type": "citation", "url": "https://example.com"},
|
||||||
|
cast(
|
||||||
|
types.Citation,
|
||||||
{
|
{
|
||||||
"type": "document_citation",
|
"type": "citation",
|
||||||
"title": "my doc",
|
"title": "my doc",
|
||||||
"index": 1,
|
"file_index": 1,
|
||||||
"file_id": "file_123",
|
"file_id": "file_123",
|
||||||
},
|
},
|
||||||
|
),
|
||||||
{
|
{
|
||||||
"type": "non_standard_annotation",
|
"type": "non_standard_annotation",
|
||||||
"value": {"bar": "baz"},
|
"value": {"bar": "baz"},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
{"type": "image", "base64": "...", "id": "img_123"},
|
{"type": "image", "base64": "...", "id": "ig_123"},
|
||||||
{
|
{
|
||||||
"type": "non_standard",
|
"type": "non_standard",
|
||||||
"value": {"type": "something_else", "foo": "bar"},
|
"value": {"type": "something_else", "foo": "bar"},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_123",
|
|
||||||
"name": "get_weather",
|
|
||||||
"args": {"location": "San Francisco"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
# Make values different to check we pull from content when
|
|
||||||
# available
|
|
||||||
"type": "tool_call",
|
|
||||||
"id": "call_234",
|
|
||||||
"name": "get_weather_3",
|
|
||||||
"args": {"location": "Boston"},
|
|
||||||
},
|
|
||||||
],
|
|
||||||
id="resp123",
|
id="resp123",
|
||||||
response_metadata={"foo": "bar"},
|
|
||||||
),
|
),
|
||||||
AIMessage(
|
|
||||||
[
|
[
|
||||||
{"type": "reasoning", "id": "abc123", "summary": []},
|
{"type": "reasoning", "id": "abc123", "summary": []},
|
||||||
{
|
{
|
||||||
@@ -2596,35 +2498,16 @@ def test_convert_to_v1_from_chat_completions(
                             {"bar": "baz"},
                         ],
                     },
-                    {"type": "image_generation_call", "id": "img_123", "result": "..."},
+                    {"type": "image_generation_call", "id": "ig_123", "result": "..."},
                     {"type": "something_else", "foo": "bar"},
                 ],
-                tool_calls=[
-                    {
-                        "type": "tool_call",
-                        "id": "call_123",
-                        "name": "get_weather",
-                        "args": {"location": "San Francisco"},
-                    },
-                    {
-                        # Make values different to check we pull from content when
-                        # available
-                        "type": "tool_call",
-                        "id": "call_234",
-                        "name": "get_weather_3",
-                        "args": {"location": "Boston"},
-                    },
-                ],
-                id="resp123",
-                response_metadata={"foo": "bar"},
-            ),
         )
     ],
 )
 def test_convert_from_v1_to_responses(
-    message_v1: AIMessage, expected: AIMessage
+    message_v1: AIMessageV1, expected: AIMessageV1
 ) -> None:
-    result = _convert_from_v1_to_responses(message_v1)
+    result = _convert_from_v1_to_responses(message_v1.content, message_v1.tool_calls)
     assert result == expected
 
     # Check no mutation
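Note on the reworked assertion: `_convert_from_v1_to_responses` now takes the content list and the tool calls separately rather than a whole message, which is what lets the test also verify that neither input is mutated. A minimal sketch of that no-mutation check, assuming the helper is imported from the module under test; the `deepcopy` snapshots are the only addition here:

import copy

def assert_no_mutation(message_v1) -> None:
    # Snapshot the inputs, run the conversion, then confirm the converter
    # left the original content and tool calls untouched.
    # (_convert_from_v1_to_responses is the private helper under test.)
    content_before = copy.deepcopy(message_v1.content)
    tool_calls_before = copy.deepcopy(message_v1.tool_calls)
    _convert_from_v1_to_responses(message_v1.content, message_v1.tool_calls)
    assert message_v1.content == content_before
    assert message_v1.tool_calls == tool_calls_before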
@@ -2632,12 +2515,11 @@ def test_convert_from_v1_to_responses(
 
 
 @pytest.mark.parametrize(
-    "message_responses, expected",
+    "responses_content, tool_calls, expected_content",
     [
         (
-            AIMessage(
             [
-                {"type": "reasoning", "id": "abc123"},
+                {"type": "reasoning", "id": "abc123", "summary": []},
                 {
                     "type": "reasoning",
                     "id": "abc234",
@@ -2674,10 +2556,10 @@ def test_convert_from_v1_to_responses(
                         {"bar": "baz"},
                     ],
                 },
-                {"type": "image_generation_call", "id": "img_123", "result": "..."},
+                {"type": "image_generation_call", "id": "ig_123", "result": "..."},
                 {"type": "something_else", "foo": "bar"},
             ],
-            tool_calls=[
+            [
                 {
                     "type": "tool_call",
                     "id": "call_123",
@@ -2685,18 +2567,12 @@ def test_convert_from_v1_to_responses(
                     "args": {"location": "San Francisco"},
                 },
                 {
-                    # Make values different to check we pull from content when
-                    # available
                     "type": "tool_call",
                     "id": "call_234",
-                    "name": "get_weather_3",
-                    "args": {"location": "Boston"},
+                    "name": "get_weather_2",
+                    "args": {"location": "New York"},
                 },
             ],
-            id="resp123",
-            response_metadata={"foo": "bar"},
-        ),
-            AIMessage(
             [
                 {"type": "reasoning", "id": "abc123"},
                 {"type": "reasoning", "id": "abc234", "reasoning": "foo "},
@@ -2705,66 +2581,52 @@ def test_convert_from_v1_to_responses(
                     "type": "tool_call",
                     "id": "call_123",
                     "name": "get_weather",
-                    "arguments": '{"location": "San Francisco"}',
+                    "args": {"location": "San Francisco"},
                 },
+                cast(
+                    ToolCall,
                     {
                         "type": "tool_call",
                         "id": "call_234",
                         "name": "get_weather_2",
-                        "arguments": '{"location": "New York"}',
+                        "args": {"location": "New York"},
                         "item_id": "fc_123",
                     },
+                ),
                 {"type": "text", "text": "Hello "},
                 {
                     "type": "text",
                     "text": "world",
                     "annotations": [
-                        {"type": "url_citation", "url": "https://example.com"},
+                        {"type": "citation", "url": "https://example.com"},
+                        cast(
+                            types.Citation,
                             {
-                                "type": "document_citation",
+                                "type": "citation",
                                 "title": "my doc",
-                                "index": 1,
+                                "file_index": 1,
                                 "file_id": "file_123",
                             },
-                        {
-                            "type": "non_standard_annotation",
-                            "value": {"bar": "baz"},
-                        },
+                        ),
+                        {"type": "non_standard_annotation", "value": {"bar": "baz"}},
                     ],
                 },
-                {"type": "image", "base64": "...", "id": "img_123"},
+                {"type": "image", "base64": "...", "id": "ig_123"},
                 {
                     "type": "non_standard",
                     "value": {"type": "something_else", "foo": "bar"},
                 },
             ],
-            tool_calls=[
-                {
-                    "type": "tool_call",
-                    "id": "call_123",
-                    "name": "get_weather",
-                    "args": {"location": "San Francisco"},
-                },
-                {
-                    # Make values different to check we pull from content when
-                    # available
-                    "type": "tool_call",
-                    "id": "call_234",
-                    "name": "get_weather_3",
-                    "args": {"location": "Boston"},
-                },
-            ],
-            id="resp123",
-            response_metadata={"foo": "bar"},
-        ),
         )
     ],
 )
 def test_convert_to_v1_from_responses(
-    message_responses: AIMessage, expected: AIMessage
+    responses_content: list[dict[str, Any]],
+    tool_calls: list[ToolCall],
+    expected_content: list[types.ContentBlock],
 ) -> None:
-    result = _convert_to_v1_from_responses(message_responses)
-    assert result == expected
+    result = _convert_to_v1_from_responses(responses_content, tool_calls)
+    assert result == expected_content
 
 
 def test_get_last_messages() -> None:
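For orientation, a sketch of how the decomposed converter is exercised, assuming `_convert_to_v1_from_responses` is imported from the module under test; the block shapes mirror the parametrized cases above:

from typing import cast

from langchain_core.messages import ToolCall

responses_content = [
    {"type": "reasoning", "id": "abc123", "summary": []},
    {"type": "text", "text": "Hello "},
]
tool_calls = [
    cast(
        ToolCall,
        {
            "type": "tool_call",
            "id": "call_123",
            "name": "get_weather",
            "args": {"location": "San Francisco"},
        },
    )
]

# The converter now returns a list of v1 content blocks instead of a message.
blocks = _convert_to_v1_from_responses(responses_content, tool_calls)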
@@ -1,6 +1,6 @@
 from langchain_openai.chat_models import __all__
 
-EXPECTED_ALL = ["ChatOpenAI", "AzureChatOpenAI"]
+EXPECTED_ALL = ["ChatOpenAI", "ChatOpenAIV1", "AzureChatOpenAI"]
 
 
 def test_all_imports() -> None:
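The body of `test_all_imports` is unchanged by this hunk; such export checks commonly compare sorted lists, sketched here as an assumption rather than taken from the diff:

def test_all_imports() -> None:
    # Sort both sides so the assertion is insensitive to declaration order.
    # (Assumed body; only EXPECTED_ALL appears in this diff.)
    assert sorted(EXPECTED_ALL) == sorted(__all__)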
@@ -1,4 +1,4 @@
-from typing import Any, Optional
+from typing import Any, Literal, Optional
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -698,7 +698,9 @@ def _strip_none(obj: Any) -> Any:
         ),
     ],
 )
-def test_responses_stream(output_version: str, expected_content: list[dict]) -> None:
+def test_responses_stream(
+    output_version: Literal["v0", "responses/v1"], expected_content: list[dict]
+) -> None:
     llm = ChatOpenAI(
         model="o4-mini", use_responses_api=True, output_version=output_version
     )
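Typing `output_version` as `Literal["v0", "responses/v1"]` rather than `str` lets a type checker reject values the parametrization never produces. A small standalone illustration, not taken from this diff:

from typing import Literal

OutputVersion = Literal["v0", "responses/v1"]

def run(output_version: OutputVersion) -> None:
    # Only the two literal values type-check as arguments.
    print(f"streaming with output_version={output_version}")

run("v0")            # OK
run("responses/v1")  # OK
# run("v1")          # rejected by mypy/pyright before the test even runs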
@@ -6,6 +6,7 @@ from uuid import UUID
 
 from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
 from langchain_core.messages import BaseMessage
+from langchain_core.messages.v1 import MessageV1
 from pydantic import BaseModel
 
 
@@ -196,7 +197,7 @@ class FakeCallbackHandlerWithChatStart(FakeCallbackHandler):
     def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[MessageV1]],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
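With this union, the fake handler can be driven both by the legacy batched call and by the v1 flat-list call. A sketch of how a handler can tell the two apart at runtime; the helper name is illustrative:

from typing import Union

from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import MessageV1

def describe(messages: Union[list[list[BaseMessage]], list[MessageV1]]) -> str:
    # Legacy callers pass batches (a list of BaseMessage lists); v1 callers
    # pass a flat list of MessageV1 objects.
    if messages and isinstance(messages[0], list):
        return f"legacy batch of {len(messages)} message lists"
    return f"flat list of {len(messages)} v1 messages"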
@@ -3,6 +3,7 @@ from langchain_openai import __all__
 EXPECTED_ALL = [
     "OpenAI",
     "ChatOpenAI",
+    "ChatOpenAIV1",
     "OpenAIEmbeddings",
     "AzureOpenAI",
     "AzureChatOpenAI",
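With `ChatOpenAIV1` exported at the package root, it becomes importable alongside `ChatOpenAI`. A hypothetical usage sketch; the constructor arguments are assumed to mirror `ChatOpenAI` and are not specified by this diff:

from langchain_openai import ChatOpenAIV1

# Assumed constructor and call pattern, mirroring ChatOpenAI.
llm = ChatOpenAIV1(model="gpt-4.1-mini")
response = llm.invoke("Hello!")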