openai[patch]: route to Responses API if relevant attributes are set (#31645)
Following https://github.com/langchain-ai/langchain/pull/30329.
commit 6409498f6c
parent 3044bd37a9
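In short, ChatOpenAI now routes requests to the Responses API automatically when any of the Responses-only attributes `include`, `reasoning`, or `truncation` is set, without requiring `use_responses_api=True`. A minimal sketch of the new behavior, assuming a langchain-openai build that carries this patch and a valid OPENAI_API_KEY in the environment:

    from langchain_openai import ChatOpenAI

    # Setting `reasoning` alone is enough to route to the Responses API;
    # `use_responses_api=True` is no longer required.
    llm = ChatOpenAI(
        model="o4-mini",
        reasoning={"effort": "medium", "summary": "auto"},
    )
    response = llm.invoke("What is 3^3?")
    print(response.content)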
@@ -593,8 +593,8 @@ class BaseChatOpenAI(BaseChatModel):
     """

     store: Optional[bool] = None
-    """If True, the Responses API may store response data for future use. Defaults to
-    True.
+    """If True, OpenAI may store response data for future use. Defaults to True
+    for the Responses API and False for the Chat Completions API.

     .. versionadded:: 0.3.24
     """
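As a hedged illustration of the clarified `store` default (True for the Responses API, False for Chat Completions), opting out of storage is a constructor flag; this sketch makes no request and is not taken verbatim from the diff:

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(
        model="o4-mini",
        use_responses_api=True,
        store=False,  # ask OpenAI not to retain this response for later retrieval
    )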
@@ -1074,6 +1074,12 @@ class BaseChatOpenAI(BaseChatModel):
     def _use_responses_api(self, payload: dict) -> bool:
         if isinstance(self.use_responses_api, bool):
             return self.use_responses_api
+        elif self.include is not None:
+            return True
+        elif self.reasoning is not None:
+            return True
+        elif self.truncation is not None:
+            return True
         else:
             return _use_responses_api(payload)

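A rough sketch of the resolution order implemented above: an explicit `use_responses_api` bool always wins, then any of `include`, `reasoning`, or `truncation` being set forces the Responses API, and only then is the payload itself inspected. The snippet below exercises the private method directly and makes no network calls; the dummy API key only satisfies client construction:

    from langchain_openai import ChatOpenAI

    # The attribute alone routes to the Responses API.
    llm = ChatOpenAI(model="o4-mini", truncation="auto", api_key="sk-dummy")
    assert llm._use_responses_api({}) is True

    # An explicit bool overrides attribute- and payload-based routing.
    llm = ChatOpenAI(model="o4-mini", use_responses_api=False, api_key="sk-dummy")
    assert llm._use_responses_api({"truncation": "auto"}) is False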
@@ -3173,7 +3179,13 @@ def _use_responses_api(payload: dict) -> bool:
     uses_builtin_tools = "tools" in payload and any(
         _is_builtin_tool(tool) for tool in payload["tools"]
     )
-    responses_only_args = {"previous_response_id", "text", "truncation", "include"}
+    responses_only_args = {
+        "include",
+        "previous_response_id",
+        "reasoning",
+        "text",
+        "truncation",
+    }
     return bool(uses_builtin_tools or responses_only_args.intersection(payload))


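The module-level helper now also treats "reasoning" in the payload as a Responses-only argument, so kwargs passed at invocation time (or via model_kwargs) route the same way as constructor attributes. A hedged check of that behavior; the import path is an assumption based on where BaseChatOpenAI lives and is not shown in this diff:

    # Assumed module path; the diff does not include the file name.
    from langchain_openai.chat_models.base import _use_responses_api

    assert _use_responses_api({"reasoning": {"effort": "low"}}) is True
    assert _use_responses_api({"temperature": 0.2}) is False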
@@ -317,7 +317,9 @@ def test_stateful_api() -> None:


 def test_route_from_model_kwargs() -> None:
-    llm = ChatOpenAI(model=MODEL_NAME, model_kwargs={"truncation": "auto"})
+    llm = ChatOpenAI(
+        model=MODEL_NAME, model_kwargs={"text": {"format": {"type": "text"}}}
+    )
     _ = next(llm.stream("Hello"))


@@ -356,7 +358,7 @@ def test_file_search() -> None:
 def test_stream_reasoning_summary() -> None:
     llm = ChatOpenAI(
         model="o4-mini",
-        use_responses_api=True,
+        # Routes to Responses API if `reasoning` is set.
         reasoning={"effort": "medium", "summary": "auto"},
     )
     message_1 = {"role": "user", "content": "What is 3^3?"}
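A hedged sketch of consuming the stream configured in the test above; it assumes the standard LangChain chunk-aggregation behavior and makes no claim about the exact shape of the reasoning-summary payload, which this diff does not show:

    # Aggregate streamed chunks into a single message.
    full = None
    for chunk in llm.stream([message_1]):
        full = chunk if full is None else full + chunk
    # The reasoning summary requested via summary="auto" arrives alongside the text.
    print(full.content if full is not None else "")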