mirror of
https://github.com/hwchase17/langchain.git
synced 2026-03-18 11:07:36 +00:00
fix(openai): Allow temperature when reasoning is set to the string 'none' (#34298)
Co-authored-by: Chester Curme <chester.curme@gmail.com>
This commit is contained in:
@@ -846,9 +846,15 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
if model_lower.startswith("o1") and "temperature" not in values:
|
||||
values["temperature"] = 1
|
||||
|
||||
# For gpt-5 models, handle temperature restrictions
|
||||
# Note that gpt-5-chat models do support temperature
|
||||
if model_lower.startswith("gpt-5") and "chat" not in model_lower:
|
||||
# For gpt-5 models, handle temperature restrictions. Temperature is supported
|
||||
# by gpt-5-chat and gpt-5 models with reasoning_effort='none' or
|
||||
# reasoning={'effort': 'none'}.
|
||||
if (
|
||||
model_lower.startswith("gpt-5")
|
||||
and ("chat" not in model_lower)
|
||||
and values.get("reasoning_effort") != "none"
|
||||
and (values.get("reasoning") or {}).get("effort") != "none"
|
||||
):
|
||||
temperature = values.get("temperature")
|
||||
if temperature is not None and temperature != 1:
|
||||
# For gpt-5 (non-chat), only temperature=1 is supported
|
||||
@@ -3745,8 +3751,14 @@ def _construct_responses_api_payload(
|
||||
payload["reasoning"] = {"effort": payload.pop("reasoning_effort")}
|
||||
|
||||
# Remove temperature parameter for models that don't support it in responses API
|
||||
# gpt-5-chat supports temperature, and gpt-5 models with reasoning.effort='none'
|
||||
# also support temperature
|
||||
model = payload.get("model") or ""
|
||||
if model.startswith("gpt-5") and "chat" not in model: # gpt-5-chat supports
|
||||
if (
|
||||
model.startswith("gpt-5")
|
||||
and ("chat" not in model) # gpt-5-chat supports
|
||||
and (payload.get("reasoning") or {}).get("effort") != "none"
|
||||
):
|
||||
payload.pop("temperature", None)
|
||||
|
||||
payload["input"] = _construct_responses_api_input(messages)
|
||||
|
||||
@@ -3056,3 +3056,63 @@ def test_gpt_5_temperature_case_insensitive(
|
||||
messages = [HumanMessage(content="Hello")]
|
||||
payload = llm._get_request_payload(messages)
|
||||
assert payload["temperature"] == 0.7
|
||||
|
||||
|
||||
@pytest.mark.parametrize("use_responses_api", [False, True])
def test_gpt_5_1_temperature_with_reasoning_effort_none(
    use_responses_api: bool,
) -> None:
    """Temperature is kept only when reasoning effort is explicitly ``'none'``.

    For non-chat gpt-5 models the client normally strips ``temperature``
    from the request payload. Setting ``reasoning_effort='none'`` or
    ``reasoning={'effort': 'none'}`` should preserve it, while omitting the
    effort or using any other value should still strip it.
    """

    def _request_payload(**model_kwargs: object) -> dict:
        # Build a fresh gpt-5.1 model with temperature=0.5 plus the given
        # reasoning configuration, and return the payload it would send.
        chat = ChatOpenAI(
            model="gpt-5.1",
            temperature=0.5,
            use_responses_api=use_responses_api,
            **model_kwargs,
        )
        return chat._get_request_payload([HumanMessage(content="Hello")])

    # Effort 'none', in either spelling, keeps the explicit temperature.
    assert _request_payload(reasoning_effort="none")["temperature"] == 0.5
    assert _request_payload(reasoning={"effort": "none"})["temperature"] == 0.5

    # Default (no reasoning effort at all) still restricts temperature.
    assert "temperature" not in _request_payload()

    # Any non-'none' effort also restricts temperature, in both spellings.
    assert "temperature" not in _request_payload(reasoning_effort="low")
    assert "temperature" not in _request_payload(reasoning={"effort": "low"})
|
||||
|
||||
Reference in New Issue
Block a user