Compare commits


2 Commits

Author        | SHA1       | Message           | Date
Chester Curme | 46b86ef2a6 | add tests         | 2025-02-27 12:12:57 -05:00
Chester Curme | 82aa5ac8a5 | update validators | 2025-02-27 12:10:37 -05:00
2 changed files with 44 additions and 10 deletions

View File

@@ -518,10 +518,28 @@ class BaseChatOpenAI(BaseChatModel):
     @model_validator(mode="before")
     @classmethod
     def validate_temperature(cls, values: Dict[str, Any]) -> Any:
-        """Currently o1 models only allow temperature=1."""
+        """Currently o-series models only allow temperature=1."""
         model = values.get("model_name") or values.get("model") or ""
-        if model.startswith("o1") and "temperature" not in values:
-            values["temperature"] = 1
+        if re.match(r"^o\d", model) and values.get("temperature") not in (None, 1):
+            temperature = values.get("temperature")
+            warnings.warn(
+                f"Received invalid temperature value of {temperature} "
+                f"for model {model}. Defaulting to null."
+            )
+            values["temperature"] = None
         return values
 
+    @model_validator(mode="before")
+    @classmethod
+    def validate_reasoning_effort(cls, values: Dict[str, Any]) -> Any:
+        """Currently only o-series models support reasoning_effort."""
+        model = values.get("model_name") or values.get("model") or ""
+        if not re.match(r"^o\d", model) and values.get("reasoning_effort") is not None:
+            warnings.warn(
+                f"Reasoning effort is not supported for model '{model}'. Defaulting "
+                "to null."
+            )
+            values["reasoning_effort"] = None
+        return values
+
     @model_validator(mode="after")
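The intended effect of the two "before" validators above, sketched as a standalone check. This is a minimal sketch, not part of the diff: it assumes a build of langchain-openai that includes this change, and the dummy api_key is only there so construction works offline (no request is made at construction time).

import warnings

from langchain_openai import ChatOpenAI

# Unsupported temperature on an o-series model: the validator warns and
# resets temperature to None instead of raising.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm = ChatOpenAI(model="o3-mini", temperature=0.5, api_key="sk-dummy")
assert llm.temperature is None
assert any("invalid temperature" in str(w.message) for w in caught)

# reasoning_effort on a non-o-series model: warned and reset to None.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm = ChatOpenAI(model="gpt-4o-mini", reasoning_effort="medium", api_key="sk-dummy")
assert llm.reasoning_effort is None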

View File

@@ -47,11 +47,22 @@ def test_openai_model_param() -> None:
     assert llm.max_tokens == 10
 
 
-def test_openai_o1_temperature() -> None:
-    llm = ChatOpenAI(model="o1-preview")
-    assert llm.temperature == 1
-    llm = ChatOpenAI(model_name="o1-mini")  # type: ignore[call-arg]
-    assert llm.temperature == 1
+def test_openai_o_series_temperature() -> None:
+    with pytest.warns(None) as record:  # type: ignore[call-overload]
+        llm = ChatOpenAI(model="o1-preview")
+        assert llm.temperature is None
+        llm = ChatOpenAI(model_name="o1-mini")  # type: ignore[call-arg]
+        assert llm.temperature is None
+        llm = ChatOpenAI(model="o3-mini")
+        assert llm.temperature is None
+        llm = ChatOpenAI(model="o3-mini", temperature=1)
+        assert llm.temperature == 1
+        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.5)
+    assert len(record) == 0
+
+    with pytest.warns(match="invalid temperature"):
+        llm = ChatOpenAI(model="o3-mini", temperature=0.5)
+    assert llm.temperature is None
 
 
 def test_function_message_dict_to_function_message() -> None:
@@ -901,10 +912,15 @@ def test__get_request_payload() -> None:
     assert payload == expected
 
 
-def test_init_o1() -> None:
+def test_init_reasoning_effort() -> None:
     with pytest.warns(None) as record:  # type: ignore[call-overload]
-        ChatOpenAI(model="o1", reasoning_effort="medium")
+        llm_o1 = ChatOpenAI(model="o1", reasoning_effort="medium")
     assert len(record) == 0
+    assert llm_o1.reasoning_effort == "medium"
+
+    with pytest.warns(match="Reasoning effort is not supported"):
+        llm_gpt = ChatOpenAI(model="gpt-4o-mini", reasoning_effort="medium")
+    assert llm_gpt.reasoning_effort is None
 
 
 def test_structured_output_old_model() -> None:
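One detail worth noting from the validators: both use re.match(r"^o\d", model), which anchors at the start of the string, so a model name like "gpt-4o-mini" is not mistaken for an o-series model even though it contains an "o". A quick standalone check of the pattern (illustration only, not part of the diff):

import re

O_SERIES = re.compile(r"^o\d")  # same pattern as the validators above

assert O_SERIES.match("o1-preview") is not None
assert O_SERIES.match("o3-mini") is not None
assert O_SERIES.match("gpt-4o-mini") is None  # contains "o" but does not start with o<digit>
assert O_SERIES.match("gpt-4o") is None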