From 46b86ef2a688ead150c381e63031d92570967d1b Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Thu, 27 Feb 2025 12:12:57 -0500
Subject: [PATCH] add tests

---
 .../langchain_openai/chat_models/base.py       |  1 -
 .../tests/unit_tests/chat_models/test_base.py  | 30 ++++++++++++++-----
 2 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index eb559c9521d..3bd204b76b0 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -535,7 +535,6 @@ class BaseChatOpenAI(BaseChatModel):
         """Currently only o-series models support reasoning_effort."""
         model = values.get("model_name") or values.get("model") or ""
         if not re.match(r"^o\d", model) and values.get("reasoning_effort") is not None:
-            reasoning_effort = values.get("reasoning_effort")
             warnings.warn(
                 f"Reasoning effort is not supported for model '{model}'. Defaulting "
                 "to null."
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index 8f8c6fa0361..70a24ec4586 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -47,11 +47,22 @@ def test_openai_model_param() -> None:
     assert llm.max_tokens == 10
 
 
-def test_openai_o1_temperature() -> None:
-    llm = ChatOpenAI(model="o1-preview")
-    assert llm.temperature == 1
-    llm = ChatOpenAI(model_name="o1-mini")  # type: ignore[call-arg]
-    assert llm.temperature == 1
+def test_openai_o_series_temperature() -> None:
+    with pytest.warns(None) as record:  # type: ignore[call-overload]
+        llm = ChatOpenAI(model="o1-preview")
+        assert llm.temperature is None
+        llm = ChatOpenAI(model_name="o1-mini")  # type: ignore[call-arg]
+        assert llm.temperature is None
+        llm = ChatOpenAI(model="o3-mini")
+        assert llm.temperature is None
+        llm = ChatOpenAI(model="o3-mini", temperature=1)
+        assert llm.temperature == 1
+        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.5)
+    assert len(record) == 0
+
+    with pytest.warns(match="invalid temperature"):
+        llm = ChatOpenAI(model="o3-mini", temperature=0.5)
+    assert llm.temperature is None
 
 
 def test_function_message_dict_to_function_message() -> None:
@@ -901,10 +912,15 @@ def test__get_request_payload() -> None:
     assert payload == expected
 
 
-def test_init_o1() -> None:
+def test_init_reasoning_effort() -> None:
     with pytest.warns(None) as record:  # type: ignore[call-overload]
-        ChatOpenAI(model="o1", reasoning_effort="medium")
+        llm_o1 = ChatOpenAI(model="o1", reasoning_effort="medium")
     assert len(record) == 0
+    assert llm_o1.reasoning_effort == "medium"
+
+    with pytest.warns(match="Reasoning effort is not supported"):
+        llm_gpt = ChatOpenAI(model="gpt-4o-mini", reasoning_effort="medium")
+    assert llm_gpt.reasoning_effort is None
 
 
 def test_structured_output_old_model() -> None:
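
For context, a minimal standalone sketch of the gate the reasoning_effort tests exercise, mirroring the re.match(r"^o\d", model) check and warning text from the base.py hunk above. The check_reasoning_effort helper name is hypothetical, used only for illustration; it is not a function in langchain_openai.

# Standalone sketch (not library code): reasoning_effort gating mirrored from the base.py hunk.
import re
import warnings
from typing import Optional


def check_reasoning_effort(model: str, reasoning_effort: Optional[str]) -> Optional[str]:
    # o-series models (o1, o3-mini, ...) keep the value; other models drop it with a warning.
    if not re.match(r"^o\d", model) and reasoning_effort is not None:
        warnings.warn(
            f"Reasoning effort is not supported for model '{model}'. Defaulting to null."
        )
        return None
    return reasoning_effort


assert check_reasoning_effort("o1", "medium") == "medium"
assert check_reasoning_effort("gpt-4o-mini", "medium") is None  # also emits a warning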