Mirror of https://github.com/hwchase17/langchain.git

Commit 46b86ef2a6 (parent 82aa5ac8a5)

    add tests
@@ -535,7 +535,6 @@ class BaseChatOpenAI(BaseChatModel):
         """Currently only o-series models support reasoning_effort."""
         model = values.get("model_name") or values.get("model") or ""
         if not re.match(r"^o\d", model) and values.get("reasoning_effort") is not None:
-            reasoning_effort = values.get("reasoning_effort")
             warnings.warn(
                 f"Reasoning effort is not supported for model '{model}'. Defaulting "
                 "to null."
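The hunk above removes an unused local (reasoning_effort was read into a variable that was never used) from the validator that warns when reasoning_effort is passed to a non o-series model. Below is a minimal sketch of that check as a standalone function over the raw constructor values; the function name and the explicit reset to None are assumptions inferred from the warning text and from the tests further down, not code copied from the repository.

# Hypothetical standalone version of the check exercised here; the reset to None
# is inferred from the warning message and the tests below, not from the source.
import re
import warnings
from typing import Any


def check_reasoning_effort(values: dict[str, Any]) -> dict[str, Any]:
    """Currently only o-series models support reasoning_effort."""
    model = values.get("model_name") or values.get("model") or ""
    if not re.match(r"^o\d", model) and values.get("reasoning_effort") is not None:
        warnings.warn(
            f"Reasoning effort is not supported for model '{model}'. Defaulting "
            "to null."
        )
        values["reasoning_effort"] = None  # assumed: drop the unsupported field
    return values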
@@ -47,11 +47,22 @@ def test_openai_model_param() -> None:
     assert llm.max_tokens == 10


-def test_openai_o1_temperature() -> None:
-    llm = ChatOpenAI(model="o1-preview")
-    assert llm.temperature == 1
-    llm = ChatOpenAI(model_name="o1-mini")  # type: ignore[call-arg]
-    assert llm.temperature == 1
+def test_openai_o_series_temperature() -> None:
+    with pytest.warns(None) as record:  # type: ignore[call-overload]
+        llm = ChatOpenAI(model="o1-preview")
+        assert llm.temperature is None
+        llm = ChatOpenAI(model_name="o1-mini")  # type: ignore[call-arg]
+        assert llm.temperature is None
+        llm = ChatOpenAI(model="o3-mini")
+        assert llm.temperature is None
+        llm = ChatOpenAI(model="o3-mini", temperature=1)
+        assert llm.temperature == 1
+        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.5)
+    assert len(record) == 0
+
+    with pytest.warns(match="invalid temperature"):
+        llm = ChatOpenAI(model="o3-mini", temperature=0.5)
+    assert llm.temperature is None


 def test_function_message_dict_to_function_message() -> None:
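The rewritten test documents the temperature rule for o-series models: with no explicit temperature the attribute ends up as None, an explicit temperature=1 is kept, any other value triggers an "invalid temperature" warning and is dropped, and non o-series models such as gpt-4o-mini keep whatever the caller passed. The following small helper only restates those expectations; it is a sketch of the asserted behaviour, not the ChatOpenAI implementation.

# Hypothetical helper mirroring the behaviour asserted above; not library code.
import re
import warnings
from typing import Optional


def resolve_temperature(model: str, temperature: Optional[float]) -> Optional[float]:
    if re.match(r"^o\d", model):  # o1, o1-mini, o3-mini, ...
        if temperature is None or temperature == 1:
            return temperature  # unset stays None, an explicit 1 is kept
        warnings.warn("invalid temperature")  # any other value is rejected
        return None
    return temperature  # non o-series models keep the caller's value


assert resolve_temperature("o3-mini", None) is None
assert resolve_temperature("o3-mini", 1) == 1
assert resolve_temperature("gpt-4o-mini", 0.5) == 0.5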
@@ -901,10 +912,15 @@ def test__get_request_payload() -> None:
     assert payload == expected


-def test_init_o1() -> None:
+def test_init_reasoning_effort() -> None:
     with pytest.warns(None) as record:  # type: ignore[call-overload]
-        ChatOpenAI(model="o1", reasoning_effort="medium")
+        llm_o1 = ChatOpenAI(model="o1", reasoning_effort="medium")
     assert len(record) == 0
+    assert llm_o1.reasoning_effort == "medium"
+
+    with pytest.warns(match="Reasoning effort is not supported"):
+        llm_gpt = ChatOpenAI(model="gpt-4o-mini", reasoning_effort="medium")
+    assert llm_gpt.reasoning_effort is None


 def test_structured_output_old_model() -> None:
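Keeping the constructed instances and asserting on reasoning_effort makes the caller-visible contract explicit: o-series models retain the value, other models warn and fall back to None. A usage sketch of that contract follows; it assumes langchain-openai is installed and an OPENAI_API_KEY is available in the environment, and is illustrative rather than taken from the repository.

# Illustrative usage; assumes OPENAI_API_KEY is set so ChatOpenAI can be constructed.
import warnings

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="o1", reasoning_effort="medium")
assert llm.reasoning_effort == "medium"  # o-series keeps the parameter

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm = ChatOpenAI(model="gpt-4o-mini", reasoning_effort="medium")
assert llm.reasoning_effort is None  # non o-series: warned and reset to None
assert any("Reasoning effort is not supported" in str(w.message) for w in caught)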