From 9c160e2368f7f44bd41e967a7b3371208cf13a52 Mon Sep 17 00:00:00 2001
From: ccurme
Date: Wed, 18 Feb 2026 18:19:28 -0500
Subject: [PATCH] revert: accept integer temperature values in _get_ls_params
 (#35319)

---
 .../language_models/chat_models.py            |  6 ++--
 .../langchain_core/language_models/llms.py    |  6 ++--
 .../language_models/chat_models/test_base.py  | 34 ------------------
 .../language_models/llms/test_base.py         | 35 -------------------
 4 files changed, 4 insertions(+), 77 deletions(-)

diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 32f198532e6..7e11c491d6f 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -812,11 +812,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             ls_params["ls_model_name"] = self.model_name
 
         # temperature
-        if "temperature" in kwargs and isinstance(kwargs["temperature"], (int, float)):
+        if "temperature" in kwargs and isinstance(kwargs["temperature"], float):
             ls_params["ls_temperature"] = kwargs["temperature"]
-        elif hasattr(self, "temperature") and isinstance(
-            self.temperature, (int, float)
-        ):
+        elif hasattr(self, "temperature") and isinstance(self.temperature, float):
             ls_params["ls_temperature"] = self.temperature
 
         # max_tokens
diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py
index fa034675d89..5aa287ada8e 100644
--- a/libs/core/langchain_core/language_models/llms.py
+++ b/libs/core/langchain_core/language_models/llms.py
@@ -351,11 +351,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             ls_params["ls_model_name"] = self.model_name
 
         # temperature
-        if "temperature" in kwargs and isinstance(kwargs["temperature"], (int, float)):
+        if "temperature" in kwargs and isinstance(kwargs["temperature"], float):
             ls_params["ls_temperature"] = kwargs["temperature"]
-        elif hasattr(self, "temperature") and isinstance(
-            self.temperature, (int, float)
-        ):
+        elif hasattr(self, "temperature") and isinstance(self.temperature, float):
             ls_params["ls_temperature"] = self.temperature
 
         # max_tokens
diff --git a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py
index 51fbecb8ffe..eedb5d393a8 100644
--- a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py
+++ b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py
@@ -1213,40 +1213,6 @@ def test_get_ls_params() -> None:
     assert ls_params["ls_stop"] == ["stop"]
 
 
-def test_get_ls_params_int_temperature() -> None:
-    class IntTempModel(BaseChatModel):
-        model: str = "foo"
-        temperature: int = 0
-        max_tokens: int = 1024
-
-        def _generate(
-            self,
-            messages: list[BaseMessage],
-            stop: list[str] | None = None,
-            run_manager: CallbackManagerForLLMRun | None = None,
-            **kwargs: Any,
-        ) -> ChatResult:
-            raise NotImplementedError
-
-        @property
-        def _llm_type(self) -> str:
-            return "fake-chat-model"
-
-    llm = IntTempModel()
-
-    # Integer temperature from self attribute
-    ls_params = llm._get_ls_params()
-    assert ls_params["ls_temperature"] == 0
-
-    # Integer temperature from kwargs
-    ls_params = llm._get_ls_params(temperature=1)
-    assert ls_params["ls_temperature"] == 1
-
-    # Float temperature from kwargs still works
-    ls_params = llm._get_ls_params(temperature=0.5)
-    assert ls_params["ls_temperature"] == 0.5
-
-
 def test_model_profiles() -> None:
     model = GenericFakeChatModel(messages=iter([]))
     assert model.profile is None
diff --git a/libs/core/tests/unit_tests/language_models/llms/test_base.py b/libs/core/tests/unit_tests/language_models/llms/test_base.py
index fb2ae8dc321..e5547a617a7 100644
--- a/libs/core/tests/unit_tests/language_models/llms/test_base.py
+++ b/libs/core/tests/unit_tests/language_models/llms/test_base.py
@@ -277,38 +277,3 @@ def test_get_ls_params() -> None:
 
     ls_params = llm._get_ls_params(stop=["stop"])
     assert ls_params["ls_stop"] == ["stop"]
-
-
-def test_get_ls_params_int_temperature() -> None:
-    class IntTempModel(BaseLLM):
-        model: str = "foo"
-        temperature: int = 0
-        max_tokens: int = 1024
-
-        @override
-        def _generate(
-            self,
-            prompts: list[str],
-            stop: list[str] | None = None,
-            run_manager: CallbackManagerForLLMRun | None = None,
-            **kwargs: Any,
-        ) -> LLMResult:
-            raise NotImplementedError
-
-        @property
-        def _llm_type(self) -> str:
-            return "fake-model"
-
-    llm = IntTempModel()
-
-    # Integer temperature from self attribute
-    ls_params = llm._get_ls_params()
-    assert ls_params["ls_temperature"] == 0
-
-    # Integer temperature from kwargs
-    ls_params = llm._get_ls_params(temperature=1)
-    assert ls_params["ls_temperature"] == 1
-
-    # Float temperature from kwargs still works
-    ls_params = llm._get_ls_params(temperature=0.5)
-    assert ls_params["ls_temperature"] == 0.5
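
---
Note (not part of the patch; git am ignores text after the diff): a minimal
sketch of the expected post-revert behavior of _get_ls_params, mirroring the
deleted tests. IntTempModel below is a hypothetical stand-in for a chat model
that declares an integer temperature field.

from typing import Any

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult


class IntTempModel(BaseChatModel):
    model: str = "foo"
    temperature: int = 0

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        raise NotImplementedError

    @property
    def _llm_type(self) -> str:
        return "fake-chat-model"


llm = IntTempModel()

# After the revert, an integer temperature (whether read from the model
# attribute or passed via kwargs) is no longer reported as ls_temperature,
# because the isinstance checks once again require float.
assert "ls_temperature" not in llm._get_ls_params()
assert "ls_temperature" not in llm._get_ls_params(temperature=1)

# A float temperature passed via kwargs is still reported.
assert llm._get_ls_params(temperature=0.5)["ls_temperature"] == 0.5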