From a21e84faf7b42e01d51bbcedc77acdc1130f3e71 Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Tue, 21 Nov 2023 20:12:35 -0800
Subject: [PATCH] BUGFIX: llm backwards compat imports (#13698)

---
 .../language_models/llms/__init__.py           |  0
 .../language_models/llms/test_base.py          | 19 +++++++++++
 libs/langchain/langchain/chat_models/base.py   |  2 ++
 libs/langchain/langchain/llms/base.py          |  5 +++
 .../tests/unit_tests/chat_models/test_base.py  | 13 ++++++++
 .../tests/unit_tests/llms/test_base.py         | 33 +++++++++----------
 6 files changed, 54 insertions(+), 18 deletions(-)
 create mode 100644 libs/core/tests/unit_tests/language_models/llms/__init__.py
 create mode 100644 libs/core/tests/unit_tests/language_models/llms/test_base.py
 create mode 100644 libs/langchain/tests/unit_tests/chat_models/test_base.py

diff --git a/libs/core/tests/unit_tests/language_models/llms/__init__.py b/libs/core/tests/unit_tests/language_models/llms/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/libs/core/tests/unit_tests/language_models/llms/test_base.py b/libs/core/tests/unit_tests/language_models/llms/test_base.py
new file mode 100644
index 00000000000..9c7261655ca
--- /dev/null
+++ b/libs/core/tests/unit_tests/language_models/llms/test_base.py
@@ -0,0 +1,19 @@
+from tests.unit_tests.fake.llm import FakeListLLM
+
+
+def test_batch() -> None:
+    llm = FakeListLLM(responses=["foo"] * 3)
+    output = llm.batch(["foo", "bar", "foo"])
+    assert output == ["foo"] * 3
+
+    output = llm.batch(["foo", "bar", "foo"], config={"max_concurrency": 2})
+    assert output == ["foo"] * 3
+
+
+async def test_abatch() -> None:
+    llm = FakeListLLM(responses=["foo"] * 3)
+    output = await llm.abatch(["foo", "bar", "foo"])
+    assert output == ["foo"] * 3
+
+    output = await llm.abatch(["foo", "bar", "foo"], config={"max_concurrency": 2})
+    assert output == ["foo"] * 3
diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index 1be59da54ce..1da92ce9a79 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -3,6 +3,7 @@ from langchain_core.language_models.chat_models import (
     SimpleChatModel,
     _agenerate_from_stream,
     _generate_from_stream,
+    _get_verbosity,
 )
 
 __all__ = [
@@ -10,4 +11,5 @@ __all__ = [
     "SimpleChatModel",
     "_generate_from_stream",
     "_agenerate_from_stream",
+    "_get_verbosity",
 ]
diff --git a/libs/langchain/langchain/llms/base.py b/libs/langchain/langchain/llms/base.py
index f6739068649..2564db39c82 100644
--- a/libs/langchain/langchain/llms/base.py
+++ b/libs/langchain/langchain/llms/base.py
@@ -1,6 +1,9 @@
+# Backwards compatibility.
+from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.llms import (
     LLM,
     BaseLLM,
+    _get_verbosity,
     create_base_retry_decorator,
     get_prompts,
     update_cache,
@@ -10,6 +13,8 @@ __all__ = [
     "create_base_retry_decorator",
     "get_prompts",
     "update_cache",
+    "BaseLanguageModel",
+    "_get_verbosity",
     "BaseLLM",
     "LLM",
 ]
diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py
new file mode 100644
index 00000000000..51576f20818
--- /dev/null
+++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py
@@ -0,0 +1,13 @@
+from langchain.chat_models.base import __all__
+
+EXPECTED_ALL = [
+    "BaseChatModel",
+    "SimpleChatModel",
+    "_agenerate_from_stream",
+    "_generate_from_stream",
+    "_get_verbosity",
+]
+
+
+def test_all_imports() -> None:
+    assert set(__all__) == set(EXPECTED_ALL)
diff --git a/libs/langchain/tests/unit_tests/llms/test_base.py b/libs/langchain/tests/unit_tests/llms/test_base.py
index c43e73bd117..c69ed501482 100644
--- a/libs/langchain/tests/unit_tests/llms/test_base.py
+++ b/libs/langchain/tests/unit_tests/llms/test_base.py
@@ -10,8 +10,23 @@ from langchain_core.outputs import Generation, LLMResult
 
 from langchain.cache import InMemoryCache, SQLAlchemyCache
 from langchain.globals import get_llm_cache, set_llm_cache
+from langchain.llms.base import __all__
 from tests.unit_tests.llms.fake_llm import FakeLLM
 
+EXPECTED_ALL = [
+    "BaseLLM",
+    "LLM",
+    "_get_verbosity",
+    "create_base_retry_decorator",
+    "get_prompts",
+    "update_cache",
+    "BaseLanguageModel",
+]
+
+
+def test_all_imports() -> None:
+    assert set(__all__) == set(EXPECTED_ALL)
+
 
 def test_caching() -> None:
     """Test caching behavior."""
@@ -74,21 +89,3 @@ def test_custom_caching() -> None:
         llm_output=None,
     )
     assert output == expected_output
-
-
-def test_batch() -> None:
-    llm = FakeLLM()
-    output = llm.batch(["foo", "bar", "foo"])
-    assert output == ["foo"] * 3
-
-    output = llm.batch(["foo", "bar", "foo"], config={"max_concurrency": 2})
-    assert output == ["foo"] * 3
-
-
-async def test_abatch() -> None:
-    llm = FakeLLM()
-    output = await llm.abatch(["foo", "bar", "foo"])
-    assert output == ["foo"] * 3
-
-    output = await llm.abatch(["foo", "bar", "foo"], config={"max_concurrency": 2})
-    assert output == ["foo"] * 3
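Note on the shape of the fix: because the patch restores BaseLanguageModel and
_get_verbosity as plain re-exports on the legacy langchain.llms.base path, the
old import path should resolve to the very same objects that now live in
langchain_core. A minimal sketch of such a check (assuming langchain and
langchain-core builds that include this patch; this snippet is illustrative
and not part of the test suite added above):

    # Hypothetical sanity check, not part of this PR's tests.
    # Assumes langchain + langchain-core versions containing this patch.
    from langchain.llms.base import LLM, BaseLLM, BaseLanguageModel
    from langchain_core.language_models import BaseLanguageModel as CoreBLM
    from langchain_core.language_models.llms import LLM as CoreLLM

    # The legacy module re-exports the langchain_core objects directly,
    # so identity (not merely equality) should hold.
    assert BaseLanguageModel is CoreBLM
    assert LLM is CoreLLM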