community: Use Blockbuster to detect blocking calls in asyncio during tests (#29609)

Same as https://github.com/langchain-ai/langchain/pull/29043 for
langchain-community.
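
For context, Blockbuster patches well-known blocking calls (`time.sleep`, file and socket I/O, `os.stat`, ...) so that invoking them while an event loop is running raises an error. Below is a minimal standalone sketch of the behavior the new test fixture relies on; it is not code from this PR, and it assumes `blockbuster_ctx` and `BlockingError` from the blockbuster package behave as in the library's README:

```python
import asyncio
import time

from blockbuster import BlockingError, blockbuster_ctx


async def main() -> None:
    with blockbuster_ctx():  # activate blocking-call detection
        try:
            time.sleep(0.01)  # a blocking call inside the event loop
        except BlockingError as err:
            print(f"Blockbuster flagged a blocking call: {err}")


asyncio.run(main())
```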

**Dependencies:**
- blockbuster (test)

**Twitter handle:** cbornet_

Co-authored-by: Erick Friis <erick@langchain.dev>
Christophe Bornet 2025-02-08 02:10:39 +01:00 committed by GitHub
parent 3a57a28daa
commit 30f6c9f5c8
8 changed files with 104 additions and 26 deletions


@@ -1,10 +1,12 @@
 from __future__ import annotations

+import functools
 from importlib.metadata import version

 from packaging.version import parse


+@functools.cache
 def is_openai_v1() -> bool:
     """Return whether OpenAI API is v1 or more."""
     _version = parse(version("openai"))


@@ -4,7 +4,7 @@ build-backend = "pdm.backend"
 [project]
 authors = []
-license = {text = "MIT"}
+license = { text = "MIT" }
 requires-python = "<4.0,>=3.9"
 dependencies = [
     "langchain-core<1.0.0,>=0.3.34",
@@ -48,6 +48,7 @@ test = [
     "syrupy<5.0.0,>=4.0.2",
     "requests-mock<2.0.0,>=1.11.0",
     "pytest-xdist<4.0.0,>=3.6.1",
+    "blockbuster<1.6,>=1.5.13",
     "cffi<1.17.1; python_version < \"3.10\"",
     "cffi; python_version >= \"3.10\"",
     "langchain-core @ file:///${PROJECT_ROOT}/../core",
@@ -55,13 +56,8 @@ test = [
     "langchain-tests @ file:///${PROJECT_ROOT}/../standard-tests",
     "toml>=0.10.2",
 ]
-codespell = [
-    "codespell<3.0.0,>=2.2.0",
-]
-test_integration = [
-    "pytest-vcr<2.0.0,>=1.0.2",
-    "vcrpy<7,>=6",
-]
+codespell = ["codespell<3.0.0,>=2.2.0"]
+test_integration = ["pytest-vcr<2.0.0,>=1.0.2", "vcrpy<7,>=6"]
 lint = [
     "ruff<0.6,>=0.5",
     "cffi<1.17.1; python_version < \"3.10\"",


@@ -1,12 +1,28 @@
 """Configuration for unit tests."""

+from collections.abc import Iterator
 from importlib import util
 from typing import Dict, Sequence

 import pytest
+from blockbuster import blockbuster_ctx
 from pytest import Config, Function, Parser


+@pytest.fixture(autouse=True)
+def blockbuster() -> Iterator[None]:
+    with blockbuster_ctx("langchain_community") as bb:
+        (
+            bb.functions["os.stat"]
+            .can_block_in("langchain_community/utils/openai.py", "is_openai_v1")
+            .can_block_in("httpx/_client.py", "_init_transport")
+        )
+        bb.functions["os.path.abspath"].can_block_in(
+            "sqlalchemy/dialects/sqlite/pysqlite.py", "create_connect_args"
+        )
+        yield
+
+
 def pytest_addoption(parser: Parser) -> None:
     """Add custom command line options to pytest."""
     parser.addoption(
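
This fixture activates Blockbuster for every unit test in `langchain_community` and allow-lists a few known blocking calls with `can_block_in(filename, function_name)` (for example the `os.stat` triggered by `is_openai_v1`, which `@functools.cache` now makes a one-time cost). As a hedged sketch, another known-safe call site could be allow-listed inside the same `with` block like this; the path and function name below are hypothetical placeholders, not part of this PR:

```python
# Hypothetical allow-list entry: permit os.stat when it is called from
# load_manifest in "my_pkg/loader.py" (both names are placeholders).
bb.functions["os.stat"].can_block_in("my_pkg/loader.py", "load_manifest")
```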


@@ -62,20 +62,31 @@ def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, None]:
         raise ValueError("Cache not set. This should never happen.")


-async def test_llm_caching() -> None:
+def test_llm_caching() -> None:
     prompt = "How are you?"
     response = "Test response"
     cached_response = "Cached test response"
     llm = FakeListLLM(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=prompt,
             llm_string=create_llm_string(llm),
             return_val=[Generation(text=cached_response)],
         )
         assert llm.invoke(prompt) == cached_response
-        # async test
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
+
+async def test_llm_caching_async() -> None:
+    prompt = "How are you?"
+    response = "Test response"
+    cached_response = "Cached test response"
+    llm = FakeListLLM(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=prompt,
             llm_string=create_llm_string(llm),
@@ -110,14 +121,13 @@ def test_old_sqlite_llm_caching() -> None:
     assert llm.invoke(prompt) == cached_response


-async def test_chat_model_caching() -> None:
+def test_chat_model_caching() -> None:
     prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
     response = "Test response"
     cached_response = "Cached test response"
     cached_message = AIMessage(content=cached_response)
     llm = FakeListChatModel(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(),
@@ -126,8 +136,20 @@ async def test_chat_model_caching() -> None:
         result = llm.invoke(prompt)
         assert isinstance(result, AIMessage)
         assert result.content == cached_response
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )

-        # async test
+
+async def test_chat_model_caching_async() -> None:
+    prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
+    response = "Test response"
+    cached_response = "Cached test response"
+    cached_message = AIMessage(content=cached_response)
+    llm = FakeListChatModel(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(),
@@ -143,14 +165,13 @@ async def test_chat_model_caching() -> None:
         )


-async def test_chat_model_caching_params() -> None:
+def test_chat_model_caching_params() -> None:
     prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
     response = "Test response"
     cached_response = "Cached test response"
     cached_message = AIMessage(content=cached_response)
     llm = FakeListChatModel(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(functions=[]),
@@ -162,8 +183,20 @@ async def test_chat_model_caching_params() -> None:
         assert result.content == cached_response
         assert isinstance(result_no_params, AIMessage)
         assert result_no_params.content == response
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )

-        # async test
+
+async def test_chat_model_caching_params_async() -> None:
+    prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
+    response = "Test response"
+    cached_response = "Cached test response"
+    cached_message = AIMessage(content=cached_response)
+    llm = FakeListChatModel(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(functions=[]),
@@ -182,13 +215,12 @@ async def test_chat_model_caching_params() -> None:
         )


-async def test_llm_cache_clear() -> None:
+def test_llm_cache_clear() -> None:
     prompt = "How are you?"
     expected_response = "Test response"
     cached_response = "Cached test response"
     llm = FakeListLLM(responses=[expected_response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=prompt,
             llm_string=create_llm_string(llm),
@@ -197,8 +229,19 @@ async def test_llm_cache_clear() -> None:
         llm_cache.clear()
         response = llm.invoke(prompt)
         assert response == expected_response
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )

-        # async test
+
+async def test_llm_cache_clear_async() -> None:
+    prompt = "How are you?"
+    expected_response = "Test response"
+    cached_response = "Cached test response"
+    llm = FakeListLLM(responses=[expected_response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=prompt,
             llm_string=create_llm_string(llm),
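
Each of the cache tests above follows the same split: the original `async def` mixed blocking calls (`llm_cache.update`, `llm.invoke`) with their async counterparts, which the autouse Blockbuster fixture would flag for any cache backend that does real I/O. The sync half therefore becomes a plain `def` test, and the async half moves to a separate `*_async` test that sticks to the async API (`aupdate` and friends).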


@@ -76,6 +76,7 @@ def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
        "pytest-socket",
        "pytest-watcher",
        "pytest-xdist",
+       "blockbuster",
        "responses",
        "syrupy",
        "toml",


@@ -1589,10 +1589,10 @@ def test_faiss_local_save_load() -> None:


 @pytest.mark.requires("faiss")
-async def test_faiss_async_local_save_load() -> None:
+def test_faiss_async_local_save_load() -> None:
     """Test end to end serialization."""
     texts = ["foo", "bar", "baz"]
-    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
+    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
     temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
     with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
         docsearch.save_local(temp_folder)
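
This test only ever exercised blocking file I/O (`save_local` inside a sync `tempfile.TemporaryDirectory` block), so rather than allow-listing those calls in the fixture, it is converted to a plain sync test.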


@@ -72,17 +72,17 @@ async def test_inmemory_mmr() -> None:
     assert output[1] == _AnyDocument(page_content="foy")


-async def test_inmemory_dump_load(tmp_path: Path) -> None:
+def test_inmemory_dump_load(tmp_path: Path) -> None:
     """Test end to end construction and search."""
     embedding = ConsistentFakeEmbeddings()
-    store = await InMemoryVectorStore.afrom_texts(["foo", "bar", "baz"], embedding)
-    output = await store.asimilarity_search("foo", k=1)
+    store = InMemoryVectorStore.from_texts(["foo", "bar", "baz"], embedding)
+    output = store.similarity_search("foo", k=1)
     test_file = str(tmp_path / "test.json")
     store.dump(test_file)
     loaded_store = InMemoryVectorStore.load(test_file, embedding)
-    loaded_output = await loaded_store.asimilarity_search("foo", k=1)
+    loaded_output = loaded_store.similarity_search("foo", k=1)
     assert output == loaded_output


@@ -284,6 +284,18 @@ css = [
     { name = "tinycss2" },
 ]

+[[package]]
+name = "blockbuster"
+version = "1.5.14"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "forbiddenfruit" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e0/77/a46b97dc6807c88c864a134793d7c7b915dea45c7e44da6c3adebac90501/blockbuster-1.5.14.tar.gz", hash = "sha256:d77ed3b931b058b4e746f65e32ea21e8ed21a4ef0ca88b7bb046bdb057e1adb0", size = 50191 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/81/c2/1515ea61aa08f3b44882aa59a0c03be667a6fec2a4026aad76944b40b030/blockbuster-1.5.14-py3-none-any.whl", hash = "sha256:5b5e46ac4b5f5d2a7a599944d83bee0c9eb46509868acb6d8fbc7c8058769aaf", size = 12372 },
+]
+
 [[package]]
 name = "certifi"
 version = "2025.1.31"
@@ -819,6 +831,12 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/90/2b/0817a2b257fe88725c25589d89aec060581aabf668707a8d03b2e9e0cb2a/fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667", size = 23924 },
 ]

+[[package]]
+name = "forbiddenfruit"
+version = "0.1.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e6/79/d4f20e91327c98096d605646bdc6a5ffedae820f38d378d3515c42ec5e60/forbiddenfruit-0.1.4.tar.gz", hash = "sha256:e3f7e66561a29ae129aac139a85d610dbf3dd896128187ed5454b6421f624253", size = 43756 }
+
 [[package]]
 name = "fqdn"
 version = "1.5.1"
@@ -1617,6 +1635,7 @@ lint = [
     { name = "ruff" },
 ]
 test = [
+    { name = "blockbuster" },
     { name = "cffi", version = "1.17.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
     { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
     { name = "duckdb-engine" },
@@ -1687,6 +1706,7 @@ lint = [
     { name = "ruff", specifier = ">=0.5,<0.6" },
 ]
 test = [
+    { name = "blockbuster", specifier = ">=1.5.13,<1.6" },
     { name = "cffi", marker = "python_full_version < '3.10'", specifier = "<1.17.1" },
     { name = "cffi", marker = "python_full_version >= '3.10'" },
     { name = "duckdb-engine", specifier = ">=0.13.6,<1.0.0" },