community: Use Blockbuster to detect blocking calls in asyncio during tests (#29609)

Same as https://github.com/langchain-ai/langchain/pull/29043 for
langchain-community.
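
For context, a minimal sketch of what Blockbuster does (illustrative only, not part of this diff): inside `blockbuster_ctx`, a known blocking call such as `time.sleep` made on the event loop raises `BlockingError`, while the awaitable equivalent passes.

```python
# Illustrative sketch: Blockbuster wraps known blocking functions
# (time.sleep, os.stat, file reads, ...) and raises BlockingError when
# one of them runs inside the event loop.
import asyncio
import time

from blockbuster import BlockingError, blockbuster_ctx


async def main() -> None:
    with blockbuster_ctx():
        try:
            time.sleep(0.1)  # blocks the event loop -> flagged
        except BlockingError as e:
            print(f"blocking call detected: {e}")
        await asyncio.sleep(0.1)  # yields to the event loop -> allowed


asyncio.run(main())
```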

**Dependencies:**
- blockbuster (test)

**Twitter handle:** cbornet_

Co-authored-by: Erick Friis <erick@langchain.dev>
Author: Christophe Bornet
Date: 2025-02-08 02:10:39 +01:00
Committed by: GitHub
Commit: 30f6c9f5c8 (parent 3a57a28daa)

8 changed files with 104 additions and 26 deletions

libs/community/tests/unit_tests/conftest.py

@@ -1,12 +1,28 @@
 """Configuration for unit tests."""

+from collections.abc import Iterator
 from importlib import util
 from typing import Dict, Sequence

 import pytest
+from blockbuster import blockbuster_ctx
 from pytest import Config, Function, Parser
+
+
+@pytest.fixture(autouse=True)
+def blockbuster() -> Iterator[None]:
+    with blockbuster_ctx("langchain_community") as bb:
+        (
+            bb.functions["os.stat"]
+            .can_block_in("langchain_community/utils/openai.py", "is_openai_v1")
+            .can_block_in("httpx/_client.py", "_init_transport")
+        )
+        bb.functions["os.path.abspath"].can_block_in(
+            "sqlalchemy/dialects/sqlite/pysqlite.py", "create_connect_args"
+        )
+        yield


 def pytest_addoption(parser: Parser) -> None:
     """Add custom command line options to pytest."""
     parser.addoption(
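
With this autouse fixture, every unit test in langchain-community runs under Blockbuster; the `can_block_in` calls allowlist the few call sites that legitimately block. A hedged sketch of the kind of test the fixture now rejects, and its non-blocking fix (illustrative test names, not from this PR):

```python
import asyncio
import time


async def test_blocks_the_event_loop() -> None:
    # Under the autouse blockbuster fixture this fails with BlockingError:
    # time.sleep() stalls the event loop instead of yielding to it.
    time.sleep(0.1)


async def test_yields_to_the_event_loop() -> None:
    # The awaitable equivalent passes under the same fixture.
    await asyncio.sleep(0.1)
```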

libs/community/tests/unit_tests/test_cache.py

@@ -62,20 +62,31 @@ def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, None]:
         raise ValueError("Cache not set. This should never happen.")


-async def test_llm_caching() -> None:
+def test_llm_caching() -> None:
     prompt = "How are you?"
     response = "Test response"
     cached_response = "Cached test response"
     llm = FakeListLLM(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=prompt,
             llm_string=create_llm_string(llm),
             return_val=[Generation(text=cached_response)],
         )
         assert llm.invoke(prompt) == cached_response
-        # async test
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
+
+async def test_llm_caching_async() -> None:
+    prompt = "How are you?"
+    response = "Test response"
+    cached_response = "Cached test response"
+    llm = FakeListLLM(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=prompt,
             llm_string=create_llm_string(llm),
@@ -110,14 +121,13 @@ def test_old_sqlite_llm_caching() -> None:
     assert llm.invoke(prompt) == cached_response


-async def test_chat_model_caching() -> None:
+def test_chat_model_caching() -> None:
     prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
     response = "Test response"
     cached_response = "Cached test response"
     cached_message = AIMessage(content=cached_response)
     llm = FakeListChatModel(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(),
@@ -126,8 +136,20 @@ async def test_chat_model_caching() -> None:
         result = llm.invoke(prompt)
         assert isinstance(result, AIMessage)
         assert result.content == cached_response
-
-        # async test
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
+
+async def test_chat_model_caching_async() -> None:
+    prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
+    response = "Test response"
+    cached_response = "Cached test response"
+    cached_message = AIMessage(content=cached_response)
+    llm = FakeListChatModel(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(),
@@ -143,14 +165,13 @@ async def test_chat_model_caching() -> None:
         )


-async def test_chat_model_caching_params() -> None:
+def test_chat_model_caching_params() -> None:
     prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
     response = "Test response"
     cached_response = "Cached test response"
     cached_message = AIMessage(content=cached_response)
     llm = FakeListChatModel(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(functions=[]),
@@ -162,8 +183,20 @@ async def test_chat_model_caching_params() -> None:
         assert result.content == cached_response
         assert isinstance(result_no_params, AIMessage)
         assert result_no_params.content == response
-
-        # async test
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
+
+async def test_chat_model_caching_params_async() -> None:
+    prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
+    response = "Test response"
+    cached_response = "Cached test response"
+    cached_message = AIMessage(content=cached_response)
+    llm = FakeListChatModel(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(functions=[]),
@@ -182,13 +215,12 @@ async def test_chat_model_caching_params() -> None:
         )


-async def test_llm_cache_clear() -> None:
+def test_llm_cache_clear() -> None:
     prompt = "How are you?"
     expected_response = "Test response"
     cached_response = "Cached test response"
     llm = FakeListLLM(responses=[expected_response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=prompt,
             llm_string=create_llm_string(llm),
@@ -197,8 +229,19 @@ async def test_llm_cache_clear() -> None:
         llm_cache.clear()
         response = llm.invoke(prompt)
         assert response == expected_response
-
-        # async test
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
+
+async def test_llm_cache_clear_async() -> None:
+    prompt = "How are you?"
+    expected_response = "Test response"
+    cached_response = "Cached test response"
+    llm = FakeListLLM(responses=[expected_response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=prompt,
             llm_string=create_llm_string(llm),
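
The pattern repeated through this file: the old combined tests called the sync cache API (`update`/`invoke`) and then the async API (`aupdate`/`ainvoke`) from a single `async def`, so the sync half blocked the event loop and now trips Blockbuster. Each test is therefore split into a plain `def` for the sync path and an `async def` that sticks to the awaitable API. A standalone sketch of why the sync half is a problem (`sync_cache_lookup` is a stand-in, not from the diff):

```python
import asyncio
import os

from blockbuster import BlockingError, blockbuster_ctx


def sync_cache_lookup(path: str) -> bool:
    # Stand-in for a sync cache call: os.path.exists() goes through
    # os.stat, one of the functions Blockbuster instruments.
    return os.path.exists(path)


async def main() -> None:
    with blockbuster_ctx():
        try:
            sync_cache_lookup(".langchain.db")  # filesystem hit on the loop
        except BlockingError:
            print("sync cache call flagged inside the event loop")


asyncio.run(main())
```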

libs/community/tests/unit_tests/test_dependencies.py

@@ -76,6 +76,7 @@ def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
         "pytest-socket",
         "pytest-watcher",
         "pytest-xdist",
+        "blockbuster",
         "responses",
         "syrupy",
         "toml",

libs/community/tests/unit_tests/vectorstores/test_faiss.py

@@ -1589,10 +1589,10 @@ def test_faiss_local_save_load() -> None:


 @pytest.mark.requires("faiss")
-async def test_faiss_async_local_save_load() -> None:
+def test_faiss_async_local_save_load() -> None:
     """Test end to end serialization."""
     texts = ["foo", "bar", "baz"]
-    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
+    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
     temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
     with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
         docsearch.save_local(temp_folder)
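
Here the async test is converted to a plain sync test, since `save_local` blocks either way. Had async coverage been worth keeping, the blocking call could instead be pushed off the loop; a hedged alternative sketch (not what this PR does, and the `FakeEmbeddings` import path is assumed):

```python
import asyncio
import tempfile

from langchain_community.vectorstores import FAISS

# Assumed import path for the fake-embeddings helper used by the test module.
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


async def test_faiss_save_load_off_loop() -> None:
    docsearch = await FAISS.afrom_texts(["foo", "bar", "baz"], FakeEmbeddings())
    with tempfile.TemporaryDirectory() as temp_folder:
        # Run the blocking serialization in a worker thread so the event
        # loop stays free and Blockbuster has nothing to flag.
        await asyncio.to_thread(docsearch.save_local, temp_folder)
```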

libs/community/tests/unit_tests/vectorstores/test_inmemory.py

@@ -72,17 +72,17 @@ async def test_inmemory_mmr() -> None:
     assert output[1] == _AnyDocument(page_content="foy")


-async def test_inmemory_dump_load(tmp_path: Path) -> None:
+def test_inmemory_dump_load(tmp_path: Path) -> None:
     """Test end to end construction and search."""
     embedding = ConsistentFakeEmbeddings()
-    store = await InMemoryVectorStore.afrom_texts(["foo", "bar", "baz"], embedding)
-    output = await store.asimilarity_search("foo", k=1)
+    store = InMemoryVectorStore.from_texts(["foo", "bar", "baz"], embedding)
+    output = store.similarity_search("foo", k=1)

     test_file = str(tmp_path / "test.json")
     store.dump(test_file)

     loaded_store = InMemoryVectorStore.load(test_file, embedding)
-    loaded_output = await loaded_store.asimilarity_search("foo", k=1)
+    loaded_output = loaded_store.similarity_search("foo", k=1)

     assert output == loaded_output
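
A convenience sketch for exercising the touched modules locally under the new fixture (run from `libs/community`; paths assumed, not part of the PR):

```python
# Runs the touched test modules with the autouse blockbuster fixture
# from conftest.py active. Paths assumed.
import pytest

raise SystemExit(
    pytest.main(
        [
            "tests/unit_tests/test_cache.py",
            "tests/unit_tests/vectorstores/test_faiss.py",
            "tests/unit_tests/vectorstores/test_inmemory.py",
            "-q",
        ]
    )
)
```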