standard-tests: Ruff autofixes (#31862)

Auto-fixes from ruff with rule ALL
Christophe Bornet
2025-07-07 16:27:39 +02:00
committed by GitHub
parent 2df3fdf40d
commit 9368b92b2c
19 changed files with 120 additions and 178 deletions
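
Note: a commit like this is typically produced by enabling Ruff's full rule set and letting it apply the fixes it considers safe. The exact invocation is not recorded in this diff, so the following is a minimal sketch of the assumed setup:

    # pyproject.toml -- assumed configuration, not part of this commit
    [tool.ruff.lint]
    select = ["ALL"]

    # then, from the package root:
    # ruff check --fix .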


@@ -26,13 +26,13 @@ from .tools import ToolsIntegrationTests
 from .vectorstores import VectorStoreIntegrationTests

 __all__ = [
-    "ChatModelIntegrationTests",
-    "EmbeddingsIntegrationTests",
-    "ToolsIntegrationTests",
+    "AsyncCacheTestSuite",
     "BaseStoreAsyncTests",
     "BaseStoreSyncTests",
-    "AsyncCacheTestSuite",
-    "SyncCacheTestSuite",
-    "VectorStoreIntegrationTests",
+    "ChatModelIntegrationTests",
+    "EmbeddingsIntegrationTests",
     "RetrieversIntegrationTests",
+    "SyncCacheTestSuite",
+    "ToolsIntegrationTests",
+    "VectorStoreIntegrationTests",
 ]
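
Editor's note: the reshuffling above matches Ruff's RUF022 fix (unsorted __all__): the export list is sorted alphabetically, and no names are added or removed.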


@@ -1,5 +1,4 @@
-"""
-Standard tests for the BaseStore abstraction
+"""Standard tests for the BaseStore abstraction.

 We don't recommend implementing externally managed BaseStore abstractions at this time.
@@ -38,10 +37,9 @@ class BaseStoreSyncTests(BaseStandardTests, Generic[V]):
     """

     @abstractmethod
-    @pytest.fixture()
+    @pytest.fixture
     def three_values(self) -> tuple[V, V, V]:
         """Three example values that will be used in the tests."""
-        pass

     def test_three_values(self, three_values: tuple[V, V, V]) -> None:
         """Test that the fixture provides three values."""
@@ -169,10 +167,9 @@ class BaseStoreAsyncTests(BaseStandardTests, Generic[V]):
     """

     @abstractmethod
-    @pytest.fixture()
+    @pytest.fixture
     def three_values(self) -> tuple[V, V, V]:
         """Three example values that will be used in the tests."""
-        pass

     async def test_three_values(self, three_values: tuple[V, V, V]) -> None:
         """Test that the fixture provides three values."""


@@ -1,5 +1,4 @@
-"""
-Standard tests for the BaseCache abstraction
+"""Standard tests for the BaseCache abstraction.

 We don't recommend implementing externally managed BaseCache abstractions at this time.


@@ -41,9 +41,7 @@ from langchain_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION
 def _get_joke_class(
     schema_type: Literal["pydantic", "typeddict", "json_schema"],
 ) -> Any:
-    """
-    :private:
-    """
+    """:private:"""

     class Joke(BaseModel):
         """Joke to tell user."""
@@ -61,18 +59,18 @@ def _get_joke_class(
         punchline: Annotated[str, ..., "answer to resolve the joke"]

     def validate_joke_dict(result: Any) -> bool:
-        return all(key in ["setup", "punchline"] for key in result.keys())
+        return all(key in ["setup", "punchline"] for key in result)

     if schema_type == "pydantic":
         return Joke, validate_joke
-    elif schema_type == "typeddict":
+    if schema_type == "typeddict":
         return JokeDict, validate_joke_dict
-    elif schema_type == "json_schema":
+    if schema_type == "json_schema":
         return Joke.model_json_schema(), validate_joke_dict
-    else:
-        raise ValueError("Invalid schema type")
+    msg = "Invalid schema type"
+    raise ValueError(msg)


 class _TestCallbackHandler(BaseCallbackHandler):
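
Editor's note: the branch rewrite above reflects RET505 (no elif/else needed after a branch that returns) and EM101 (assign the message to a variable instead of passing a string literal to the exception). A standalone sketch of the same shape:

    def pick_schema(schema_type: str) -> str:
        if schema_type == "pydantic":
            return "pydantic schema"
        if schema_type == "typeddict":  # plain "if": the branch above already returned
            return "typeddict schema"
        msg = "Invalid schema type"  # EM101: message via a variable
        raise ValueError(msg)
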
@@ -879,8 +877,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert len(result.content) > 0

     def test_double_messages_conversation(self, model: BaseChatModel) -> None:
-        """
-        Test to verify that the model can handle double-message conversations.
+        """Test to verify that the model can handle double-message conversations.

         This should pass for all integrations. Tests the model's ability to process
         a sequence of double-system, double-human, and double-ai messages as context
@@ -1083,7 +1080,8 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert usage_metadata.get("input_tokens", 0) >= total_detailed_tokens

     def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
-        """
+        """Test usage metadata in streaming mode.
+
         Test to verify that the model returns correct usage metadata in streaming mode.

         .. versionchanged:: 0.3.17
@@ -1193,7 +1191,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                 "Only one chunk should set input_tokens,"
                 " the rest should be 0 or None"
             )
-            full = chunk if full is None else cast(AIMessageChunk, full + chunk)
+            full = chunk if full is None else cast("AIMessageChunk", full + chunk)

         assert isinstance(full, AIMessageChunk)
         assert full.usage_metadata is not None
@@ -1261,7 +1259,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                 run_manager: Optional[CallbackManagerForLLMRun] = None,
                 **kwargs: Any,
             ) -> ChatResult:
-        """  # noqa: E501
+        """
         result = model.invoke("hi", stop=["you"])
         assert isinstance(result, AIMessage)
@@ -1315,10 +1313,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         """
         if not self.has_tool_calling:
             pytest.skip("Test requires tool calling.")
-        if not self.has_tool_choice:
-            tool_choice_value = None
-        else:
-            tool_choice_value = "any"
+        tool_choice_value = None if not self.has_tool_choice else "any"

         # Emit warning if tool_choice_value property is overridden
         if inspect.getattr_static(
             self, "tool_choice_value"
@@ -1391,10 +1386,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         """
         if not self.has_tool_calling:
             pytest.skip("Test requires tool calling.")
-        if not self.has_tool_choice:
-            tool_choice_value = None
-        else:
-            tool_choice_value = "any"
+        tool_choice_value = None if not self.has_tool_choice else "any"
         model_with_tools = model.bind_tools(
             [magic_function], tool_choice=tool_choice_value
         )
@@ -1730,10 +1722,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         """  # noqa: E501
         if not self.has_tool_calling:
             pytest.skip("Test requires tool calling.")
-        if not self.has_tool_choice:
-            tool_choice_value = None
-        else:
-            tool_choice_value = "any"
+        tool_choice_value = None if not self.has_tool_choice else "any"
         model_with_tools = model.bind_tools(
             [magic_function_no_args], tool_choice=tool_choice_value
         )
@@ -1856,14 +1845,15 @@ class ChatModelIntegrationTests(ChatModelTests):
             @pytest.mark.xfail(reason=("Not implemented."))
             def test_structured_few_shot_examples(self, *args: Any) -> None:
                 super().test_structured_few_shot_examples(*args)
-        """ # noqa: E501
+        """
         if not self.has_tool_calling:
             pytest.skip("Test requires tool calling.")

         model_with_tools = model.bind_tools([my_adder_tool], tool_choice="any")
         function_result = json.dumps({"result": 3})

         tool_schema = my_adder_tool.args_schema
-        assert isinstance(tool_schema, type) and issubclass(tool_schema, BaseModel)
+        assert isinstance(tool_schema, type)
+        assert issubclass(tool_schema, BaseModel)
         few_shot_messages = tool_example_to_messages(
             "What is 1 + 2",
             [tool_schema(a=1, b=2)],
@@ -1871,7 +1861,7 @@ class ChatModelIntegrationTests(ChatModelTests):
             ai_response=function_result,
         )

-        messages = few_shot_messages + [HumanMessage("What is 3 + 4")]
+        messages = [*few_shot_messages, HumanMessage("What is 3 + 4")]
         result = model_with_tools.invoke(messages)
         assert isinstance(result, AIMessage)
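
Editor's note: two more mechanical fixes appear in this test: the compound assert is split in two (PT018) so a failure reports exactly which condition broke, and list concatenation is replaced with iterable unpacking (RUF005). A small sketch with assumed values:

    few_shot_messages = ["example question", "example answer"]
    # RUF005: unpack instead of few_shot_messages + ["What is 3 + 4"]
    messages = [*few_shot_messages, "What is 3 + 4"]

    tool_schema: object = int
    # PT018: one condition per assert
    assert isinstance(tool_schema, type)
    assert issubclass(tool_schema, object)
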
@@ -1905,7 +1895,7 @@ class ChatModelIntegrationTests(ChatModelTests):
            most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html

            See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
-        """ # noqa: E501
+        """
         if not self.has_structured_output:
             pytest.skip("Test requires structured output.")
@@ -1983,7 +1973,7 @@ class ChatModelIntegrationTests(ChatModelTests):
            most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html

            See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
-        """ # noqa: E501
+        """
         if not self.has_structured_output:
             pytest.skip("Test requires structured output.")
@@ -2163,7 +2153,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert isinstance(result, dict)

     def test_json_mode(self, model: BaseChatModel) -> None:
-        """Test structured output via `JSON mode. <https://python.langchain.com/docs/concepts/structured_outputs/#json-mode>`_
+        """Test structured output via `JSON mode. <https://python.langchain.com/docs/concepts/structured_outputs/#json-mode>`_.

         This test is optional and should be skipped if the model does not support
         the JSON mode feature (see Configuration below).
@@ -2183,7 +2173,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         .. dropdown:: Troubleshooting

             See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
-        """ # noqa: E501
+        """
         if not self.supports_json_mode:
             pytest.skip("Test requires json mode support.")
@@ -2893,20 +2883,20 @@ class ChatModelIntegrationTests(ChatModelTests):
     def invoke_with_audio_input(self, *, stream: bool = False) -> AIMessage:
         """:private:"""
-        raise NotImplementedError()
+        raise NotImplementedError

     def invoke_with_audio_output(self, *, stream: bool = False) -> AIMessage:
         """:private:"""
-        raise NotImplementedError()
+        raise NotImplementedError

     def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
         """:private:"""
-        raise NotImplementedError()
+        raise NotImplementedError

     def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
         """:private:"""
-        raise NotImplementedError()
+        raise NotImplementedError

     def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
         """:private:"""
-        raise NotImplementedError()
+        raise NotImplementedError
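
Editor's note: the repeated change above is RSE102, which drops the empty call parentheses when an exception is raised with no arguments. The earlier cast change in this file is TC006, which quotes the target type so it only needs to exist for type checkers. A minimal sketch of both:

    from typing import cast

    def first_token(value: object) -> str:
        # TC006: the quoted type is resolved lazily, not at import time
        return cast("str", value).split()[0]

    def not_implemented_stub() -> None:
        raise NotImplementedError  # RSE102: no "()" when there are no arguments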


@@ -31,7 +31,7 @@ class DocumentIndexerTestSuite(ABC):
         """Get the index."""

     def test_upsert_documents_has_no_ids(self, index: DocumentIndex) -> None:
-        """Verify that there is not parameter called ids in upsert"""
+        """Verify that there is not parameter called ids in upsert."""
         signature = inspect.signature(index.upsert)
         assert "ids" not in signature.parameters
@@ -75,7 +75,7 @@ class DocumentIndexerTestSuite(ABC):
         ]
         response = index.upsert(documents)
         ids = response["succeeded"]
-        other_id = list(set(ids) - {foo_uuid})[0]
+        other_id = next(iter(set(ids) - {foo_uuid}))
         assert response["failed"] == []
         assert foo_uuid in ids
         # Ordering is not guaranteed, so we use a set.
@@ -221,7 +221,7 @@ class AsyncDocumentIndexTestSuite(ABC):
         """Get the index."""

     async def test_upsert_documents_has_no_ids(self, index: DocumentIndex) -> None:
-        """Verify that there is not parameter called ids in upsert"""
+        """Verify that there is not parameter called ids in upsert."""
         signature = inspect.signature(index.upsert)
         assert "ids" not in signature.parameters
@@ -265,7 +265,7 @@ class AsyncDocumentIndexTestSuite(ABC):
         ]
         response = await index.aupsert(documents)
         ids = response["succeeded"]
-        other_id = list(set(ids) - {foo_uuid})[0]
+        other_id = next(iter(set(ids) - {foo_uuid}))
         assert response["failed"] == []
         assert foo_uuid in ids
         # Ordering is not guaranteed, so we use a set.
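
Editor's note: swapping list(set(ids) - {foo_uuid})[0] for next(iter(...)) is Ruff's RUF015 fix: it takes the first element directly instead of materializing a temporary list. For example:

    ids = {"id-1", "id-2"}
    other_id = next(iter(ids - {"id-1"}))  # "id-2", with no intermediate list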


@@ -8,43 +8,32 @@ from langchain_tests.base import BaseStandardTests
 class RetrieversIntegrationTests(BaseStandardTests):
-    """
-    Base class for retrievers integration tests.
-    """
+    """Base class for retrievers integration tests."""

     @property
     @abstractmethod
     def retriever_constructor(self) -> type[BaseRetriever]:
-        """
-        A BaseRetriever subclass to be tested.
-        """
+        """A BaseRetriever subclass to be tested."""
         ...

     @property
     def retriever_constructor_params(self) -> dict:
-        """
-        Returns a dictionary of parameters to pass to the retriever constructor.
-        """
+        """Returns a dictionary of parameters to pass to the retriever constructor."""
         return {}

     @property
     @abstractmethod
     def retriever_query_example(self) -> str:
-        """
-        Returns a str representing the "query" of an example retriever call.
-        """
+        """Returns a str representing the "query" of an example retriever call."""
         ...

     @pytest.fixture
     def retriever(self) -> BaseRetriever:
-        """
-        :private:
-        """
+        """:private:"""
         return self.retriever_constructor(**self.retriever_constructor_params)

     def test_k_constructor_param(self) -> None:
-        """
-        Test that the retriever constructor accepts a k parameter, representing
+        """Test that the retriever constructor accepts a k parameter, representing
         the number of documents to return.

         .. dropdown:: Troubleshooting
@@ -77,8 +66,7 @@ class RetrieversIntegrationTests(BaseStandardTests):
         assert all(isinstance(doc, Document) for doc in result_1)

     def test_invoke_with_k_kwarg(self, retriever: BaseRetriever) -> None:
-        """
-        Test that the invoke method accepts a k parameter, representing the number of
+        """Test that the invoke method accepts a k parameter, representing the number of
         documents to return.

         .. dropdown:: Troubleshooting
@@ -104,8 +92,7 @@ class RetrieversIntegrationTests(BaseStandardTests):
         assert all(isinstance(doc, Document) for doc in result_3)

     def test_invoke_returns_documents(self, retriever: BaseRetriever) -> None:
-        """
-        If invoked with the example params, the retriever should return a list of
+        """If invoked with the example params, the retriever should return a list of
         Documents.

         .. dropdown:: Troubleshooting
@@ -120,8 +107,7 @@ class RetrieversIntegrationTests(BaseStandardTests):
         assert all(isinstance(doc, Document) for doc in result)

     async def test_ainvoke_returns_documents(self, retriever: BaseRetriever) -> None:
-        """
-        If ainvoked with the example params, the retriever should return a list of
+        """If ainvoked with the example params, the retriever should return a list of
         Documents.

         See :meth:`test_invoke_returns_documents` for more information on


@@ -5,12 +5,11 @@ from langchain_tests.unit_tests.tools import ToolsTests
 class ToolsIntegrationTests(ToolsTests):
-    """
-    Base class for tools integration tests.
-    """
+    """Base class for tools integration tests."""

     def test_invoke_matches_output_schema(self, tool: BaseTool) -> None:
-        """
+        """Test invoke matches output schema.
+
         If invoked with a ToolCall, the tool should return a valid ToolMessage content.

         If you have followed the `custom tool guide <https://python.langchain.com/docs/how_to/custom_tools/>`_,
@@ -41,7 +40,8 @@ class ToolsIntegrationTests(ToolsTests):
         assert all(isinstance(c, (str, dict)) for c in tool_message.content)

     async def test_async_invoke_matches_output_schema(self, tool: BaseTool) -> None:
-        """
+        """Test async invoke matches output schema.
+
         If ainvoked with a ToolCall, the tool should return a valid ToolMessage content.

         For debugging tips, see :meth:`test_invoke_matches_output_schema`.
@@ -66,9 +66,8 @@ class ToolsIntegrationTests(ToolsTests):
         assert all(isinstance(c, (str, dict)) for c in tool_message.content)

     def test_invoke_no_tool_call(self, tool: BaseTool) -> None:
-        """
-        If invoked without a ToolCall, the tool can return anything
-        but it shouldn't throw an error
+        """If invoked without a ToolCall, the tool can return anything
+        but it shouldn't throw an error.

         If this test fails, your tool may not be handling the input you defined
         in `tool_invoke_params_example` correctly, and it's throwing an error.
@@ -79,9 +78,8 @@ class ToolsIntegrationTests(ToolsTests):
         tool.invoke(self.tool_invoke_params_example)

     async def test_async_invoke_no_tool_call(self, tool: BaseTool) -> None:
-        """
-        If ainvoked without a ToolCall, the tool can return anything
-        but it shouldn't throw an error
+        """If ainvoked without a ToolCall, the tool can return anything
+        but it shouldn't throw an error.

         For debugging tips, see :meth:`test_invoke_no_tool_call`.
         """


@@ -105,16 +105,12 @@ class VectorStoreIntegrationTests(BaseStandardTests):
     @property
     def has_sync(self) -> bool:
-        """
-        Configurable property to enable or disable sync tests.
-        """
+        """Configurable property to enable or disable sync tests."""
         return True

     @property
     def has_async(self) -> bool:
-        """
-        Configurable property to enable or disable async tests.
-        """
+        """Configurable property to enable or disable async tests."""
         return True

     @staticmethod
@@ -368,7 +364,7 @@ class VectorStoreIntegrationTests(BaseStandardTests):
             @pytest.mark.xfail(reason=("get_by_ids not implemented."))
             def test_get_by_ids_missing(self, vectorstore: VectorStore) -> None:
                 super().test_get_by_ids_missing(vectorstore)
-        """ # noqa: E501
+        """
         if not self.has_sync:
             pytest.skip("Sync tests not supported.")