Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-11 16:01:33 +00:00

chore(standard-tests): enable ruff docstring-code-format (#32852)

Committed by GitHub
Parent: 33c7f230e0
Commit: 05d14775f2
@@ -428,7 +428,8 @@ class ChatModelIntegrationTests(ChatModelTests):
on invoke and streaming responses. Defaults to ``True``.

``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
and output tokens.
`See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__

Example:

@@ -525,7 +526,8 @@ class ChatModelIntegrationTests(ChatModelTests):
and stream.

``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
and output tokens.
`See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__

It includes optional keys ``input_token_details`` and ``output_token_details``
that can track usage details associated with special types of tokens, such as
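For context, these two hunks touch the docstrings of the ``returns_usage_metadata`` and ``supported_usage_metadata_details`` properties. A minimal sketch of how a test class might override the first of these when a provider does not report token counts (the provider class and its parameters are assumptions, not part of this diff):

.. code-block:: python

    from langchain_tests.integration_tests import ChatModelIntegrationTests

    from my_provider import ChatMyProvider  # hypothetical chat model under test


    class TestChatMyProviderIntegration(ChatModelIntegrationTests):
        @property
        def chat_model_class(self) -> type:
            return ChatMyProvider

        @property
        def chat_model_params(self) -> dict:
            # Assumed constructor parameters for the hypothetical provider.
            return {"model": "my-model-001"}

        @property
        def returns_usage_metadata(self) -> bool:
            # Skip token-count assertions for providers that do not emit usage.
            return False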
@@ -549,7 +551,8 @@ class ChatModelIntegrationTests(ChatModelTests):
def enable_vcr_tests(self) -> bool:
return True

2. Configure VCR to exclude sensitive headers and other information from cassettes.
2. Configure VCR to exclude sensitive headers and other information from
cassettes.

.. important::
VCR will by default record authentication headers and other sensitive

@@ -569,7 +572,9 @@ class ChatModelIntegrationTests(ChatModelTests):
:caption: tests/conftest.py

import pytest
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)

_EXTRA_HEADERS = [
# Specify additional headers to redact
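The imports in this hunk normally feed into a ``vcr_config`` fixture in the same ``tests/conftest.py``. A rough sketch of how that file might continue, assuming illustrative header names and placeholder values:

.. code-block:: python

    import pytest
    from langchain_tests.conftest import _base_vcr_config as _base_vcr_config

    _EXTRA_HEADERS = [
        # Illustrative headers to redact; real projects list their own.
        ("openai-organization", "PLACEHOLDER"),
        ("x-api-key", "PLACEHOLDER"),
    ]


    @pytest.fixture(scope="session")
    def vcr_config(_base_vcr_config: dict) -> dict:  # noqa: F811
        """Extend the base VCR configuration shipped with langchain-tests."""
        config = dict(_base_vcr_config)
        config["filter_headers"] = [
            *config.get("filter_headers", []),
            *_EXTRA_HEADERS,
        ]
        return config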
@@ -603,8 +608,13 @@ class ChatModelIntegrationTests(ChatModelTests):
:caption: tests/conftest.py

import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)
from vcr import VCR

_EXTRA_HEADERS = [
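The ``CustomPersister``/``CustomSerializer`` pair imported here is what writes compressed ``.yaml.gz`` cassettes. A sketch of how they might be registered, assuming the ``pytest-recording`` plugin's ``pytest_recording_configure`` hook is available:

.. code-block:: python

    from langchain_tests.conftest import CustomPersister, CustomSerializer
    from vcr import VCR


    def pytest_recording_configure(config: dict, vcr: VCR) -> None:
        # Register the compressing persister and serializer with the VCR
        # instance so cassettes are written and read as .yaml.gz files.
        vcr.register_persister(CustomPersister())
        vcr.register_serializer("yaml.gz", CustomSerializer())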
@@ -648,10 +658,15 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)

cassette_path = "/path/to/tests/cassettes/TestClass_test.yaml.gz"
requests, responses = CustomPersister().load_cassette(path, CustomSerializer())
requests, responses = CustomPersister().load_cassette(
path, CustomSerializer()
)

3. Run tests to generate VCR cassettes.
@@ -697,9 +712,9 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

return ChatResult(
generations=[ChatGeneration(
message=AIMessage(content="Output text")
)]
generations=[
ChatGeneration(message=AIMessage(content="Output text"))
]
)

"""

@@ -730,9 +745,9 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

return ChatResult(
generations=[ChatGeneration(
message=AIMessage(content="Output text")
)]
generations=[
ChatGeneration(message=AIMessage(content="Output text"))
]
)

"""
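The ``ChatResult`` snippets reformatted above come from the docstring guidance for a chat model's ``_generate`` implementation. A standalone sketch of the same return shape (not the full ``BaseChatModel`` method signature):

.. code-block:: python

    from langchain_core.messages import AIMessage
    from langchain_core.outputs import ChatGeneration, ChatResult


    def build_chat_result(text: str) -> ChatResult:
        # Placeholder output; a real implementation would call the provider API
        # and put its response text here.
        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=text))]
        )


    result = build_chat_result("Output text")
    assert result.generations[0].message.content == "Output text"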
@@ -763,9 +778,7 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

yield ChatGenerationChunk(
message=AIMessageChunk(content="chunk text")
)
yield ChatGenerationChunk(message=AIMessageChunk(content="chunk text"))

"""
num_chunks = 0

@@ -800,9 +813,7 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

yield ChatGenerationChunk(
message=AIMessageChunk(content="chunk text")
)
yield ChatGenerationChunk(message=AIMessageChunk(content="chunk text"))

"""
num_chunks = 0
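Likewise, the ``ChatGenerationChunk`` lines are what a ``_stream`` implementation yields. A minimal sketch under the same caveat:

.. code-block:: python

    from collections.abc import Iterator

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk


    def stream_chunks(text: str = "chunk text") -> Iterator[ChatGenerationChunk]:
        # Yield the response one whitespace-delimited piece at a time.
        for piece in text.split():
            yield ChatGenerationChunk(message=AIMessageChunk(content=piece + " "))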
@@ -920,10 +931,11 @@ class ChatModelIntegrationTests(ChatModelTests):
because this test is the "basic case" without double messages.

If that test passes those but not this one, you should verify that:
1. Your model API can handle double messages, or the integration should merge messages before sending them to the API.
1. Your model API can handle double messages, or the integration should
merge messages before sending them to the API.
2. The response is a valid :class:`~langchain_core.messages.AIMessage`

""" # noqa: E501
"""
messages = [
SystemMessage("hello"),
SystemMessage("hello"),

@@ -1002,25 +1014,27 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

return ChatResult(
generations=[ChatGeneration(
message=AIMessage(
content="Output text",
usage_metadata={
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
generations=[
ChatGeneration(
message=AIMessage(
content="Output text",
usage_metadata={
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
}
}
)
)
)]
]
)

Check also that the response includes a ``'model_name'`` key in its
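To see what the reformatted example is asserting, ``usage_metadata`` can be inspected directly on an ``AIMessage``; the token counts below are the illustrative values from the docstring:

.. code-block:: python

    from langchain_core.messages import AIMessage

    msg = AIMessage(
        content="Output text",
        usage_metadata={
            "input_tokens": 350,
            "output_tokens": 240,
            "total_tokens": 590,
        },
    )
    usage = msg.usage_metadata or {}
    assert usage["input_tokens"] + usage["output_tokens"] == usage["total_tokens"]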
@@ -1173,29 +1187,31 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

yield ChatResult(
generations=[ChatGeneration(
message=AIMessage(
content="Output text",
usage_metadata={
"input_tokens": (
num_input_tokens if is_first_chunk else 0
),
"output_tokens": 11,
"total_tokens": (
11+num_input_tokens if is_first_chunk else 11
),
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
generations=[
ChatGeneration(
message=AIMessage(
content="Output text",
usage_metadata={
"input_tokens": (
num_input_tokens if is_first_chunk else 0
),
"output_tokens": 11,
"total_tokens": (
11 + num_input_tokens if is_first_chunk else 11
),
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
}
}
)
)
)]
]
)

Check also that the aggregated response includes a ``'model_name'`` key

@@ -1538,9 +1554,12 @@ class ChatModelIntegrationTests(ChatModelTests):

If this test fails, check that:

1. The model can correctly handle message histories that include ``AIMessage`` objects with ``""`` content.
2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format.
3. The model can correctly handle ``ToolMessage`` objects with string content and arbitrary string values for ``tool_call_id``.
1. The model can correctly handle message histories that include
``AIMessage`` objects with ``""`` content.
2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly
handled and passed to the model in an appropriate format.
3. The model can correctly handle ``ToolMessage`` objects with string
content and arbitrary string values for ``tool_call_id``.

You can ``xfail`` the test if tool calling is implemented but this format
is not supported.

@@ -1548,10 +1567,12 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

@pytest.mark.xfail(reason=("Not implemented."))
def test_tool_message_histories_string_content(self, *args: Any) -> None:
def test_tool_message_histories_string_content(
self, *args: Any
) -> None:
super().test_tool_message_histories_string_content(*args)

""" # noqa: E501
"""
if not self.has_tool_calling:
pytest.skip("Test requires tool calling.")

@@ -1625,9 +1646,12 @@ class ChatModelIntegrationTests(ChatModelTests):

If this test fails, check that:

1. The model can correctly handle message histories that include ``AIMessage`` objects with list content.
2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format.
3. The model can correctly handle ToolMessage objects with string content and arbitrary string values for ``tool_call_id``.
1. The model can correctly handle message histories that include
``AIMessage`` objects with list content.
2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly
handled and passed to the model in an appropriate format.
3. The model can correctly handle ToolMessage objects with string content
and arbitrary string values for ``tool_call_id``.

You can ``xfail`` the test if tool calling is implemented but this format
is not supported.

@@ -1638,7 +1662,7 @@ class ChatModelIntegrationTests(ChatModelTests):
def test_tool_message_histories_list_content(self, *args: Any) -> None:
super().test_tool_message_histories_list_content(*args)

""" # noqa: E501
"""
if not self.has_tool_calling:
pytest.skip("Test requires tool calling.")

@@ -1766,13 +1790,15 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

@pytest.mark.xfail(reason=("Does not support tool_choice."))
def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
def test_tool_calling_with_no_arguments(
self, model: BaseChatModel
) -> None:
super().test_tool_calling_with_no_arguments(model)

Otherwise, in the case that only one tool is bound, ensure that
``tool_choice`` supports the string ``'any'`` to force calling that tool.

""" # noqa: E501
"""
if not self.has_tool_calling:
pytest.skip("Test requires tool calling.")
@@ -2317,7 +2343,6 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

class TestMyChatModelIntegration(ChatModelIntegrationTests):

@property
def supports_pdf_inputs(self) -> bool:
return False

@@ -2394,7 +2419,6 @@ class ChatModelIntegrationTests(ChatModelTests):

.. code-block:: python

class TestMyChatModelIntegration(ChatModelIntegrationTests):

@property
def supports_audio_inputs(self) -> bool:
return False

@@ -2704,7 +2728,8 @@ class ChatModelIntegrationTests(ChatModelTests):
"content": [
{
"type": "text",
"text": "green is a great pick! that's my sister's favorite color", # noqa: E501
"text": "green is a great pick! "
"that's my sister's favorite color",
}
],
"is_error": False,

@@ -2732,14 +2757,17 @@ class ChatModelIntegrationTests(ChatModelTests):

If this test fails, check that:

1. The model can correctly handle message histories that include message objects with list content.
2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format.
3. ``HumanMessage``s with "tool_result" content blocks are correctly handled.
1. The model can correctly handle message histories that include message
objects with list content.
2. The ``tool_calls`` attribute on AIMessage objects is correctly handled
and passed to the model in an appropriate format.
3. ``HumanMessage``s with "tool_result" content blocks are correctly
handled.

Otherwise, if Anthropic tool call and result formats are not supported,
set the ``supports_anthropic_inputs`` property to False.

""" # noqa: E501
"""
if not self.supports_anthropic_inputs:
pytest.skip("Model does not explicitly support Anthropic inputs.")

@@ -3017,13 +3045,15 @@ class ChatModelIntegrationTests(ChatModelTests):

Args:
model: The chat model to test
tool_choice: Tool choice parameter to pass to ``bind_tools()`` (provider-specific)
force_tool_call: Whether to force a tool call (use ``tool_choice=True`` if None)
tool_choice: Tool choice parameter to pass to ``bind_tools()``
(provider-specific)
force_tool_call: Whether to force a tool call
(use ``tool_choice=True`` if None)

Tests that Unicode characters in tool call arguments are preserved correctly,
not escaped as ``\\uXXXX`` sequences.

""" # noqa: E501
"""
if not self.has_tool_calling:
pytest.skip("Test requires tool calling support.")
@@ -45,9 +45,10 @@ class EmbeddingsIntegrationTests(EmbeddingsTests):

If this test fails, check that:

1. The model will generate a list of floats when calling ``.embed_query`` on a string.
1. The model will generate a list of floats when calling ``.embed_query``
on a string.
2. The length of the list is consistent across different inputs.
""" # noqa: E501
"""
embedding_1 = model.embed_query("foo")

assert isinstance(embedding_1, list)
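A compact sketch of what this embeddings check amounts to, using the deterministic fake embeddings bundled with ``langchain_core`` (the vector size is arbitrary):

.. code-block:: python

    from langchain_core.embeddings.fake import DeterministicFakeEmbedding

    model = DeterministicFakeEmbedding(size=8)

    embedding_1 = model.embed_query("foo")
    embedding_2 = model.embed_query("bar")

    assert isinstance(embedding_1, list)
    assert all(isinstance(value, float) for value in embedding_1)
    # Vector length must be consistent across inputs.
    assert len(embedding_1) == len(embedding_2)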
@@ -65,9 +66,10 @@ class EmbeddingsIntegrationTests(EmbeddingsTests):

If this test fails, check that:

1. The model will generate a list of lists of floats when calling ``.embed_documents`` on a list of strings.
1. The model will generate a list of lists of floats when calling
``.embed_documents`` on a list of strings.
2. The length of each list is the same.
""" # noqa: E501
"""
documents = ["foo", "bar", "baz"]
embeddings = model.embed_documents(documents)

@@ -84,9 +86,10 @@ class EmbeddingsIntegrationTests(EmbeddingsTests):

If this test fails, check that:

1. The model will generate a list of floats when calling ``.aembed_query`` on a string.
1. The model will generate a list of floats when calling ``.aembed_query``
on a string.
2. The length of the list is consistent across different inputs.
""" # noqa: E501
"""
embedding_1 = await model.aembed_query("foo")

assert isinstance(embedding_1, list)

@@ -104,9 +107,10 @@ class EmbeddingsIntegrationTests(EmbeddingsTests):

If this test fails, check that:

1. The model will generate a list of lists of floats when calling ``.aembed_documents`` on a list of strings.
1. The model will generate a list of lists of floats when calling
``.aembed_documents`` on a list of strings.
2. The length of each list is the same.
""" # noqa: E501
"""
documents = ["foo", "bar", "baz"]
embeddings = await model.aembed_documents(documents)
@@ -87,13 +87,13 @@ class VectorStoreIntegrationTests(BaseStandardTests):

.. code-block:: python

class TestParrotVectorStore(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
...
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
...

@property
def has_async(self) -> bool:
return False
@property
def has_async(self) -> bool:
return False

.. note::
API references for individual test methods include troubleshooting tips.
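For reference, a concrete version of the fixture shown in this hunk might look like the sketch below, using the in-memory vector store and fake embeddings from ``langchain_core`` (the store choice and embedding size are assumptions):

.. code-block:: python

    from collections.abc import Generator

    import pytest
    from langchain_core.embeddings.fake import DeterministicFakeEmbedding
    from langchain_core.vectorstores import InMemoryVectorStore, VectorStore
    from langchain_tests.integration_tests import VectorStoreIntegrationTests


    class TestInMemoryVectorStore(VectorStoreIntegrationTests):
        @pytest.fixture()
        def vectorstore(self) -> Generator[VectorStore, None, None]:
            # A fresh, empty store per test; nothing to tear down for in-memory.
            yield InMemoryVectorStore(DeterministicFakeEmbedding(size=8))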
@@ -154,10 +154,13 @@ class VectorStoreIntegrationTests(BaseStandardTests):

If this test fails, check that:

1. We correctly initialize an empty vector store in the ``vectorestore`` fixture.
2. Calling ``.similarity_search`` for the top ``k`` similar documents does not threshold by score.
3. We do not mutate the original document object when adding it to the vector store (e.g., by adding an ID).
""" # noqa: E501
1. We correctly initialize an empty vector store in the ``vectorestore``
fixture.
2. Calling ``.similarity_search`` for the top ``k`` similar documents does
not threshold by score.
3. We do not mutate the original document object when adding it to the
vector store (e.g., by adding an ID).
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
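The third point (no mutation of the caller's documents) can be illustrated against any compliant store; ``vectorstore`` below is assumed to be an empty instance such as the fixture sketched earlier:

.. code-block:: python

    from langchain_core.documents import Document

    original = Document(page_content="foo")

    ids = vectorstore.add_documents([original])
    results = vectorstore.similarity_search("foo", k=1)

    assert len(ids) == 1
    assert results[0].page_content == "foo"
    # The caller's object keeps whatever ID it had (here: none).
    assert original.id is None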
@@ -404,10 +407,12 @@ class VectorStoreIntegrationTests(BaseStandardTests):

.. code-block:: python

@pytest.mark.xfail(reason=("get_by_ids not implemented."))
def test_add_documents_documents(self, vectorstore: VectorStore) -> None:
def test_add_documents_documents(
self, vectorstore: VectorStore
) -> None:
super().test_add_documents_documents(vectorstore)

""" # noqa: E501
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")

@@ -433,8 +438,10 @@ class VectorStoreIntegrationTests(BaseStandardTests):

This test also verifies that:

1. IDs specified in the ``Document.id`` field are assigned when adding documents.
2. If some documents include IDs and others don't string IDs are generated for the latter.
1. IDs specified in the ``Document.id`` field are assigned when adding
documents.
2. If some documents include IDs and others don't string IDs are generated
for the latter.

.. note::
``get_by_ids`` was added to the ``VectorStore`` interface in

@@ -444,10 +451,12 @@ class VectorStoreIntegrationTests(BaseStandardTests):

.. code-block:: python

@pytest.mark.xfail(reason=("get_by_ids not implemented."))
def test_add_documents_with_existing_ids(self, vectorstore: VectorStore) -> None:
def test_add_documents_with_existing_ids(
self, vectorstore: VectorStore
) -> None:
super().test_add_documents_with_existing_ids(vectorstore)

""" # noqa: E501
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")

@@ -485,10 +494,13 @@ class VectorStoreIntegrationTests(BaseStandardTests):

If this test fails, check that:

1. We correctly initialize an empty vector store in the ``vectorestore`` fixture.
2. Calling ``.asimilarity_search`` for the top ``k`` similar documents does not threshold by score.
3. We do not mutate the original document object when adding it to the vector store (e.g., by adding an ID).
""" # noqa: E501
1. We correctly initialize an empty vector store in the ``vectorestore``
fixture.
2. Calling ``.asimilarity_search`` for the top ``k`` similar documents does
not threshold by score.
3. We do not mutate the original document object when adding it to the
vector store (e.g., by adding an ID).
"""
if not self.has_async:
pytest.skip("Async tests not supported.")

@@ -712,10 +724,12 @@ class VectorStoreIntegrationTests(BaseStandardTests):

.. code-block:: python

@pytest.mark.xfail(reason=("get_by_ids not implemented."))
async def test_get_by_ids_missing(self, vectorstore: VectorStore) -> None:
async def test_get_by_ids_missing(
self, vectorstore: VectorStore
) -> None:
await super().test_get_by_ids_missing(vectorstore)

""" # noqa: E501
"""
if not self.has_async:
pytest.skip("Async tests not supported.")

@@ -743,10 +757,12 @@ class VectorStoreIntegrationTests(BaseStandardTests):

.. code-block:: python

@pytest.mark.xfail(reason=("get_by_ids not implemented."))
async def test_add_documents_documents(self, vectorstore: VectorStore) -> None:
async def test_add_documents_documents(
self, vectorstore: VectorStore
) -> None:
await super().test_add_documents_documents(vectorstore)

""" # noqa: E501
"""
if not self.has_async:
pytest.skip("Async tests not supported.")

@@ -774,8 +790,10 @@ class VectorStoreIntegrationTests(BaseStandardTests):

This test also verifies that:

1. IDs specified in the ``Document.id`` field are assigned when adding documents.
2. If some documents include IDs and others don't string IDs are generated for the latter.
1. IDs specified in the ``Document.id`` field are assigned when adding
documents.
2. If some documents include IDs and others don't string IDs are generated
for the latter.

.. note::
``get_by_ids`` was added to the ``VectorStore`` interface in

@@ -785,10 +803,12 @@ class VectorStoreIntegrationTests(BaseStandardTests):

.. code-block:: python

@pytest.mark.xfail(reason=("get_by_ids not implemented."))
async def test_add_documents_with_existing_ids(self, vectorstore: VectorStore) -> None:
async def test_add_documents_with_existing_ids(
self, vectorstore: VectorStore
) -> None:
await super().test_add_documents_with_existing_ids(vectorstore)

""" # noqa: E501
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
@@ -551,8 +551,9 @@ class ChatModelUnitTests(ChatModelTests):
Boolean property indicating whether the chat model returns usage metadata
on invoke and streaming responses. Defaults to ``True``.

``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track
input and output tokens.
`See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__

Example:

@@ -648,8 +649,9 @@ class ChatModelUnitTests(ChatModelTests):
Property controlling what usage metadata details are emitted in both ``invoke``
and ``stream``.

``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track
input and output tokens.
`See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__

It includes optional keys ``input_token_details`` and ``output_token_details``
that can track usage details associated with special types of tokens, such as

@@ -673,7 +675,8 @@ class ChatModelUnitTests(ChatModelTests):
def enable_vcr_tests(self) -> bool:
return True

2. Configure VCR to exclude sensitive headers and other information from cassettes.
2. Configure VCR to exclude sensitive headers and other information from
cassettes.

.. important::
VCR will by default record authentication headers and other sensitive

@@ -693,7 +696,9 @@ class ChatModelUnitTests(ChatModelTests):
:caption: tests/conftest.py

import pytest
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)

_EXTRA_HEADERS = [
# Specify additional headers to redact

@@ -727,8 +732,13 @@ class ChatModelUnitTests(ChatModelTests):
:caption: tests/conftest.py

import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)
from vcr import VCR

_EXTRA_HEADERS = [

@@ -772,10 +782,15 @@ class ChatModelUnitTests(ChatModelTests):

.. code-block:: python

from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)

cassette_path = "/path/to/tests/cassettes/TestClass_test.yaml.gz"
requests, responses = CustomPersister().load_cassette(path, CustomSerializer())
requests, responses = CustomPersister().load_cassette(
path, CustomSerializer()
)

3. Run tests to generate VCR cassettes.

@@ -858,10 +873,12 @@ class ChatModelUnitTests(ChatModelTests):

If this test fails, ensure that:

1. ``chat_model_params`` is specified and the model can be initialized from those params;
2. The model accommodates `standard parameters <https://python.langchain.com/docs/concepts/chat_models/#standard-parameters>`__
1. ``chat_model_params`` is specified and the model can be initialized
from those params;
2. The model accommodates
`standard parameters <https://python.langchain.com/docs/concepts/chat_models/#standard-parameters>`__

""" # noqa: E501
"""
model = self.chat_model_class(
**{
**self.standard_chat_model_params,
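A minimal sketch of the unit-test subclass this hunk documents, assuming the ``ChatParrotLink`` example model referenced at the end of this diff (its package path is hypothetical):

.. code-block:: python

    from langchain_tests.unit_tests import ChatModelUnitTests


    class TestChatParrotLinkUnit(ChatModelUnitTests):
        @property
        def chat_model_class(self) -> type:
            # Hypothetical import path for the docs example model.
            from langchain_parrot_link.chat_models import ChatParrotLink

            return ChatParrotLink

        @property
        def chat_model_params(self) -> dict:
            return {"model": "bird-brain-001", "parrot_buffer_length": 2}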
@@ -58,12 +58,14 @@ ignore_missing_imports = true
[tool.ruff]
target-version = "py39"

[tool.ruff.format]
docstring-code-format = true

[tool.ruff.lint]
select = [ "ALL",]
ignore = [
"C90", # McCabe complexity
"COM812", # Messes with the formatter
"FA100", # Can't activate since we exclude UP007 for now
"FIX002", # Line contains TODO
"ISC001", # Messes with the formatter
"PERF203", # Rarely useful
@@ -27,8 +27,9 @@ class ChatParrotLink(BaseChatModel):

model = ChatParrotLink(parrot_buffer_length=2, model="bird-brain-001")
result = model.invoke([HumanMessage(content="hello")])
result = model.batch([[HumanMessage(content="hello")],
[HumanMessage(content="world")]])
result = model.batch(
[[HumanMessage(content="hello")], [HumanMessage(content="world")]]
)

"""
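The ``ChatParrotLink`` calls above assume the example package from the integration template. For a snippet that runs as-is, ``langchain_core`` ships a generic fake chat model that can stand in (a sketch, not the documented API of ``ChatParrotLink``):

.. code-block:: python

    from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
    from langchain_core.messages import AIMessage, HumanMessage

    # Stand-in model that replays canned responses.
    model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))

    result = model.invoke([HumanMessage(content="hello")])
    print(result.content)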