formatting

Mason Daugherty 2025-08-04 15:53:32 -04:00
parent 7b89cabd0b
commit 2f8470d7f2
6 changed files with 40 additions and 41 deletions

View File

@@ -15,7 +15,7 @@ DEFAULT_MODEL_NAME = "llama3.1"
@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
def test_structured_output(method: str) -> None:
"""Test to verify structured output via tool calling and ``format`` parameter."""
"""Test to verify structured output via tool calling and `format` parameter."""
class Joke(BaseModel):
"""Joke to tell user."""

View File

@@ -11,7 +11,7 @@ SAMPLE = "What is 3^3?"
def test_stream_text_tokens() -> None:
"""Test streaming raw string tokens from OllamaLLM."""
"""Test streaming raw string tokens from `OllamaLLM`."""
llm = OllamaLLM(model=MODEL_NAME)
for token in llm.stream("I'm Pickle Rick"):
@@ -64,7 +64,7 @@ def test__stream_with_reasoning(model: str) -> None:
async def test_astream_text_tokens() -> None:
"""Test async streaming raw string tokens from OllamaLLM."""
"""Test async streaming raw string tokens from `OllamaLLM`."""
llm = OllamaLLM(model=MODEL_NAME)
async for token in llm.astream("I'm Pickle Rick"):
@@ -109,7 +109,7 @@ async def test__astream_with_reasoning(model: str) -> None:
async def test_abatch() -> None:
"""Test batch sync token generation from OllamaLLM."""
"""Test batch sync token generation from `OllamaLLM`."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
@@ -129,7 +129,7 @@ async def test_abatch_tags() -> None:
def test_batch() -> None:
"""Test batch token generation from OllamaLLM."""
"""Test batch token generation from `OllamaLLM`."""
llm = OllamaLLM(model=MODEL_NAME)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
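
The OllamaLLM tests touched in this file all follow the same call patterns; roughly, assuming a local llama3.1 model (a sketch, not the file's exact assertions):

    import asyncio

    from langchain_ollama import OllamaLLM

    llm = OllamaLLM(model="llama3.1")

    # Sync streaming: each chunk is a raw string token.
    for token in llm.stream("I'm Pickle Rick"):
        assert isinstance(token, str)

    # Sync batch: one generated string per prompt.
    results = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    assert len(results) == 2


    async def run_async_checks() -> None:
        # Async streaming and batching mirror the sync API.
        async for token in llm.astream("I'm Pickle Rick"):
            assert isinstance(token, str)
        async_results = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
        assert len(async_results) == 2


    asyncio.run(run_async_checks())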

View File

@@ -1,4 +1,4 @@
"""Test chat model integration."""
"""Unit tests for ChatOllama."""
import json
import logging
@@ -54,6 +54,7 @@ def _mock_httpx_client_stream(
def test_arbitrary_roles_accepted_in_chatmessages(
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test that `ChatOllama` accepts arbitrary roles in `ChatMessage`."""
monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)
llm = ChatOllama(
model=MODEL_NAME,
@@ -94,9 +95,6 @@ dummy_raw_tool_call = {
}
# --- Regression tests for tool-call argument parsing (see #30910) ---
@pytest.mark.parametrize(
"input_string, expected_output",
[
@@ -113,14 +111,14 @@ dummy_raw_tool_call = {
def test_parse_json_string_success_cases(
input_string: str, expected_output: Any
) -> None:
"""Tests that _parse_json_string correctly parses valid and fixable strings."""
"""Tests that `_parse_json_string` correctly parses valid and fixable strings."""
raw_tool_call = {"function": {"name": "test_func", "arguments": input_string}}
result = _parse_json_string(input_string, raw_tool_call=raw_tool_call, skip=False)
assert result == expected_output
def test_parse_json_string_failure_case_raises_exception() -> None:
"""Tests that _parse_json_string raises an exception for truly malformed strings."""
"""Tests that `_parse_json_string` raises an exception for malformed strings."""
malformed_string = "{'key': 'value',,}"
raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}}
with pytest.raises(OutputParserException):
@@ -132,7 +130,7 @@ def test_parse_json_string_failure_case_raises_exception() -> None:
def test_parse_json_string_skip_returns_input_on_failure() -> None:
"""Tests that skip=True returns the original string on parse failure."""
"""Tests that `skip=True` returns the original string on parse failure."""
malformed_string = "{'not': valid,,,}"
raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}}
result = _parse_json_string(
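
As the tests above suggest, _parse_json_string has three outcomes worth keeping in mind while reading this hunk. The sketch below infers the behavior from the test names and call sites only, not from the implementation:

    from langchain_core.exceptions import OutputParserException

    # Private helper under test; import path assumed from the module being tested.
    from langchain_ollama.chat_models import _parse_json_string


    def _wrap(arguments: str) -> dict:
        # Mirror the raw_tool_call structure used in the tests above.
        return {"function": {"name": "test_func", "arguments": arguments}}


    # 1. Valid (and fixable) argument strings are parsed into Python objects.
    assert _parse_json_string('{"a": 1}', raw_tool_call=_wrap('{"a": 1}'), skip=False) == {"a": 1}

    # 2. Truly malformed input raises OutputParserException when skip=False.
    bad = "{'key': 'value',,}"
    try:
        _parse_json_string(bad, raw_tool_call=_wrap(bad), skip=False)
    except OutputParserException:
        pass

    # 3. With skip=True, the original string comes back unchanged on failure.
    worse = "{'not': valid,,,}"
    assert _parse_json_string(worse, raw_tool_call=_wrap(worse), skip=True) == worse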

View File

@@ -32,7 +32,7 @@ def test_validate_model_on_init(mock_validate_model: Any) -> None:
@patch("langchain_ollama.embeddings.Client")
def test_embed_documents_passes_options(mock_client_class: Any) -> None:
"""Test that embed_documents method passes options including num_gpu."""
"""Test that `embed_documents()` passes options, including `num_gpu`."""
# Create a mock client instance
mock_client = Mock()
mock_client_class.return_value = mock_client
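
The embeddings test mocks the underlying client so it can assert that model options such as num_gpu reach the embed call. A rough sketch of that pattern; the mocked return shape and the inspected keyword name are assumptions:

    from unittest.mock import Mock, patch

    from langchain_ollama import OllamaEmbeddings


    @patch("langchain_ollama.embeddings.Client")
    def run_check(mock_client_class: Mock) -> None:
        # Fake client and a fake embeddings payload (shape is an assumption).
        mock_client = Mock()
        mock_client.embed.return_value = {"embeddings": [[0.0, 0.1], [0.2, 0.3]]}
        mock_client_class.return_value = mock_client

        embedder = OllamaEmbeddings(model="llama3.1", num_gpu=1)
        embedder.embed_documents(["doc one", "doc two"])

        # Inspect the kwargs passed through to the client.
        _, kwargs = mock_client.embed.call_args
        print(kwargs.get("options"))


    run_check()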

View File

@@ -171,7 +171,7 @@ class ChatModelIntegrationTests(ChatModelTests):
API references for individual test methods include troubleshooting tips.
Test subclasses must implement the following two properties:
Test subclasses **must** implement the following two properties:
chat_model_class
The chat model class to test, e.g., ``ChatParrotLink``.
@@ -424,10 +424,10 @@ class ChatModelIntegrationTests(ChatModelTests):
.. dropdown:: returns_usage_metadata
Boolean property indicating whether the chat model returns usage metadata
on invoke and streaming responses.
on invoke and streaming responses. Defaults to ``True``.
``usage_metadata`` is an optional dict attribute on AIMessages that track input
and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html
``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
Example:
@@ -438,7 +438,7 @@ class ChatModelIntegrationTests(ChatModelTests):
return False
Models supporting ``usage_metadata`` should also return the name of the
underlying model in the ``response_metadata`` of the AIMessage.
underlying model in the ``response_metadata`` of the ``AIMessage``.
.. dropdown:: supports_anthropic_inputs
@@ -523,8 +523,8 @@ class ChatModelIntegrationTests(ChatModelTests):
Property controlling what usage metadata details are emitted in both invoke
and stream.
``usage_metadata`` is an optional dict attribute on AIMessages that track input
and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html
``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
It includes optional keys ``input_token_details`` and ``output_token_details``
that can track usage details associated with special types of tokens, such as
@@ -944,7 +944,8 @@ class ChatModelIntegrationTests(ChatModelTests):
.. dropdown:: Configuration
By default, this test is run.
To disable this feature, set `returns_usage_metadata` to False in your
To disable this feature, set ``returns_usage_metadata`` to ``False`` in your
test class:
.. code-block:: python
@@ -955,7 +956,7 @@ class ChatModelIntegrationTests(ChatModelTests):
return False
This test can also check the format of specific kinds of usage metadata
based on the `supported_usage_metadata_details` property. This property
based on the ``supported_usage_metadata_details`` property. This property
should be configured as follows with the types of tokens that the model
supports tracking:
@@ -1110,7 +1111,7 @@ class ChatModelIntegrationTests(ChatModelTests):
.. dropdown:: Configuration
By default, this test is run.
To disable this feature, set `returns_usage_metadata` to False in your
To disable this feature, set ``returns_usage_metadata`` to ``False`` in your
test class:
.. code-block:: python
@@ -1121,7 +1122,7 @@ class ChatModelIntegrationTests(ChatModelTests):
return False
This test can also check the format of specific kinds of usage metadata
based on the `supported_usage_metadata_details` property. This property
based on the ``supported_usage_metadata_details`` property. This property
should be configured as follows with the types of tokens that the model
supports tracking:
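
Several hunks in this file adjust the docs for returns_usage_metadata and supported_usage_metadata_details. In a concrete test subclass those overrides look roughly like this; ChatParrotLink is the docs' placeholder model and the detail keys shown are only examples:

    from typing import Type

    from langchain_core.language_models import BaseChatModel
    from langchain_tests.integration_tests import ChatModelIntegrationTests

    from langchain_parrot_link import ChatParrotLink  # hypothetical placeholder package


    class TestParrotLinkIntegration(ChatModelIntegrationTests):
        @property
        def chat_model_class(self) -> Type[BaseChatModel]:
            return ChatParrotLink

        @property
        def chat_model_params(self) -> dict:
            return {"model": "bird-brain-001", "temperature": 0}

        @property
        def returns_usage_metadata(self) -> bool:
            # Disable the usage-metadata tests when the model never reports token counts.
            return False

        @property
        def supported_usage_metadata_details(self) -> dict:
            # Only consulted when usage metadata is returned; keys below are examples.
            return {"invoke": ["cache_read_input"], "stream": []}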

View File

@@ -193,7 +193,7 @@ class ChatModelTests(BaseStandardTests):
@property
def supports_image_tool_message(self) -> bool:
"""(bool) whether the chat model supports ToolMessages that include image
"""(bool) whether the chat model supports ``ToolMessage``s that include image
content.
"""
return False
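
For context on the supports_image_tool_message flag documented above, the standard tests exercise a ToolMessage whose content carries an image block. The exact block shape used by the suite is an assumption here; the sketch uses the OpenAI-style image_url format:

    import base64

    from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

    # Placeholder payload; a real test would base64-encode an actual image.
    image_data = base64.b64encode(b"<jpeg bytes here>").decode("utf-8")

    messages = [
        HumanMessage("Describe the image returned by the tool."),
        AIMessage(
            content="",
            tool_calls=[{"type": "tool_call", "id": "1", "name": "random_image", "args": {}}],
        ),
        ToolMessage(
            content=[
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                }
            ],
            tool_call_id="1",
            name="random_image",
        ),
    ]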
@@ -261,7 +261,7 @@ class ChatModelUnitTests(ChatModelTests):
API references for individual test methods include troubleshooting tips.
Test subclasses must implement the following two properties:
Test subclasses **must** implement the following two properties:
chat_model_class
The chat model class to test, e.g., ``ChatParrotLink``.
@@ -293,7 +293,7 @@ class ChatModelUnitTests(ChatModelTests):
Boolean property indicating whether the chat model supports tool calling.
By default, this is determined by whether the chat model's `bind_tools` method
By default, this is determined by whether the chat model's ``bind_tools`` method
is overridden. It typically does not need to be overridden on the test class.
Example override:
@@ -395,7 +395,7 @@ class ChatModelUnitTests(ChatModelTests):
Defaults to ``False``.
If set to ``True``, the chat model will be tested using content blocks of the
form
form.
.. code-block:: python
@@ -431,7 +431,7 @@ class ChatModelUnitTests(ChatModelTests):
URLs. Defaults to ``False``.
If set to ``True``, the chat model will be tested using content blocks of the
form
form.
.. code-block:: python
@@ -457,7 +457,7 @@ class ChatModelUnitTests(ChatModelTests):
Defaults to ``False``.
If set to ``True``, the chat model will be tested using content blocks of the
form
form.
.. code-block:: python
@@ -484,7 +484,7 @@ class ChatModelUnitTests(ChatModelTests):
Defaults to ``False``.
If set to ``True``, the chat model will be tested using content blocks of the
form
form.
.. code-block:: python
@@ -513,10 +513,10 @@ class ChatModelUnitTests(ChatModelTests):
.. dropdown:: returns_usage_metadata
Boolean property indicating whether the chat model returns usage metadata
on invoke and streaming responses.
on invoke and streaming responses. Defaults to ``True``.
``usage_metadata`` is an optional dict attribute on AIMessages that track input
and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html
``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
Example:
@@ -527,7 +527,7 @@ class ChatModelUnitTests(ChatModelTests):
return False
Models supporting ``usage_metadata`` should also return the name of the
underlying model in the ``response_metadata`` of the AIMessage.
underlying model in the ``response_metadata`` of the ``AIMessage``.
.. dropdown:: supports_anthropic_inputs
@@ -561,7 +561,7 @@ class ChatModelUnitTests(ChatModelTests):
.. dropdown:: supports_image_tool_message
Boolean property indicating whether the chat model supports ToolMessages
Boolean property indicating whether the chat model supports ``ToolMessage``s
that include image content, e.g.,
.. code-block:: python
@@ -609,11 +609,11 @@ class ChatModelUnitTests(ChatModelTests):
.. dropdown:: supported_usage_metadata_details
Property controlling what usage metadata details are emitted in both invoke
and stream.
Property controlling what usage metadata details are emitted in both ``invoke``
and ``stream``.
``usage_metadata`` is an optional dict attribute on AIMessages that track input
and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html
``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input
and output tokens. `See more. <https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html>`__
It includes optional keys ``input_token_details`` and ``output_token_details``
that can track usage details associated with special types of tokens, such as
@@ -817,7 +817,7 @@ class ChatModelUnitTests(ChatModelTests):
If this test fails, ensure that:
1. ``chat_model_params`` is specified and the model can be initialized from those params;
2. The model accommodates standard parameters: https://python.langchain.com/docs/concepts/chat_models/#standard-parameters
2. The model accommodates `standard parameters <https://python.langchain.com/docs/concepts/chat_models/#standard-parameters>`__
""" # noqa: E501
model = self.chat_model_class(
**{
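
The final hunk sits inside the test that instantiates the model from chat_model_params together with the standard parameters. A minimal unit-test subclass that satisfies the two required properties looks roughly like this; ChatParrotLink is again the docs' placeholder, not a real package:

    from typing import Type

    from langchain_core.language_models import BaseChatModel
    from langchain_tests.unit_tests import ChatModelUnitTests

    from langchain_parrot_link import ChatParrotLink  # hypothetical placeholder package


    class TestParrotLinkUnit(ChatModelUnitTests):
        @property
        def chat_model_class(self) -> Type[BaseChatModel]:
            return ChatParrotLink

        @property
        def chat_model_params(self) -> dict:
            # Merged with standard params (temperature, timeout, ...) during init tests.
            return {"model": "bird-brain-001", "temperature": 0}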