From c1b86cc92957f465c061a8101020e50aff66e361 Mon Sep 17 00:00:00 2001 From: Mason Daugherty Date: Wed, 6 Aug 2025 18:22:02 -0400 Subject: [PATCH] feat: minor core work, v1 standard tests & (most of) v1 ollama (#32315) Resolves #32215 --------- Co-authored-by: Chester Curme Co-authored-by: Eugene Yurtsev Co-authored-by: Nuno Campos --- .vscode/settings.json | 2 +- libs/core/langchain_core/load/dump.py | 21 + libs/core/langchain_core/load/load.py | 9 +- libs/core/langchain_core/messages/__init__.py | 12 + libs/core/langchain_core/messages/ai.py | 13 +- .../langchain_core/messages/content_blocks.py | 128 +- libs/core/langchain_core/messages/utils.py | 80 +- libs/core/langchain_core/runnables/base.py | 2 +- libs/core/langchain_core/tools/base.py | 3 +- libs/core/langchain_core/tools/convert.py | 4 +- libs/core/langchain_core/tools/retriever.py | 2 +- libs/core/langchain_core/tools/structured.py | 2 +- .../langchain_core/utils/function_calling.py | 18 +- libs/core/langchain_core/v1/chat_models.py | 61 +- libs/core/langchain_core/v1/messages.py | 140 +- .../unit_tests/load/test_serializable.py | 94 +- .../messages/test_content_block_factories.py | 14 - .../tests/unit_tests/messages/test_imports.py | 4 + .../prompts/__snapshots__/test_chat.ambr | 4 +- .../runnables/__snapshots__/test_graph.ambr | 29 +- .../__snapshots__/test_runnable.ambr | 43 +- libs/core/tests/unit_tests/test_messages.py | 16 +- libs/partners/ollama/README.md | 8 + .../ollama/langchain_ollama/__init__.py | 3 + .../ollama/langchain_ollama/_compat.py | 338 ++ .../ollama/langchain_ollama/_utils.py | 9 +- .../ollama/langchain_ollama/chat_models.py | 86 +- .../ollama/langchain_ollama/embeddings.py | 75 +- libs/partners/ollama/langchain_ollama/llms.py | 62 +- .../ollama/langchain_ollama/v1/__init__.py | 5 + .../v1/chat_models/__init__.py | 17 + .../langchain_ollama/v1/chat_models/base.py | 941 +++++ libs/partners/ollama/pyproject.toml | 1 + .../chat_models/test_chat_models.py | 54 +- .../chat_models/test_chat_models_reasoning.py | 96 +- .../chat_models/test_chat_models_standard.py | 4 +- .../tests/integration_tests/test_llms.py | 17 +- .../tests/integration_tests/v1/__init__.py | 0 .../test_chat_models_standard_v1.py | 258 ++ .../v1/chat_models/test_chat_models_v1.py | 443 +++ .../tests/unit_tests/test_chat_models.py | 22 +- .../tests/unit_tests/test_embeddings.py | 2 +- .../ollama/tests/unit_tests/v1/__init__.py | 0 .../tests/unit_tests/v1/test_chat_models.py | 621 ++++ .../tests/unit_tests/v1/test_imports.py | 9 + libs/partners/ollama/uv.lock | 211 +- libs/standard-tests/QUICK_START.md | 446 +++ libs/standard-tests/README.md | 79 +- libs/standard-tests/README_V1.md | 166 + .../langchain_tests/__init__.py | 3 + libs/standard-tests/langchain_tests/base.py | 2 +- .../integration_tests/__init__.py | 2 + .../integration_tests/chat_models.py | 205 +- .../integration_tests/chat_models_v1.py | 3015 +++++++++++++++++ .../langchain_tests/unit_tests/chat_models.py | 81 +- .../unit_tests/chat_models_v1.py | 934 +++++ .../tests/unit_tests/custom_chat_model.py | 10 +- .../tests/unit_tests/custom_chat_model_v1.py | 259 ++ .../unit_tests/test_custom_chat_model_v1.py | 117 + libs/standard-tests/uv.lock | 4 +- 60 files changed, 8617 insertions(+), 689 deletions(-) create mode 100644 libs/partners/ollama/langchain_ollama/_compat.py create mode 100644 libs/partners/ollama/langchain_ollama/v1/__init__.py create mode 100644 libs/partners/ollama/langchain_ollama/v1/chat_models/__init__.py create mode 100644 
libs/partners/ollama/langchain_ollama/v1/chat_models/base.py create mode 100644 libs/partners/ollama/tests/integration_tests/v1/__init__.py create mode 100644 libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_standard_v1.py create mode 100644 libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_v1.py create mode 100644 libs/partners/ollama/tests/unit_tests/v1/__init__.py create mode 100644 libs/partners/ollama/tests/unit_tests/v1/test_chat_models.py create mode 100644 libs/partners/ollama/tests/unit_tests/v1/test_imports.py create mode 100644 libs/standard-tests/QUICK_START.md create mode 100644 libs/standard-tests/README_V1.md create mode 100644 libs/standard-tests/langchain_tests/integration_tests/chat_models_v1.py create mode 100644 libs/standard-tests/langchain_tests/unit_tests/chat_models_v1.py create mode 100644 libs/standard-tests/tests/unit_tests/custom_chat_model_v1.py create mode 100644 libs/standard-tests/tests/unit_tests/test_custom_chat_model_v1.py diff --git a/.vscode/settings.json b/.vscode/settings.json index 93dae04eff7..ec50c96705a 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -21,7 +21,7 @@ "[python]": { "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.organizeImports": "explicit", + "source.organizeImports.ruff": "explicit", "source.fixAll": "explicit" }, "editor.defaultFormatter": "charliermarsh.ruff" diff --git a/libs/core/langchain_core/load/dump.py b/libs/core/langchain_core/load/dump.py index b1993c38244..6df5b617975 100644 --- a/libs/core/langchain_core/load/dump.py +++ b/libs/core/langchain_core/load/dump.py @@ -1,11 +1,14 @@ """Dump objects to json.""" +import dataclasses +import inspect import json from typing import Any from pydantic import BaseModel from langchain_core.load.serializable import Serializable, to_json_not_implemented +from langchain_core.v1.messages import MessageV1Types def default(obj: Any) -> Any: @@ -19,6 +22,24 @@ def default(obj: Any) -> Any: """ if isinstance(obj, Serializable): return obj.to_json() + + # Handle v1 message classes + if type(obj) in MessageV1Types: + # Get the constructor signature to only include valid parameters + init_sig = inspect.signature(type(obj).__init__) + valid_params = set(init_sig.parameters.keys()) - {"self"} + + # Filter dataclass fields to only include constructor params + all_fields = dataclasses.asdict(obj) + kwargs = {k: v for k, v in all_fields.items() if k in valid_params} + + return { + "lc": 1, + "type": "constructor", + "id": ["langchain_core", "v1", "messages", type(obj).__name__], + "kwargs": kwargs, + } + return to_json_not_implemented(obj) diff --git a/libs/core/langchain_core/load/load.py b/libs/core/langchain_core/load/load.py index 0c2cbf1f65e..4f8612b1ceb 100644 --- a/libs/core/langchain_core/load/load.py +++ b/libs/core/langchain_core/load/load.py @@ -156,8 +156,13 @@ class Reviver: cls = getattr(mod, name) - # The class must be a subclass of Serializable. - if not issubclass(cls, Serializable): + # Import MessageV1Types lazily to avoid circular import: + # load.load -> v1.messages -> messages.ai -> messages.base -> + # load.serializable -> load.__init__ -> load.load + from langchain_core.v1.messages import MessageV1Types + + # The class must be a subclass of Serializable or a v1 message class. 
+ if not (issubclass(cls, Serializable) or cls in MessageV1Types): msg = f"Invalid namespace: {value}" raise ValueError(msg) diff --git a/libs/core/langchain_core/messages/__init__.py b/libs/core/langchain_core/messages/__init__.py index 0faf0447295..410299ea5b5 100644 --- a/libs/core/langchain_core/messages/__init__.py +++ b/libs/core/langchain_core/messages/__init__.py @@ -54,6 +54,10 @@ if TYPE_CHECKING: convert_to_openai_data_block, convert_to_openai_image_block, is_data_content_block, + is_reasoning_block, + is_text_block, + is_tool_call_block, + is_tool_call_chunk, ) from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk from langchain_core.messages.human import HumanMessage, HumanMessageChunk @@ -127,6 +131,10 @@ __all__ = ( "filter_messages", "get_buffer_string", "is_data_content_block", + "is_reasoning_block", + "is_text_block", + "is_tool_call_block", + "is_tool_call_chunk", "merge_content", "merge_message_runs", "message_chunk_to_message", @@ -186,6 +194,10 @@ _dynamic_imports = { "filter_messages": "utils", "get_buffer_string": "utils", "is_data_content_block": "content_blocks", + "is_reasoning_block": "content_blocks", + "is_text_block": "content_blocks", + "is_tool_call_block": "content_blocks", + "is_tool_call_chunk": "content_blocks", "merge_message_runs": "utils", "message_chunk_to_message": "utils", "messages_from_dict": "utils", diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index b980761a121..45eb7d2ba5e 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -25,8 +25,10 @@ from langchain_core.utils.usage import _dict_int_op logger = logging.getLogger(__name__) +_LC_AUTO_PREFIX = "lc_" +"""LangChain auto-generated ID prefix for messages and content blocks.""" -_LC_ID_PREFIX = "run-" +_LC_ID_PREFIX = f"{_LC_AUTO_PREFIX}run-" """Internal tracing/callback system identifier. Used for: @@ -35,9 +37,6 @@ Used for: - Enables tracking parent-child relationships between operations """ -_LC_AUTO_PREFIX = "lc_" -"""LangChain auto-generated ID prefix for messages and content blocks.""" - class InputTokenDetails(TypedDict, total=False): """Breakdown of input token counts. @@ -438,13 +437,13 @@ def add_ai_message_chunks( chunk_id = id_ break else: - # second pass: prefer lc_* ids over run-* ids + # second pass: prefer lc_run-* ids over lc_* ids for id_ in candidates: - if id_ and id_.startswith(_LC_AUTO_PREFIX): + if id_ and id_.startswith(_LC_ID_PREFIX): chunk_id = id_ break else: - # third pass: take any remaining id (run-* ids) + # third pass: take any remaining id (auto-generated lc_* ids) for id_ in candidates: if id_: chunk_id = id_ diff --git a/libs/core/langchain_core/messages/content_blocks.py b/libs/core/langchain_core/messages/content_blocks.py index 025f16eeb37..727e2cd1085 100644 --- a/libs/core/langchain_core/messages/content_blocks.py +++ b/libs/core/langchain_core/messages/content_blocks.py @@ -5,26 +5,45 @@ change in future releases. This module provides a standardized data structure for representing inputs to and -outputs from Large Language Models. The core abstraction is the **Content Block**, a -``TypedDict`` that can represent a piece of text, an image, a tool call, or other -structured data. +outputs from LLMs. The core abstraction is the **Content Block**, a ``TypedDict`` that +can represent a piece of text, an image, a tool call, or other structured data. 
+ +**Rationale** + +Different LLM providers use distinct and incompatible API schemas. This module +introduces a unified, provider-agnostic format to standardize these interactions. A +message to or from a model is simply a ``list`` of ``ContentBlock`` objects, allowing +for the natural interleaving of text, images, and other content in a single, ordered +sequence. + +An adapter for a specific provider is responsible for translating this standard list of +blocks into the format required by its API. + +**Extensibility** Data **not yet mapped** to a standard block may be represented using the ``NonStandardContentBlock``, which allows for provider-specific data to be included without losing the benefits of type checking and validation. Furthermore, provider-specific fields **within** a standard block are fully supported -by default. However, since current type checkers do not recognize this, we are temporarily -applying type ignore comments to suppress warnings. In the future, -`PEP 728 `__ will add an extra param, ``extra_items=Any``. -When this is supported, we will apply it to block signatures to signify to type checkers -that additional provider-specific fields are allowed. +by default in the ``extras`` field of each block. This allows for additional metadata +to be included without breaking the standard structure. + +Following widespread adoption of `PEP 728 `__, we will add +``extra_items=Any`` as a param to Content Blocks. This will signify to type checkers +that additional provider-specific fields are allowed outside of the ``extras`` field, +and that will become the new standard approach to adding provider-specific metadata. + +.. warning:: + Do not heavily rely on the ``extras`` field for provider-specific data! This field + is subject to deprecation in future releases as we move towards PEP 728. **Example with PEP 728 provider-specific fields:** .. code-block:: python - # Note `extra_items=Any` + # Content block definition + # NOTE: `extra_items=Any` class TextContentBlock(TypedDict, extra_items=Any): type: Literal["text"] id: NotRequired[str] @@ -36,6 +55,7 @@ that additional provider-specific fields are allowed. from langchain_core.messages.content_blocks import TextContentBlock + # Create a text content block with provider-specific fields my_block: TextContentBlock = { # Add required fields "type": "text", @@ -47,6 +67,7 @@ that additional provider-specific fields are allowed. "custom_field": "any value", } + # Mutating an existing block to add provider-specific fields openai_data = my_block["openai_metadata"] # Type: Any .. note:: @@ -54,24 +75,13 @@ that additional provider-specific fields are allowed. from type checkers that don't yet support it. The functionality works correctly in Python 3.13+ and will be fully supported as the ecosystem catches up. -**Rationale** - -Different LLM providers use distinct and incompatible API schemas. This module -introduces a unified, provider-agnostic format to standardize these interactions. A -message to or from a model is simply a `list` of `ContentBlock` objects, allowing for -the natural interleaving of text, images, and other content in a single, ordered -sequence. - -An adapter for a specific provider is responsible for translating this standard list of -blocks into the format required by its API. - **Key Block Types** The module defines several types of content blocks, including: - ``TextContentBlock``: Standard text. - ``ImageContentBlock``, ``Audio...``, ``Video...``, ``PlainText...``, ``File...``: For multimodal data. 
-- ``ToolCallContentBlock``, ``ToolOutputContentBlock``: For function calling. +- ``ToolCallContentBlock``: For function calling. - ``ReasoningContentBlock``: To capture a model's thought process. - ``Citation``: For annotations that link generated text to a source document. @@ -101,13 +111,19 @@ The module defines several types of content blocks, including: mime_type="image/png", ), ] + +Factory functions like ``create_text_block`` and ``create_image_block`` are provided +and offer benefits such as: +- Automatic ID generation (when not provided) +- No need to manually specify the ``type`` field + """ # noqa: E501 import warnings from typing import Any, Literal, Optional, Union from uuid import uuid4 -from typing_extensions import NotRequired, TypedDict, get_args, get_origin +from typing_extensions import NotRequired, TypedDict, TypeGuard def _ensure_id(id_val: Optional[str]) -> str: @@ -129,7 +145,7 @@ class Citation(TypedDict): """Annotation for citing data from a document. .. note:: - ``start/end`` indices refer to the **response text**, + ``start``/``end`` indices refer to the **response text**, not the source text. This means that the indices are relative to the model's response, not the original document (as specified in the ``url``). @@ -157,7 +173,7 @@ class Citation(TypedDict): # For future consideration, if needed: # provenance: NotRequired[str] - # """Provenance of the document, e.g., "Wikipedia", "arXiv", etc. + # """Provenance of the document, e.g., ``'Wikipedia'``, ``'arXiv'``, etc. # Included for future compatibility; not currently implemented. # """ @@ -238,7 +254,7 @@ class TextContentBlock(TypedDict): """Block text.""" annotations: NotRequired[list[Annotation]] - """Citations and other annotations.""" + """``Citation``s and other annotations.""" index: NotRequired[int] """Index of block in aggregate response. Used during streaming.""" @@ -299,7 +315,7 @@ class ToolCall(TypedDict): class ToolCallChunk(TypedDict): """A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -637,7 +653,6 @@ class AudioContentBlock(TypedDict): .. note:: ``create_audio_block`` may also be used as a factory to create an ``AudioContentBlock``. Benefits include: - * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -660,6 +675,7 @@ class AudioContentBlock(TypedDict): """MIME type of the audio. Required for base64. `Examples from IANA `__ + """ index: NotRequired[int] @@ -765,6 +781,7 @@ class FileContentBlock(TypedDict): """MIME type of the file. Required for base64. `Examples from IANA `__ + """ index: NotRequired[int] @@ -793,7 +810,7 @@ class NonStandardContentBlock(TypedDict): The purpose of this block should be to simply hold a provider-specific payload. If a provider's non-standard output includes reasoning and tool calls, it should be the adapter's job to parse that payload and emit the corresponding standard - ReasoningContentBlock and ToolCallContentBlocks. + ``ReasoningContentBlock`` and ``ToolCallContentBlocks``. .. 
note:: ``create_non_standard_block`` may also be used as a factory to create a @@ -832,6 +849,7 @@ DataContentBlock = Union[ ToolContentBlock = Union[ ToolCall, + ToolCallChunk, CodeInterpreterCall, CodeInterpreterOutput, CodeInterpreterResult, @@ -842,8 +860,8 @@ ToolContentBlock = Union[ ContentBlock = Union[ TextContentBlock, ToolCall, - InvalidToolCall, ToolCallChunk, + InvalidToolCall, ReasoningContentBlock, NonStandardContentBlock, DataContentBlock, @@ -851,19 +869,6 @@ ContentBlock = Union[ ] -def _extract_typedict_type_values(union_type: Any) -> set[str]: - """Extract the values of the 'type' field from a TypedDict union type.""" - result: set[str] = set() - for value in get_args(union_type): - annotation = value.__annotations__["type"] - if get_origin(annotation) is Literal: - result.update(get_args(annotation)) - else: - msg = f"{value} 'type' is not a Literal" - raise ValueError(msg) - return result - - KNOWN_BLOCK_TYPES = { "text", "text-plain", @@ -911,6 +916,33 @@ def is_data_content_block(block: dict) -> bool: ) +def is_tool_call_block(block: ContentBlock) -> TypeGuard[ToolCall]: + """Type guard to check if a content block is a ``ToolCall``.""" + return block.get("type") == "tool_call" + + +def is_tool_call_chunk(block: ContentBlock) -> TypeGuard[ToolCallChunk]: + """Type guard to check if a content block is a ``ToolCallChunk``.""" + return block.get("type") == "tool_call_chunk" + + +def is_text_block(block: ContentBlock) -> TypeGuard[TextContentBlock]: + """Type guard to check if a content block is a ``TextContentBlock``.""" + return block.get("type") == "text" + + +def is_reasoning_block(block: ContentBlock) -> TypeGuard[ReasoningContentBlock]: + """Type guard to check if a content block is a ``ReasoningContentBlock``.""" + return block.get("type") == "reasoning" + + +def is_invalid_tool_call_block( + block: ContentBlock, +) -> TypeGuard[InvalidToolCall]: + """Type guard to check if a content block is an ``InvalidToolCall``.""" + return block.get("type") == "invalid_tool_call" + + def convert_to_openai_image_block(block: dict[str, Any]) -> dict: """Convert image content block to format expected by OpenAI Chat Completions API.""" if "url" in block: @@ -994,7 +1026,7 @@ def create_text_block( Args: text: The text content of the block. id: Content block identifier. Generated automatically if not provided. - annotations: Citations and other annotations for the text. + annotations: ``Citation``s and other annotations for the text. index: Index of block in aggregate response. Used during streaming. 
Returns: @@ -1052,10 +1084,6 @@ def create_image_block( msg = "Must provide one of: url, base64, or file_id" raise ValueError(msg) - if base64 and not mime_type: - msg = "mime_type is required when using base64 data" - raise ValueError(msg) - block = ImageContentBlock(type="image", id=_ensure_id(id)) if url is not None: @@ -1238,8 +1266,7 @@ def create_file_block( def create_plaintext_block( - text: str, - *, + text: Optional[str] = None, url: Optional[str] = None, base64: Optional[str] = None, file_id: Optional[str] = None, @@ -1271,10 +1298,11 @@ def create_plaintext_block( block = PlainTextContentBlock( type="text-plain", mime_type="text/plain", - text=text, id=_ensure_id(id), ) + if text is not None: + block["text"] = text if url is not None: block["url"] = url if base64 is not None: diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index 02d4679fe76..5585103d885 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -35,6 +35,7 @@ from langchain_core.messages import convert_to_openai_data_block, is_data_conten from langchain_core.messages.ai import AIMessage, AIMessageChunk from langchain_core.messages.base import BaseMessage, BaseMessageChunk from langchain_core.messages.chat import ChatMessage, ChatMessageChunk +from langchain_core.messages.content_blocks import ContentBlock from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk from langchain_core.messages.human import HumanMessage, HumanMessageChunk from langchain_core.messages.modifier import RemoveMessage @@ -43,7 +44,7 @@ from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk from langchain_core.v1.messages import AIMessage as AIMessageV1 from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 from langchain_core.v1.messages import HumanMessage as HumanMessageV1 -from langchain_core.v1.messages import MessageV1, MessageV1Types +from langchain_core.v1.messages import MessageV1, MessageV1Types, ResponseMetadata from langchain_core.v1.messages import SystemMessage as SystemMessageV1 from langchain_core.v1.messages import ToolMessage as ToolMessageV1 @@ -481,6 +482,80 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage: return message_ +def _convert_from_v0_to_v1(message: BaseMessage) -> MessageV1: + """Convert a v0 message to a v1 message.""" + if isinstance(message, HumanMessage): # Checking for v0 HumanMessage + return HumanMessageV1(message.content, id=message.id, name=message.name) # type: ignore[arg-type] + if isinstance(message, AIMessage): # Checking for v0 AIMessage + return AIMessageV1( + content=message.content, # type: ignore[arg-type] + id=message.id, + name=message.name, + lc_version="v1", + response_metadata=message.response_metadata, # type: ignore[arg-type] + usage_metadata=message.usage_metadata, + tool_calls=message.tool_calls, + invalid_tool_calls=message.invalid_tool_calls, + ) + if isinstance(message, SystemMessage): # Checking for v0 SystemMessage + return SystemMessageV1( + message.content, # type: ignore[arg-type] + id=message.id, + name=message.name, + ) + if isinstance(message, ToolMessage): # Checking for v0 ToolMessage + return ToolMessageV1( + message.content, # type: ignore[arg-type] + message.tool_call_id, + id=message.id, + name=message.name, + artifact=message.artifact, + status=message.status, + ) + msg = f"Unsupported v0 message type for conversion to v1: {type(message)}" + raise 
NotImplementedError(msg) + + +def _safe_convert_from_v0_to_v1(message: BaseMessage) -> MessageV1: + """Convert a v0 message to a v1 message.""" + from langchain_core.messages.content_blocks import create_text_block + + if isinstance(message, HumanMessage): # Checking for v0 HumanMessage + content: list[ContentBlock] = [create_text_block(str(message.content))] + return HumanMessageV1(content, id=message.id, name=message.name) + if isinstance(message, AIMessage): # Checking for v0 AIMessage + content = [create_text_block(str(message.content))] + + # Construct ResponseMetadata TypedDict from v0 response_metadata dict + # Since ResponseMetadata has total=False, we can safely cast the dict + response_metadata = cast("ResponseMetadata", message.response_metadata or {}) + return AIMessageV1( + content=content, + id=message.id, + name=message.name, + lc_version="v1", + response_metadata=response_metadata, + usage_metadata=message.usage_metadata, + tool_calls=message.tool_calls, + invalid_tool_calls=message.invalid_tool_calls, + ) + if isinstance(message, SystemMessage): # Checking for v0 SystemMessage + content = [create_text_block(str(message.content))] + return SystemMessageV1(content=content, id=message.id, name=message.name) + if isinstance(message, ToolMessage): # Checking for v0 ToolMessage + content = [create_text_block(str(message.content))] + return ToolMessageV1( + content, + message.tool_call_id, + id=message.id, + name=message.name, + artifact=message.artifact, + status=message.status, + ) + msg = f"Unsupported v0 message type for conversion to v1: {type(message)}" + raise NotImplementedError(msg) + + def _convert_to_message_v1(message: MessageLikeRepresentation) -> MessageV1: """Instantiate a message from a variety of message formats. @@ -507,6 +582,9 @@ def _convert_to_message_v1(message: MessageLikeRepresentation) -> MessageV1: message_: MessageV1 = message.to_message() else: message_ = message + elif isinstance(message, BaseMessage): + # Convert v0 messages to v1 messages + message_ = _convert_from_v0_to_v1(message) elif isinstance(message, str): message_ = _create_message_from_message_type_v1("human", message) elif isinstance(message, Sequence) and len(message) == 2: diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index e1a2b3324c5..ba57594314d 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -2381,7 +2381,7 @@ class Runnable(ABC, Generic[Input, Output]): :class:`~langchain_core.messages.content_blocks.ToolCall` input. If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. - If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`. Returns: A BaseTool instance. diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py index c4900723de2..d5ff3444888 100644 --- a/libs/core/langchain_core/tools/base.py +++ b/libs/core/langchain_core/tools/base.py @@ -504,7 +504,8 @@ class ChildTool(BaseTool): :class:`~langchain_core.messages.content_blocks.ToolCall` input. If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. - If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`. 
+ """ def __init__(self, **kwargs: Any) -> None: diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py index 976f23a5999..7da794a39e7 100644 --- a/libs/core/langchain_core/tools/convert.py +++ b/libs/core/langchain_core/tools/convert.py @@ -127,7 +127,7 @@ def tool( :class:`~langchain_core.messages.content_blocks.ToolCall` input. If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. - If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`. Returns: The tool. @@ -409,7 +409,7 @@ def convert_runnable_to_tool( :class:`~langchain_core.messages.content_blocks.ToolCall` input. If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. - If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`. Returns: The tool. diff --git a/libs/core/langchain_core/tools/retriever.py b/libs/core/langchain_core/tools/retriever.py index 53de0b54a9a..b2afc6b5d82 100644 --- a/libs/core/langchain_core/tools/retriever.py +++ b/libs/core/langchain_core/tools/retriever.py @@ -93,7 +93,7 @@ def create_retriever_tool( :class:`~langchain_core.messages.content_blocks.ToolCall` input. If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. - If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`. Returns: Tool class to pass to an agent. diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py index a11ebb60c32..f106e5e06f7 100644 --- a/libs/core/langchain_core/tools/structured.py +++ b/libs/core/langchain_core/tools/structured.py @@ -162,7 +162,7 @@ class StructuredTool(BaseTool): :class:`~langchain_core.messages.content_blocks.ToolCall` input. If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. - If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`. kwargs: Additional arguments to pass to the tool diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py index b69e2c331fa..3899dc000d0 100644 --- a/libs/core/langchain_core/utils/function_calling.py +++ b/libs/core/langchain_core/utils/function_calling.py @@ -629,15 +629,16 @@ def tool_example_to_messages( The list of messages per example by default corresponds to: - 1) HumanMessage: contains the content from which content should be extracted. - 2) AIMessage: contains the extracted information from the model - 3) ToolMessage: contains confirmation to the model that the model requested a tool - correctly. + 1. ``HumanMessage``: contains the content from which content should be extracted. + 2. ``AIMessage``: contains the extracted information from the model + 3. ``ToolMessage``: contains confirmation to the model that the model requested a + tool correctly. - If `ai_response` is specified, there will be a final AIMessage with that response. + If ``ai_response`` is specified, there will be a final ``AIMessage`` with that + response. - The ToolMessage is required because some chat models are hyper-optimized for agents - rather than for an extraction use case. 
+ The ``ToolMessage`` is required because some chat models are hyper-optimized for + agents rather than for an extraction use case. Arguments: input: string, the user input @@ -646,7 +647,7 @@ def tool_example_to_messages( tool_outputs: Optional[list[str]], a list of tool call outputs. Does not need to be provided. If not provided, a placeholder value will be inserted. Defaults to None. - ai_response: Optional[str], if provided, content for a final AIMessage. + ai_response: Optional[str], if provided, content for a final ``AIMessage``. Returns: A list of messages @@ -728,6 +729,7 @@ def _parse_google_docstring( """Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide. + """ if docstring: docstring_blocks = docstring.split("\n\n") diff --git a/libs/core/langchain_core/v1/chat_models.py b/libs/core/langchain_core/v1/chat_models.py index 710c90c9dfd..09a0ab6e1fb 100644 --- a/libs/core/langchain_core/v1/chat_models.py +++ b/libs/core/langchain_core/v1/chat_models.py @@ -25,7 +25,7 @@ from pydantic import ( Field, field_validator, ) -from typing_extensions import TypeAlias, override +from typing_extensions import override from langchain_core.caches import BaseCache from langchain_core.callbacks import ( @@ -48,6 +48,7 @@ from langchain_core.messages import ( get_buffer_string, is_data_content_block, ) +from langchain_core.messages.ai import _LC_ID_PREFIX from langchain_core.messages.utils import ( convert_from_v1_message, convert_to_messages_v1, @@ -79,8 +80,8 @@ if TYPE_CHECKING: def _generate_response_from_error(error: BaseException) -> list[AIMessageV1]: - if hasattr(error, "response"): - response = error.response + response = getattr(error, "response", None) + if response is not None: metadata: dict = {} if hasattr(response, "headers"): try: @@ -116,9 +117,9 @@ def _format_for_tracing(messages: Sequence[MessageV1]) -> list[MessageV1]: for message in messages: message_to_trace = message for idx, block in enumerate(message.content): - # Update image content blocks to OpenAI # Chat Completions format. + # Update image content blocks to OpenAI Chat Completions format. if ( - block["type"] == "image" + block.get("type") == "image" and is_data_content_block(block) # type: ignore[arg-type] # permit unnecessary runtime check and block.get("source_type") != "id" ): @@ -190,7 +191,7 @@ def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) -> class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): - """Base class for chat models. + """Base class for v1 chat models. Key imperative methods: Methods that actually call the underlying model. 
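The v1 base class is typed as ``RunnableSerializable[LanguageModelInput, AIMessageV1]``, so ``invoke`` returns a v1 ``AIMessage`` with typed content blocks and ``stream`` yields ``AIMessageChunk`` objects. A minimal usage sketch, assuming the Ollama v1 integration added in this PR exposes a ``ChatOllama`` class (the class name and constructor arguments are assumptions, not confirmed by this diff):

.. code-block:: python

    # Illustrative sketch; the v1 Ollama export name and parameters are assumed.
    from langchain_ollama.v1.chat_models import ChatOllama  # assumed export

    model = ChatOllama(model="llama3.1")  # assumed constructor argument

    # v1 chat models return an AIMessage (v1) directly -- no ChatResult wrapper.
    response = model.invoke("What is the capital of France?")
    print(response.text)            # concatenated text blocks
    print(response.content)         # list of typed content blocks
    print(response.usage_metadata)  # token usage, when reported by the provider

    # Streaming yields AIMessageChunk objects that can be added together.
    chunks = list(model.stream("Tell me a joke"))
    full = chunks[0]
    for chunk in chunks[1:]:
        full = full + chunk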
@@ -338,7 +339,7 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): @property @override - def InputType(self) -> TypeAlias: + def InputType(self) -> Any: """Get the input type for this runnable.""" from langchain_core.prompt_values import ( ChatPromptValueConcrete, @@ -471,6 +472,9 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): run_manager.on_llm_error(e) raise + if run_manager and full_message.id and full_message.id.startswith("lc_"): + full_message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-0" + run_manager.on_llm_end(full_message) return full_message @@ -542,6 +546,9 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): ) raise + if run_manager and full_message.id and full_message.id.startswith("lc_"): + full_message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-0" + await run_manager.on_llm_end(full_message) return full_message @@ -613,6 +620,10 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): raise msg = add_ai_message_chunks(chunks[0], *chunks[1:]) + + if run_manager and msg.id and msg.id.startswith("lc_"): + msg.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-0" + run_manager.on_llm_end(msg) @override @@ -686,6 +697,10 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): raise msg = add_ai_message_chunks(chunks[0], *chunks[1:]) + + if run_manager and msg.id and msg.id.startswith("lc_"): + msg.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-0" + await run_manager.on_llm_end(msg) # --- Custom methods --- @@ -695,16 +710,13 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): def _get_invocation_params( self, - stop: Optional[list[str]] = None, **kwargs: Any, ) -> dict: params = self.dump() - params["stop"] = stop return {**params, **kwargs} def _get_ls_params( self, - stop: Optional[list[str]] = None, **kwargs: Any, ) -> LangSmithParams: """Get standard params for tracing.""" @@ -717,31 +729,26 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): default_provider = default_provider.lower() ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="chat") - if stop: - ls_params["ls_stop"] = stop # model - if hasattr(self, "model") and isinstance(self.model, str): - ls_params["ls_model_name"] = self.model - elif hasattr(self, "model_name") and isinstance(self.model_name, str): - ls_params["ls_model_name"] = self.model_name + model = getattr(self, "model", None) or getattr(self, "model_name", None) + if isinstance(model, str): + ls_params["ls_model_name"] = model # temperature - if "temperature" in kwargs and isinstance(kwargs["temperature"], float): - ls_params["ls_temperature"] = kwargs["temperature"] - elif hasattr(self, "temperature") and isinstance(self.temperature, float): - ls_params["ls_temperature"] = self.temperature + temperature = kwargs.get("temperature") or getattr(self, "temperature", None) + if isinstance(temperature, (int, float)): + ls_params["ls_temperature"] = temperature # max_tokens - if "max_tokens" in kwargs and isinstance(kwargs["max_tokens"], int): - ls_params["ls_max_tokens"] = kwargs["max_tokens"] - elif hasattr(self, "max_tokens") and isinstance(self.max_tokens, int): - ls_params["ls_max_tokens"] = self.max_tokens + max_tokens = kwargs.get("max_tokens") or getattr(self, "max_tokens", None) + if isinstance(max_tokens, int): + ls_params["ls_max_tokens"] = max_tokens return ls_params - def _get_llm_string(self, stop: 
Optional[list[str]] = None, **kwargs: Any) -> str: - params = self._get_invocation_params(stop=stop, **kwargs) + def _get_llm_string(self, **kwargs: Any) -> str: + params = self._get_invocation_params(**kwargs) params = {**params, **kwargs} return str(sorted(params.items())) @@ -811,7 +818,7 @@ class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): Union[typing.Dict[str, Any], type, Callable, BaseTool] # noqa: UP006 ], *, - tool_choice: Optional[Union[str]] = None, + tool_choice: Optional[str] = None, **kwargs: Any, ) -> Runnable[LanguageModelInput, AIMessageV1]: """Bind tools to the model. diff --git a/libs/core/langchain_core/v1/messages.py b/libs/core/langchain_core/v1/messages.py index 159e2faacaf..5c20ad552c8 100644 --- a/libs/core/langchain_core/v1/messages.py +++ b/libs/core/langchain_core/v1/messages.py @@ -47,13 +47,13 @@ class ResponseMetadata(TypedDict, total=False): Contains additional information returned by the provider, such as response headers, service tiers, log probabilities, system fingerprints, etc. - Extra keys are permitted from what is typed here (via `total=False`), allowing + Extra keys are permitted from what is typed here (via ``total=False``), allowing for provider-specific metadata to be included without breaking the type definition. """ model_provider: str - """Name and version of the provider that created the message (e.g., openai).""" + """Name and version of the provider that created the message (ex: ``'openai'``).""" model_name: str """Name of the model that generated the message.""" @@ -61,14 +61,14 @@ class ResponseMetadata(TypedDict, total=False): @dataclass class AIMessage: - """A message generated by an AI assistant. + """A v1 message generated by an AI assistant. Represents a response from an AI model, including text content, tool calls, and metadata about the generation process. Attributes: + type: Message type identifier, always ``'ai'``. id: Unique identifier for the message. - type: Message type identifier, always "ai". name: Optional human-readable name for the message. lc_version: Encoding version for the message. content: List of content blocks containing the message data. @@ -133,7 +133,7 @@ class AIMessage: invalid_tool_calls: Optional[list[types.InvalidToolCall]] = None, parsed: Optional[Union[dict[str, Any], BaseModel]] = None, ): - """Initialize an AI message. + """Initialize a v1 AI message. Args: content: Message content as string or list of content blocks. @@ -149,7 +149,7 @@ class AIMessage: parsed: Optional auto-parsed message contents, if applicable. 
""" if isinstance(content, str): - self.content = [{"type": "text", "text": content}] + self.content = [types.create_text_block(content)] else: self.content = content @@ -168,7 +168,7 @@ class AIMessage: content_tool_calls = { block["id"] for block in self.content - if block["type"] == "tool_call" and "id" in block + if types.is_tool_call_block(block) and "id" in block } for tool_call in tool_calls: if "id" in tool_call and tool_call["id"] in content_tool_calls: @@ -178,7 +178,7 @@ class AIMessage: content_tool_calls = { block["id"] for block in self.content - if block["type"] == "invalid_tool_call" and "id" in block + if types.is_invalid_tool_call_block(block) and "id" in block } for invalid_tool_call in invalid_tool_calls: if ( @@ -187,25 +187,26 @@ class AIMessage: ): continue self.content.append(invalid_tool_call) - self._tool_calls = [ - block for block in self.content if block["type"] == "tool_call" + self._tool_calls: list[types.ToolCall] = [ + block for block in self.content if types.is_tool_call_block(block) ] - self._invalid_tool_calls = [ - block for block in self.content if block["type"] == "invalid_tool_call" + self._invalid_tool_calls: list[types.InvalidToolCall] = [ + block for block in self.content if types.is_invalid_tool_call_block(block) ] @property def text(self) -> str: """Extract all text content from the AI message as a string.""" - text_blocks = [block for block in self.content if block["type"] == "text"] - return "".join(block["text"] for block in text_blocks) + return "".join( + block["text"] for block in self.content if types.is_text_block(block) + ) @property def tool_calls(self) -> list[types.ToolCall]: """Get the tool calls made by the AI.""" if not self._tool_calls: self._tool_calls = [ - block for block in self.content if block["type"] == "tool_call" + block for block in self.content if types.is_tool_call_block(block) ] return self._tool_calls @@ -219,7 +220,9 @@ class AIMessage: """Get the invalid tool calls made by the AI.""" if not self._invalid_tool_calls: self._invalid_tool_calls = [ - block for block in self.content if block["type"] == "invalid_tool_call" + block + for block in self.content + if types.is_invalid_tool_call_block(block) ] return self._invalid_tool_calls @@ -232,8 +235,8 @@ class AIMessageChunk(AIMessage): during streaming generation. Contains partial content and metadata. Attributes: + type: Message type identifier, always ``'ai_chunk'``. id: Unique identifier for the message chunk. - type: Message type identifier, always "ai_chunk". name: Optional human-readable name for the message. content: List of content blocks containing partial message data. tool_call_chunks: Optional list of partial tool call data. @@ -260,7 +263,7 @@ class AIMessageChunk(AIMessage): parsed: Optional[Union[dict[str, Any], BaseModel]] = None, chunk_position: Optional[Literal["last"]] = None, ): - """Initialize an AI message. + """Initialize a v1 AI message. Args: content: Message content as string or list of content blocks. 
@@ -294,14 +297,14 @@ class AIMessageChunk(AIMessage): content_tool_call_chunks = { block["id"] for block in self.content - if block.get("type") == "tool_call_chunk" and "id" in block + if types.is_tool_call_chunk(block) and "id" in block } for chunk in tool_call_chunks: if "id" in chunk and chunk["id"] in content_tool_call_chunks: continue self.content.append(chunk) self._tool_call_chunks = [ - block for block in self.content if block.get("type") == "tool_call_chunk" + block for block in self.content if types.is_tool_call_chunk(block) ] self._tool_calls: list[types.ToolCall] = [] @@ -312,9 +315,7 @@ class AIMessageChunk(AIMessage): """Get the tool calls made by the AI.""" if not self._tool_call_chunks: self._tool_call_chunks = [ - block - for block in self.content - if block.get("type") == "tool_call_chunk" + block for block in self.content if types.is_tool_call_chunk(block) ] return cast("list[types.ToolCallChunk]", self._tool_call_chunks) @@ -323,14 +324,15 @@ class AIMessageChunk(AIMessage): """Get the tool calls made by the AI.""" if not self._tool_calls: parsed_content = _init_tool_calls(self.content) - self._tool_calls = [ - block for block in parsed_content if block["type"] == "tool_call" - ] - self._invalid_tool_calls = [ - block - for block in parsed_content - if block["type"] == "invalid_tool_call" - ] + tool_calls: list[types.ToolCall] = [] + invalid_tool_calls: list[types.InvalidToolCall] = [] + for block in parsed_content: + if types.is_tool_call_block(block): + tool_calls.append(block) + elif types.is_invalid_tool_call_block(block): + invalid_tool_calls.append(block) + self._tool_calls = tool_calls + self._invalid_tool_calls = invalid_tool_calls return self._tool_calls @tool_calls.setter @@ -343,18 +345,19 @@ class AIMessageChunk(AIMessage): """Get the invalid tool calls made by the AI.""" if not self._invalid_tool_calls: parsed_content = _init_tool_calls(self.content) - self._tool_calls = [ - block for block in parsed_content if block["type"] == "tool_call" - ] - self._invalid_tool_calls = [ - block - for block in parsed_content - if block["type"] == "invalid_tool_call" - ] + tool_calls: list[types.ToolCall] = [] + invalid_tool_calls: list[types.InvalidToolCall] = [] + for block in parsed_content: + if types.is_tool_call_block(block): + tool_calls.append(block) + elif types.is_invalid_tool_call_block(block): + invalid_tool_calls.append(block) + self._tool_calls = tool_calls + self._invalid_tool_calls = invalid_tool_calls return self._invalid_tool_calls def __add__(self, other: Any) -> "AIMessageChunk": - """Add AIMessageChunk to this one.""" + """Add ``AIMessageChunk`` to this one.""" if isinstance(other, AIMessageChunk): return add_ai_message_chunks(self, other) if isinstance(other, (list, tuple)) and all( @@ -365,7 +368,7 @@ class AIMessageChunk(AIMessage): raise NotImplementedError(error_msg) def to_message(self) -> "AIMessage": - """Convert this AIMessageChunk to an AIMessage.""" + """Convert this ``AIMessageChunk`` to an ``AIMessage``.""" return AIMessage( content=_init_tool_calls(self.content), id=self.id, @@ -381,38 +384,35 @@ def _init_tool_calls(content: list[types.ContentBlock]) -> list[types.ContentBlo """Parse tool call chunks in content into tool calls.""" new_content = [] for block in content: - if block.get("type") != "tool_call_chunk": + if not types.is_tool_call_chunk(block): new_content.append(block) continue try: - args_ = ( - parse_partial_json(cast("str", block.get("args") or "")) - if block.get("args") - else {} - ) + args_str = block.get("args") + 
args_ = parse_partial_json(str(args_str)) if args_str else {} if isinstance(args_, dict): new_content.append( create_tool_call( - name=cast("str", block.get("name") or ""), + name=block.get("name") or "", args=args_, - id=cast("str", block.get("id", "")), + id=block.get("id", ""), ) ) else: new_content.append( create_invalid_tool_call( - name=cast("str", block.get("name", "")), - args=cast("str", block.get("args", "")), - id=cast("str", block.get("id", "")), + name=block.get("name", ""), + args=block.get("args", ""), + id=block.get("id", ""), error=None, ) ) except Exception: new_content.append( create_invalid_tool_call( - name=cast("str", block.get("name", "")), - args=cast("str", block.get("args", "")), - id=cast("str", block.get("id", "")), + name=block.get("name", ""), + args=block.get("args", ""), + id=block.get("id", ""), error=None, ) ) @@ -422,7 +422,7 @@ def _init_tool_calls(content: list[types.ContentBlock]) -> list[types.ContentBlo def add_ai_message_chunks( left: AIMessageChunk, *others: AIMessageChunk ) -> AIMessageChunk: - """Add multiple AIMessageChunks together.""" + """Add multiple ``AIMessageChunks`` together.""" if not others: return left content = cast( @@ -466,13 +466,13 @@ def add_ai_message_chunks( chunk_id = id_ break else: - # second pass: prefer lc_* ids over run-* ids + # second pass: prefer lc_run-* ids over lc_* ids for id_ in candidates: - if id_ and id_.startswith(_LC_AUTO_PREFIX): + if id_ and id_.startswith(_LC_ID_PREFIX): chunk_id = id_ break else: - # third pass: take any remaining id (run-* ids) + # third pass: take any remaining id (auto-generated lc_* ids) for id_ in candidates: if id_: chunk_id = id_ @@ -502,10 +502,10 @@ class HumanMessage: or other content types like images. Attributes: + type: Message type identifier, always ``'human'``. id: Unique identifier for the message. content: List of content blocks containing the user's input. name: Optional human-readable name for the message. - type: Message type identifier, always "human". """ id: str @@ -541,7 +541,7 @@ class HumanMessage: id: Optional[str] = None, name: Optional[str] = None, ): - """Initialize a human message. + """Initialize a v1 human message. Args: content: Message content as string or list of content blocks. @@ -562,7 +562,7 @@ class HumanMessage: Concatenated string of all text blocks in the message. """ return "".join( - block["text"] for block in self.content if block["type"] == "text" + block["text"] for block in self.content if types.is_text_block(block) ) @@ -574,9 +574,9 @@ class SystemMessage: behavior and understanding of the conversation. Attributes: + type: Message type identifier, always ``'system'``. id: Unique identifier for the message. content: List of content blocks containing system instructions. - type: Message type identifier, always "system". """ id: str @@ -608,7 +608,7 @@ class SystemMessage: custom_role: Optional[str] = None """If provided, a custom role for the system message. - Example: ``"developer"``. + Example: ``'developer'``. Integration packages may use this field to assign the system message role if it contains a recognized value. @@ -622,7 +622,7 @@ class SystemMessage: custom_role: Optional[str] = None, name: Optional[str] = None, ): - """Initialize a human message. + """Initialize a v1 system message. Args: content: Message content as string or list of content blocks. 
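Tying the chunk-handling pieces above together: when ``AIMessageChunk`` objects are added, tool-call chunks that share an ``index`` have their string attributes concatenated, and ``to_message()`` runs ``_init_tool_calls`` to parse the accumulated partial JSON into proper tool-call blocks. A rough sketch, with field values chosen purely for illustration:

.. code-block:: python

    from langchain_core.v1.messages import AIMessageChunk

    first = AIMessageChunk(
        content=[],
        tool_call_chunks=[
            {
                "type": "tool_call_chunk",
                "name": "get_weather",
                "args": '{"city": "Par',
                "id": "call_1",
                "index": 0,
            }
        ],
    )
    second = AIMessageChunk(
        content=[],
        tool_call_chunks=[
            {
                "type": "tool_call_chunk",
                "name": None,
                "args": 'is"}',
                "id": None,
                "index": 0,
            }
        ],
    )

    # Chunks with the same `index` merge; their string attributes concatenate.
    merged = first + second

    # to_message() parses the accumulated args into a tool_call block
    # (or an invalid_tool_call block if the JSON cannot be parsed).
    message = merged.to_message()
    print(message.tool_calls)  # expected: one call named "get_weather" with args {"city": "Paris"}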
@@ -641,7 +641,7 @@ class SystemMessage: def text(self) -> str: """Extract all text content from the system message.""" return "".join( - block["text"] for block in self.content if block["type"] == "text" + block["text"] for block in self.content if types.is_text_block(block) ) @@ -653,12 +653,12 @@ class ToolMessage(ToolOutputMixin): including the result data and execution status. Attributes: + type: Message type identifier, always ``'tool'``. id: Unique identifier for the message. tool_call_id: ID of the tool call this message responds to. content: The result content from tool execution. artifact: Optional app-side payload not intended for the model. status: Execution status ("success" or "error"). - type: Message type identifier, always "tool". """ id: str @@ -709,7 +709,7 @@ class ToolMessage(ToolOutputMixin): artifact: Optional[Any] = None, status: Literal["success", "error"] = "success", ): - """Initialize a human message. + """Initialize a v1 tool message. Args: content: Message content as string or list of content blocks. @@ -717,7 +717,7 @@ class ToolMessage(ToolOutputMixin): id: Optional unique identifier for the message. name: Optional human-readable name for the message. artifact: Optional app-side payload not intended for the model. - status: Execution status ("success" or "error"). + status: Execution status (``'success'`` or ``'error'``). """ self.id = _ensure_id(id) self.tool_call_id = tool_call_id @@ -733,7 +733,7 @@ class ToolMessage(ToolOutputMixin): def text(self) -> str: """Extract all text content from the tool message.""" return "".join( - block["text"] for block in self.content if block["type"] == "text" + block["text"] for block in self.content if types.is_text_block(block) ) def __post_init__(self) -> None: diff --git a/libs/core/tests/unit_tests/load/test_serializable.py b/libs/core/tests/unit_tests/load/test_serializable.py index 59bb6d54697..e782709fd41 100644 --- a/libs/core/tests/unit_tests/load/test_serializable.py +++ b/libs/core/tests/unit_tests/load/test_serializable.py @@ -1,6 +1,9 @@ +import json + +import pytest from pydantic import BaseModel, ConfigDict, Field -from langchain_core.load import Serializable, dumpd, load +from langchain_core.load import Serializable, dumpd, dumps, load from langchain_core.load.serializable import _is_field_useful from langchain_core.messages import AIMessage from langchain_core.outputs import ChatGeneration, Generation @@ -276,3 +279,92 @@ def test_serialization_with_ignore_unserializable_fields() -> None: ] ] } + + +# Tests for dumps() function +def test_dumps_basic_serialization() -> None: + """Test basic string serialization with `dumps()`.""" + foo = Foo(bar=42, baz="test") + json_str = dumps(foo) + + # Should be valid JSON + parsed = json.loads(json_str) + assert parsed == { + "id": ["tests", "unit_tests", "load", "test_serializable", "Foo"], + "kwargs": {"bar": 42, "baz": "test"}, + "lc": 1, + "type": "constructor", + } + + +def test_dumps_pretty_formatting() -> None: + """Test pretty printing functionality.""" + foo = Foo(bar=1, baz="hello") + + # Test pretty=True with default indent + pretty_json = dumps(foo, pretty=True) + assert " " in pretty_json + + # Test custom indent (4-space) + custom_indent = dumps(foo, pretty=True, indent=4) + assert " " in custom_indent + + # Verify it's still valid JSON + parsed = json.loads(pretty_json) + assert parsed["kwargs"]["bar"] == 1 + + +def test_dumps_invalid_default_kwarg() -> None: + """Test that passing `'default'` as kwarg raises ValueError.""" + foo = Foo(bar=1, 
baz="test") + + with pytest.raises(ValueError, match="`default` should not be passed to dumps"): + dumps(foo, default=lambda x: x) + + +def test_dumps_additional_json_kwargs() -> None: + """Test that additional JSON kwargs are passed through.""" + foo = Foo(bar=1, baz="test") + + compact_json = dumps(foo, separators=(",", ":")) + assert ", " not in compact_json # Should be compact + + # Test sort_keys + sorted_json = dumps(foo, sort_keys=True) + parsed = json.loads(sorted_json) + assert parsed == dumpd(foo) + + +def test_dumps_non_serializable_object() -> None: + """Test `dumps()` behavior with non-serializable objects.""" + + class NonSerializable: + def __init__(self, value: int) -> None: + self.value = value + + obj = NonSerializable(42) + json_str = dumps(obj) + + # Should create a "not_implemented" representation + parsed = json.loads(json_str) + assert parsed["lc"] == 1 + assert parsed["type"] == "not_implemented" + assert "NonSerializable" in parsed["repr"] + + +def test_dumps_mixed_data_structure() -> None: + """Test `dumps()` with complex nested data structures.""" + data = { + "serializable": Foo(bar=1, baz="test"), + "list": [1, 2, {"nested": "value"}], + "primitive": "string", + } + + json_str = dumps(data) + parsed = json.loads(json_str) + + # Serializable object should be properly serialized + assert parsed["serializable"]["type"] == "constructor" + # Primitives should remain unchanged + assert parsed["list"] == [1, 2, {"nested": "value"}] + assert parsed["primitive"] == "string" diff --git a/libs/core/tests/unit_tests/messages/test_content_block_factories.py b/libs/core/tests/unit_tests/messages/test_content_block_factories.py index 51b30f501eb..ab655b2bc20 100644 --- a/libs/core/tests/unit_tests/messages/test_content_block_factories.py +++ b/libs/core/tests/unit_tests/messages/test_content_block_factories.py @@ -117,13 +117,6 @@ class TestImageBlockFactory: ): create_image_block() - def test_base64_without_mime_type_raises_error(self) -> None: - """Test that base64 without mime_type raises ValueError.""" - with pytest.raises( - ValueError, match="mime_type is required when using base64 data" - ): - create_image_block(base64="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ") - def test_with_index(self) -> None: """Test image block creation with index.""" block = create_image_block(url="https://example.com/image.jpg", index=1) @@ -166,13 +159,6 @@ class TestVideoBlockFactory: ): create_video_block() - def test_base64_without_mime_type_raises_error(self) -> None: - """Test that base64 without mime_type raises ValueError.""" - with pytest.raises( - ValueError, match="mime_type is required when using base64 data" - ): - create_video_block(base64="UklGRnoGAABXQVZFZm10IBAAAAABAAEA") - class TestAudioBlockFactory: """Test create_audio_block factory function.""" diff --git a/libs/core/tests/unit_tests/messages/test_imports.py b/libs/core/tests/unit_tests/messages/test_imports.py index 9fda5493244..750f2f49f06 100644 --- a/libs/core/tests/unit_tests/messages/test_imports.py +++ b/libs/core/tests/unit_tests/messages/test_imports.py @@ -43,6 +43,10 @@ EXPECTED_ALL = [ "convert_to_messages", "get_buffer_string", "is_data_content_block", + "is_reasoning_block", + "is_text_block", + "is_tool_call_block", + "is_tool_call_chunk", "merge_content", "message_chunk_to_message", "message_to_dict", diff --git a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr index 44175f5b93d..f45b2f7dae9 100644 --- 
a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr +++ b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr @@ -1062,7 +1062,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -2523,7 +2523,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr index c014396ecab..d9f1ec1204d 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr @@ -412,7 +412,6 @@ .. note:: ``create_audio_block`` may also be used as a factory to create an ``AudioContentBlock``. Benefits include: - * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time ''', @@ -618,7 +617,7 @@ Annotation for citing data from a document. .. note:: - ``start/end`` indices refer to the **response text**, + ``start``/``end`` indices refer to the **response text**, not the source text. This means that the indices are relative to the model's response, not the original document (as specified in the ``url``). @@ -1258,7 +1257,7 @@ The purpose of this block should be to simply hold a provider-specific payload. If a provider's non-standard output includes reasoning and tool calls, it should be the adapter's job to parse that payload and emit the corresponding standard - ReasoningContentBlock and ToolCallContentBlocks. + ``ReasoningContentBlock`` and ``ToolCallContentBlocks``. .. note:: ``create_non_standard_block`` may also be used as a factory to create a @@ -1440,7 +1439,7 @@ Contains additional information returned by the provider, such as response headers, service tiers, log probabilities, system fingerprints, etc. - Extra keys are permitted from what is typed here (via `total=False`), allowing + Extra keys are permitted from what is typed here (via ``total=False``), allowing for provider-specific metadata to be included without breaking the type definition. ''', @@ -1656,7 +1655,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. 
@@ -2541,10 +2540,10 @@ '$ref': '#/$defs/ToolCall', }), dict({ - '$ref': '#/$defs/InvalidToolCall', + '$ref': '#/$defs/ToolCallChunk', }), dict({ - '$ref': '#/$defs/ToolCallChunk', + '$ref': '#/$defs/InvalidToolCall', }), dict({ '$ref': '#/$defs/ReasoningContentBlock', @@ -2667,10 +2666,10 @@ '$ref': '#/$defs/ToolCall', }), dict({ - '$ref': '#/$defs/InvalidToolCall', + '$ref': '#/$defs/ToolCallChunk', }), dict({ - '$ref': '#/$defs/ToolCallChunk', + '$ref': '#/$defs/InvalidToolCall', }), dict({ '$ref': '#/$defs/ReasoningContentBlock', @@ -2793,10 +2792,10 @@ '$ref': '#/$defs/ToolCall', }), dict({ - '$ref': '#/$defs/InvalidToolCall', + '$ref': '#/$defs/ToolCallChunk', }), dict({ - '$ref': '#/$defs/ToolCallChunk', + '$ref': '#/$defs/InvalidToolCall', }), dict({ '$ref': '#/$defs/ReasoningContentBlock', @@ -2881,10 +2880,10 @@ '$ref': '#/$defs/ToolCall', }), dict({ - '$ref': '#/$defs/InvalidToolCall', + '$ref': '#/$defs/ToolCallChunk', }), dict({ - '$ref': '#/$defs/ToolCallChunk', + '$ref': '#/$defs/InvalidToolCall', }), dict({ '$ref': '#/$defs/ReasoningContentBlock', @@ -2992,10 +2991,10 @@ '$ref': '#/$defs/ToolCall', }), dict({ - '$ref': '#/$defs/InvalidToolCall', + '$ref': '#/$defs/ToolCallChunk', }), dict({ - '$ref': '#/$defs/ToolCallChunk', + '$ref': '#/$defs/InvalidToolCall', }), dict({ '$ref': '#/$defs/ReasoningContentBlock', diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index 0d4407d8ca4..dadbdb103da 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -3006,7 +3006,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -4530,7 +4530,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -6066,7 +6066,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -7458,7 +7458,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -9024,7 +9024,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -9435,7 +9435,6 @@ .. 
note:: ``create_audio_block`` may also be used as a factory to create an ``AudioContentBlock``. Benefits include: - * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time ''', @@ -9638,7 +9637,7 @@ Annotation for citing data from a document. .. note:: - ``start/end`` indices refer to the **response text**, + ``start``/``end`` indices refer to the **response text**, not the source text. This means that the indices are relative to the model's response, not the original document (as specified in the ``url``). @@ -10267,7 +10266,7 @@ The purpose of this block should be to simply hold a provider-specific payload. If a provider's non-standard output includes reasoning and tool calls, it should be the adapter's job to parse that payload and emit the corresponding standard - ReasoningContentBlock and ToolCallContentBlocks. + ``ReasoningContentBlock`` and ``ToolCallContentBlocks``. .. note:: ``create_non_standard_block`` may also be used as a factory to create a @@ -10445,7 +10444,7 @@ Contains additional information returned by the provider, such as response headers, service tiers, log probabilities, system fingerprints, etc. - Extra keys are permitted from what is typed here (via `total=False`), allowing + Extra keys are permitted from what is typed here (via ``total=False``), allowing for provider-specific metadata to be included without breaking the type definition. ''', @@ -10658,7 +10657,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -11533,10 +11532,10 @@ '$ref': '#/definitions/ToolCall', }), dict({ - '$ref': '#/definitions/InvalidToolCall', + '$ref': '#/definitions/ToolCallChunk', }), dict({ - '$ref': '#/definitions/ToolCallChunk', + '$ref': '#/definitions/InvalidToolCall', }), dict({ '$ref': '#/definitions/ReasoningContentBlock', @@ -11658,10 +11657,10 @@ '$ref': '#/definitions/ToolCall', }), dict({ - '$ref': '#/definitions/InvalidToolCall', + '$ref': '#/definitions/ToolCallChunk', }), dict({ - '$ref': '#/definitions/ToolCallChunk', + '$ref': '#/definitions/InvalidToolCall', }), dict({ '$ref': '#/definitions/ReasoningContentBlock', @@ -11783,10 +11782,10 @@ '$ref': '#/definitions/ToolCall', }), dict({ - '$ref': '#/definitions/InvalidToolCall', + '$ref': '#/definitions/ToolCallChunk', }), dict({ - '$ref': '#/definitions/ToolCallChunk', + '$ref': '#/definitions/InvalidToolCall', }), dict({ '$ref': '#/definitions/ReasoningContentBlock', @@ -11870,10 +11869,10 @@ '$ref': '#/definitions/ToolCall', }), dict({ - '$ref': '#/definitions/InvalidToolCall', + '$ref': '#/definitions/ToolCallChunk', }), dict({ - '$ref': '#/definitions/ToolCallChunk', + '$ref': '#/definitions/InvalidToolCall', }), dict({ '$ref': '#/definitions/ReasoningContentBlock', @@ -11980,10 +11979,10 @@ '$ref': '#/definitions/ToolCall', }), dict({ - '$ref': '#/definitions/InvalidToolCall', + '$ref': '#/definitions/ToolCallChunk', }), dict({ - '$ref': '#/definitions/ToolCallChunk', + '$ref': '#/definitions/InvalidToolCall', }), dict({ '$ref': '#/definitions/ReasoningContentBlock', @@ -13212,7 +13211,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). 
- When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. @@ -14698,7 +14697,7 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their values of ``index`` are equal and not ``None``. diff --git a/libs/core/tests/unit_tests/test_messages.py b/libs/core/tests/unit_tests/test_messages.py index 04c6ca4578b..ea3ac1fb7d5 100644 --- a/libs/core/tests/unit_tests/test_messages.py +++ b/libs/core/tests/unit_tests/test_messages.py @@ -185,16 +185,17 @@ def test_message_chunks() -> None: # Test ID order of precedence null_id = AIMessageChunk(content="", id=None) default_id = AIMessageChunk( - content="", id="run-abc123" + content="", id="lc_run--abc123" ) # LangChain-assigned run ID meaningful_id = AIMessageChunk(content="", id="msg_def456") # provider-assigned ID - assert (null_id + default_id).id == "run-abc123" - assert (default_id + null_id).id == "run-abc123" + assert (null_id + default_id).id == "lc_run--abc123" + assert (default_id + null_id).id == "lc_run--abc123" assert (null_id + meaningful_id).id == "msg_def456" assert (meaningful_id + null_id).id == "msg_def456" + # Provider assigned IDs have highest precedence assert (default_id + meaningful_id).id == "msg_def456" assert (meaningful_id + default_id).id == "msg_def456" @@ -326,13 +327,12 @@ def test_message_chunks_v1() -> None: # Note: AIMessageChunkV1 always generates an ID if none provided auto_id = AIMessageChunkV1(content=[]) # Gets auto-generated lc_* ID default_id = AIMessageChunkV1( - content=[], id="run-abc123" + content=[], id="lc_run--abc123" ) # LangChain-assigned run ID meaningful_id = AIMessageChunkV1( content=[], id="msg_def456" ) # provider-assigned ID - # Provider-assigned IDs (non-run-* and non-lc_*) have highest precedence # Provider-assigned IDs always win over LangChain-generated IDs assert (auto_id + meaningful_id).id == "msg_def456" # provider-assigned wins assert (meaningful_id + auto_id).id == "msg_def456" # provider-assigned wins @@ -344,9 +344,9 @@ def test_message_chunks_v1() -> None: meaningful_id + default_id ).id == "msg_def456" # meaningful_id is provider-assigned - # Between auto-generated and run-* IDs, auto-generated wins (since lc_ != run-) - assert (auto_id + default_id).id == auto_id.id - assert (default_id + auto_id).id == auto_id.id + # Between auto-generated and lc_run--* IDs, run IDs win + assert (auto_id + default_id).id == default_id.id + assert (default_id + auto_id).id == default_id.id def test_chat_message_chunks() -> None: diff --git a/libs/partners/ollama/README.md b/libs/partners/ollama/README.md index 937ef85b1c1..e0feb34d8ac 100644 --- a/libs/partners/ollama/README.md +++ b/libs/partners/ollama/README.md @@ -32,6 +32,14 @@ llm = ChatOllama(model="llama3.1") llm.invoke("Sing a ballad of LangChain.") ``` +### v1 Chat Models + +For v1 chat models, you can use the `ChatOllama` class with the `v1` namespace. 
+ +```python +from langchain_ollama.v1.chat_models import ChatOllama +``` + ## [Embeddings](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html#ollamaembeddings) `OllamaEmbeddings` class exposes embeddings from Ollama. diff --git a/libs/partners/ollama/langchain_ollama/__init__.py b/libs/partners/ollama/langchain_ollama/__init__.py index 4d9864fc6a2..3789c6de8a4 100644 --- a/libs/partners/ollama/langchain_ollama/__init__.py +++ b/libs/partners/ollama/langchain_ollama/__init__.py @@ -10,6 +10,7 @@ service. exist locally. This is useful for ensuring that the model is available before attempting to use it, especially in environments where models may not be pre-downloaded. + """ from importlib import metadata @@ -19,6 +20,8 @@ from langchain_ollama.embeddings import OllamaEmbeddings from langchain_ollama.llms import OllamaLLM try: + if __package__ is None: + raise metadata.PackageNotFoundError __version__ = metadata.version(__package__) except metadata.PackageNotFoundError: # Case where package metadata is not available. diff --git a/libs/partners/ollama/langchain_ollama/_compat.py b/libs/partners/ollama/langchain_ollama/_compat.py new file mode 100644 index 00000000000..ac2cedf4d5b --- /dev/null +++ b/libs/partners/ollama/langchain_ollama/_compat.py @@ -0,0 +1,338 @@ +"""LangChain v1 message conversion utilities for Ollama.""" + +from __future__ import annotations + +from typing import Any, cast +from uuid import uuid4 + +from langchain_core.messages import content_blocks as types +from langchain_core.messages.ai import UsageMetadata +from langchain_core.messages.content_blocks import ( + ImageContentBlock, + ReasoningContentBlock, + TextContentBlock, + ToolCall, + ToolCallChunk, +) +from langchain_core.v1.messages import ( + AIMessage, + AIMessageChunk, + HumanMessage, + MessageV1, + ResponseMetadata, + SystemMessage, + ToolMessage, +) + + +def _get_usage_metadata_from_response( + response: dict[str, Any], +) -> UsageMetadata | None: + """Extract usage metadata from Ollama response.""" + input_tokens = response.get("prompt_eval_count") + output_tokens = response.get("eval_count") + if input_tokens is not None and output_tokens is not None: + return UsageMetadata( + input_tokens=input_tokens, + output_tokens=output_tokens, + total_tokens=input_tokens + output_tokens, + ) + return None + + +def _convert_from_v1_to_ollama_format(message: MessageV1) -> dict[str, Any]: + """Convert v1 message to Ollama API format.""" + if isinstance(message, HumanMessage): + return _convert_human_message_v1(message) + if isinstance(message, AIMessage): + return _convert_ai_message_v1(message) + if isinstance(message, SystemMessage): + return _convert_system_message_v1(message) + if isinstance(message, ToolMessage): + return _convert_tool_message_v1(message) + msg = f"Unsupported message type: {type(message)}" + raise ValueError(msg) + + +def _convert_content_blocks_to_ollama_format( + content: list[types.ContentBlock], +) -> tuple[str, list[str], list[dict[str, Any]]]: + """Convert v1 content blocks to Ollama API format. 
+ + Returns: + Tuple of `(text_content, images, tool_calls)` + """ + text_content = "" + + images = [] + """Base64 encoded image data.""" + + tool_calls = [] + + for block in content: + block_type = block.get("type") + if block_type == "text": + text_block = cast(TextContentBlock, block) + text_content += text_block["text"] + elif block_type == "image": + image_block = cast(ImageContentBlock, block) + if image_block.get("base64"): + # Ollama doesn't need MIME type or other metadata + if not isinstance(image_block.get("base64"), str): + # (This shouldn't happen in practice, but just in case) + msg = "Image content must be base64 encoded string" + raise ValueError(msg) + if not image_block.get("base64", "").strip(): + msg = "Image content cannot be empty" + raise ValueError(msg) + # Ensure we have plain/raw base64 data + if image_block.get("base64", "").startswith("data:"): + # Strip the data URI scheme (e.g., 'data:image/png;base64,') + image_block["base64"] = image_block.get("base64", "").split(",")[1] + images.append(image_block.get("base64", "")) + else: + msg = "Only base64 image data is supported by Ollama" + raise ValueError(msg) + elif block_type == "tool_call": + tool_call_block = cast(ToolCall, block) + tool_calls.append( + { + "type": "function", + "id": tool_call_block["id"], + "function": { + "name": tool_call_block["name"], + "arguments": tool_call_block["args"], + }, + } + ) + elif block_type == "invalid_tool_call": + # InvalidToolCall blocks are handled by converting to text + # May revisit this in the future + name = block.get("name", "unknown") + error = block.get("error", "unknown error") + text_content += f"[Invalid tool call: {name} - {error}]" + else: + # Skip other content block types that aren't supported + msg = f"Unsupported content block type: {block_type}" + raise ValueError(msg) + + return text_content, images, tool_calls + + +def _convert_human_message_v1(message: HumanMessage) -> dict[str, Any]: + """Convert HumanMessage to Ollama format.""" + text_content, images, _ = _convert_content_blocks_to_ollama_format(message.content) + + msg: dict[str, Any] = { + "role": "user", + "content": text_content, + "images": images, + } + if message.name: + # Ollama doesn't have direct name support, include in content + msg["content"] = f"[{message.name}]: {text_content}" + + return msg + + +def _convert_ai_message_v1(message: AIMessage) -> dict[str, Any]: + """Convert AIMessage to Ollama format.""" + text_content, _, tool_calls = _convert_content_blocks_to_ollama_format( + message.content + ) + + msg: dict[str, Any] = { + "role": "assistant", + "content": text_content, + } + + if tool_calls: + msg["tool_calls"] = tool_calls + + if message.name: + # Ollama doesn't have direct name support, include in content + msg["content"] = f"[{message.name}]: {text_content}" + + return msg + + +def _convert_system_message_v1(message: SystemMessage) -> dict[str, Any]: + """Convert SystemMessage to Ollama format.""" + text_content, _, _ = _convert_content_blocks_to_ollama_format(message.content) + + return { + "role": "system", + "content": text_content, + } + + +def _convert_tool_message_v1(message: ToolMessage) -> dict[str, Any]: + """Convert ToolMessage to Ollama format.""" + text_content, _, _ = _convert_content_blocks_to_ollama_format(message.content) + + return { + "role": "tool", + "content": text_content, + "tool_call_id": message.tool_call_id, + } + + +def _convert_to_v1_from_ollama_format(response: dict[str, Any]) -> AIMessage: + """Convert Ollama API response to AIMessage.""" + 
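As a rough sketch (not part of the module) of what the private converters above produce, assuming the helper keeps its current name and location:

```python
from langchain_core.messages.content_blocks import (
    ImageContentBlock,
    TextContentBlock,
)
from langchain_core.v1.messages import HumanMessage

from langchain_ollama._compat import _convert_from_v1_to_ollama_format

message = HumanMessage(
    content=[
        TextContentBlock(type="text", text="Describe this image:"),
        ImageContentBlock(type="image", base64="iVBORw0KGgo...", mime_type="image/png"),
    ]
)

# Text blocks are folded into a single `content` string, and base64 image data
# is collected into the separate `images` list expected by the Ollama API.
print(_convert_from_v1_to_ollama_format(message))
# -> {'role': 'user', 'content': 'Describe this image:', 'images': ['iVBORw0KGgo...']}
```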
content: list[types.ContentBlock] = [] + + # Handle text content + if "message" in response and "content" in response["message"]: + text_content = response["message"]["content"] + if text_content: + content.append(TextContentBlock(type="text", text=text_content)) + + # Handle reasoning content first (should come before main response) + if "message" in response and "thinking" in response["message"]: + thinking_content = response["message"]["thinking"] + if thinking_content: + content.append( + ReasoningContentBlock( + type="reasoning", + reasoning=thinking_content, + ) + ) + + # Handle tool calls + if "message" in response and "tool_calls" in response["message"]: + tool_calls = response["message"]["tool_calls"] + content.extend( + [ + ToolCall( + type="tool_call", + id=tool_call.get("id", str(uuid4())), + name=tool_call["function"]["name"], + args=tool_call["function"]["arguments"], + ) + for tool_call in tool_calls + ] + ) + + # Build response metadata + response_metadata = ResponseMetadata() + if "model" in response: + response_metadata["model_name"] = response["model"] + + # Cast to dict[str, Any] to allow provider-specific fields + # ResponseMetadata TypedDict only defines standard fields, but mypy doesn't + # understand that total=False allows arbitrary additional keys at runtime + metadata_as_dict = cast(dict[str, Any], response_metadata) + if "created_at" in response: + metadata_as_dict["created_at"] = response["created_at"] + if "done" in response: + metadata_as_dict["done"] = response["done"] + if "done_reason" in response: + metadata_as_dict["done_reason"] = response["done_reason"] + if "total_duration" in response: + metadata_as_dict["total_duration"] = response["total_duration"] + if "load_duration" in response: + metadata_as_dict["load_duration"] = response["load_duration"] + if "prompt_eval_count" in response: + metadata_as_dict["prompt_eval_count"] = response["prompt_eval_count"] + if "prompt_eval_duration" in response: + metadata_as_dict["prompt_eval_duration"] = response["prompt_eval_duration"] + if "eval_count" in response: + metadata_as_dict["eval_count"] = response["eval_count"] + if "eval_duration" in response: + metadata_as_dict["eval_duration"] = response["eval_duration"] + if "context" in response: + metadata_as_dict["context"] = response["context"] + + return AIMessage( + content=content, + response_metadata=response_metadata, + usage_metadata=_get_usage_metadata_from_response(response), + ) + + +def _convert_chunk_to_v1(chunk: dict[str, Any]) -> AIMessageChunk: + """Convert Ollama streaming chunk to AIMessageChunk.""" + content: list[types.ContentBlock] = [] + tool_call_chunks: list[ToolCallChunk] = [] + + # Handle reasoning content first in chunks + if "message" in chunk and "thinking" in chunk["message"]: + thinking_content = chunk["message"]["thinking"] + if thinking_content: + content.append( + ReasoningContentBlock( + type="reasoning", + reasoning=thinking_content, + ) + ) + + # Handle streaming text content + if "message" in chunk and "content" in chunk["message"]: + text_content = chunk["message"]["content"] + if text_content: + content.append(TextContentBlock(type="text", text=text_content)) + + # Handle streaming tool calls + if "message" in chunk and "tool_calls" in chunk["message"]: + tool_calls = chunk["message"]["tool_calls"] + for i, tool_call in enumerate(tool_calls): + tool_call_id = tool_call.get("id", f"lc_{uuid4()}") + tool_name = tool_call.get("function", {}).get("name", "") + tool_args = tool_call.get("function", {}).get("arguments", {}) + + # 
Add to content blocks for final representation + content.append( + ToolCall( + type="tool_call", + id=tool_call_id, + name=tool_name, + args=tool_args, + ) + ) + + # Add to tool call chunks for streaming + tool_call_chunks.append( + ToolCallChunk( + type="tool_call_chunk", + id=tool_call_id, + name=tool_name, + args=tool_args, + index=i, + ) + ) + + # Build response metadata for final chunks + response_metadata = None + if chunk.get("done") is True: + response_metadata = ResponseMetadata() + if "model" in chunk: + response_metadata["model_name"] = chunk["model"] + if "created_at" in chunk: + response_metadata["created_at"] = chunk["created_at"] # type: ignore[typeddict-unknown-key] + if "done_reason" in chunk: + response_metadata["done_reason"] = chunk["done_reason"] # type: ignore[typeddict-unknown-key] + if "total_duration" in chunk: + response_metadata["total_duration"] = chunk["total_duration"] # type: ignore[typeddict-unknown-key] + if "load_duration" in chunk: + response_metadata["load_duration"] = chunk["load_duration"] # type: ignore[typeddict-unknown-key] + if "prompt_eval_count" in chunk: + response_metadata["prompt_eval_count"] = chunk["prompt_eval_count"] # type: ignore[typeddict-unknown-key] + if "prompt_eval_duration" in chunk: + response_metadata["prompt_eval_duration"] = chunk["prompt_eval_duration"] # type: ignore[typeddict-unknown-key] + if "eval_count" in chunk: + response_metadata["eval_count"] = chunk["eval_count"] # type: ignore[typeddict-unknown-key] + if "eval_duration" in chunk: + response_metadata["eval_duration"] = chunk["eval_duration"] # type: ignore[typeddict-unknown-key] + if "context" in chunk: + response_metadata["context"] = chunk["context"] # type: ignore[typeddict-unknown-key] + + usage_metadata = None + if chunk.get("done") is True: + usage_metadata = _get_usage_metadata_from_response(chunk) + + return AIMessageChunk( + content=content, + response_metadata=response_metadata or ResponseMetadata(), + usage_metadata=usage_metadata, + tool_call_chunks=tool_call_chunks, + ) diff --git a/libs/partners/ollama/langchain_ollama/_utils.py b/libs/partners/ollama/langchain_ollama/_utils.py index f3cd6fe9a4d..8d08ed87a41 100644 --- a/libs/partners/ollama/langchain_ollama/_utils.py +++ b/libs/partners/ollama/langchain_ollama/_utils.py @@ -1,11 +1,11 @@ -"""Utility functions for validating Ollama models.""" +"""Utility function to validate Ollama models.""" from httpx import ConnectError from ollama import Client, ResponseError def validate_model(client: Client, model_name: str) -> None: - """Validate that a model exists in the Ollama instance. + """Validate that a model exists in the local Ollama instance. Args: client: The Ollama client. @@ -29,7 +29,10 @@ def validate_model(client: Client, model_name: str) -> None: ) raise ValueError(msg) except ConnectError as e: - msg = "Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download" # noqa: E501 + msg = ( + "Failed to connect to Ollama. Please check that Ollama is downloaded, " + "running and accessible. 
https://ollama.com/download" + ) raise ValueError(msg) from e except ResponseError as e: msg = ( diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py index ae836ed5d08..0e9625890a8 100644 --- a/libs/partners/ollama/langchain_ollama/chat_models.py +++ b/libs/partners/ollama/langchain_ollama/chat_models.py @@ -87,8 +87,8 @@ def _parse_json_string( ) -> Any: """Attempt to parse a JSON string for tool calling. - It first tries to use the standard json.loads. If that fails, it falls - back to ast.literal_eval to safely parse Python literals, which is more + It first tries to use the standard ``json.loads``. If that fails, it falls + back to ``ast.literal_eval`` to safely parse Python literals, which is more robust against models using single quotes or containing apostrophes. Args: @@ -100,7 +100,8 @@ def _parse_json_string( The parsed JSON string or Python literal. Raises: - OutputParserException: If the string is invalid and skip=False. + OutputParserException: If the string is invalid and ``skip=False``. + """ try: return json.loads(json_string) @@ -138,7 +139,9 @@ def _parse_arguments_from_tool_call( Band-aid fix for issue in Ollama with inconsistent tool call argument structure. Should be removed/changed if fixed upstream. + See https://github.com/ollama/ollama/issues/6155 + """ if "function" not in raw_tool_call: return None @@ -446,26 +449,26 @@ class ChatOllama(BaseChatModel): mirostat: Optional[int] = None """Enable Mirostat sampling for controlling perplexity. - (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)""" + (default: ``0``, ``0`` = disabled, ``1`` = Mirostat, ``2`` = Mirostat 2.0)""" mirostat_eta: Optional[float] = None """Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make - the algorithm more responsive. (Default: 0.1)""" + the algorithm more responsive. (Default: ``0.1``)""" mirostat_tau: Optional[float] = None """Controls the balance between coherence and diversity of the output. A lower value will result in more focused and - coherent text. (Default: 5.0)""" + coherent text. (Default: ``5.0``)""" num_ctx: Optional[int] = None """Sets the size of the context window used to generate the - next token. (Default: 2048) """ + next token. (Default: ``2048``) """ num_gpu: Optional[int] = None - """The number of GPUs to use. On macOS it defaults to 1 to - enable metal support, 0 to disable.""" + """The number of GPUs to use. On macOS it defaults to ``1`` to + enable metal support, ``0`` to disable.""" num_thread: Optional[int] = None """Sets the number of threads to use during computation. @@ -475,20 +478,20 @@ class ChatOllama(BaseChatModel): num_predict: Optional[int] = None """Maximum number of tokens to predict when generating text. - (Default: 128, -1 = infinite generation, -2 = fill context)""" + (Default: ``128``, ``-1`` = infinite generation, ``-2`` = fill context)""" repeat_last_n: Optional[int] = None """Sets how far back for the model to look back to prevent - repetition. (Default: 64, 0 = disabled, -1 = num_ctx)""" + repetition. (Default: ``64``, ``0`` = disabled, ``-1`` = ``num_ctx``)""" repeat_penalty: Optional[float] = None - """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) - will penalize repetitions more strongly, while a lower value (e.g., 0.9) - will be more lenient. (Default: 1.1)""" + """Sets how strongly to penalize repetitions. 
A higher value (e.g., ``1.5``) + will penalize repetitions more strongly, while a lower value (e.g., ``0.9``) + will be more lenient. (Default: ``1.1``)""" temperature: Optional[float] = None """The temperature of the model. Increasing the temperature will - make the model answer more creatively. (Default: 0.8)""" + make the model answer more creatively. (Default: ``0.8``)""" seed: Optional[int] = None """Sets the random number seed to use for generation. Setting this @@ -500,21 +503,21 @@ class ChatOllama(BaseChatModel): tfs_z: Optional[float] = None """Tail free sampling is used to reduce the impact of less probable - tokens from the output. A higher value (e.g., 2.0) will reduce the - impact more, while a value of 1.0 disables this setting. (default: 1)""" + tokens from the output. A higher value (e.g., ``2.0``) will reduce the + impact more, while a value of ``1.0`` disables this setting. (default: ``1``)""" top_k: Optional[int] = None - """Reduces the probability of generating nonsense. A higher value (e.g. 100) - will give more diverse answers, while a lower value (e.g. 10) - will be more conservative. (Default: 40)""" + """Reduces the probability of generating nonsense. A higher value (e.g. ``100``) + will give more diverse answers, while a lower value (e.g. ``10``) + will be more conservative. (Default: ``40``)""" top_p: Optional[float] = None - """Works together with top-k. A higher value (e.g., 0.95) will lead - to more diverse text, while a lower value (e.g., 0.5) will - generate more focused and conservative text. (Default: 0.9)""" + """Works together with top-k. A higher value (e.g., ``0.95``) will lead + to more diverse text, while a lower value (e.g., ``0.5``) will + generate more focused and conservative text. (Default: ``0.9``)""" format: Optional[Union[Literal["", "json"], JsonSchemaValue]] = None - """Specify the format of the output (options: "json", JSON schema).""" + """Specify the format of the output (options: ``'json'``, JSON schema).""" keep_alive: Optional[Union[int, str]] = None """How long the model will stay loaded into memory.""" @@ -524,32 +527,35 @@ class ChatOllama(BaseChatModel): client_kwargs: Optional[dict] = {} """Additional kwargs to pass to the httpx clients. + These arguments are passed to both synchronous and async clients. - Use sync_client_kwargs and async_client_kwargs to pass different arguments + + Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments to synchronous and asynchronous clients. + """ async_client_kwargs: Optional[dict] = {} - """Additional kwargs to merge with client_kwargs before + """Additional kwargs to merge with ``client_kwargs`` before passing to the httpx AsyncClient. + `Full list of params. `__ + """ sync_client_kwargs: Optional[dict] = {} - """Additional kwargs to merge with client_kwargs before + """Additional kwargs to merge with ``client_kwargs`` before passing to the httpx Client. + `Full list of params. `__ + """ _client: Client = PrivateAttr() - """ - The client to use for making requests. - """ + """The client to use for making requests.""" _async_client: AsyncClient = PrivateAttr() - """ - The async client to use for making requests. - """ + """The async client to use for making requests.""" def _chat_params( self, @@ -1064,14 +1070,14 @@ class ChatOllama(BaseChatModel): - ``'function_calling'``: Uses Ollama's tool-calling API - ``'json_mode'``: - Specifies ``format="json"``. Note that if using JSON mode then you + Specifies ``format='json'``. 
Note that if using JSON mode then you must include instructions for formatting the output into the desired schema into the model call. include_raw: If False then only the parsed structured output is returned. If an error occurs during model output parsing it will be raised. If True - then both the raw model response (a BaseMessage) and the parsed model + then both the raw model response (a ``BaseMessage``) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. @@ -1085,7 +1091,7 @@ class ChatOllama(BaseChatModel): If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage + - ``'raw'``: ``BaseMessage`` - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - ``'parsing_error'``: Optional[BaseException] @@ -1130,7 +1136,7 @@ class ChatOllama(BaseChatModel): # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' # ) - .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True + .. dropdown:: Example: ``schema=Pydantic`` class, ``method='json_schema'``, ``include_raw=True`` .. code-block:: python @@ -1159,7 +1165,7 @@ class ChatOllama(BaseChatModel): # 'parsing_error': None # } - .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False + .. dropdown:: Example: ``schema=Pydantic`` class, ``method='function_calling'``, ``include_raw=False`` .. code-block:: python @@ -1223,7 +1229,7 @@ class ChatOllama(BaseChatModel): # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' # } - .. dropdown:: Example: schema=OpenAI function schema, method="function_calling", include_raw=False + .. dropdown:: Example: ``schema=OpenAI`` function schema, ``method='function_calling'``, ``include_raw=False`` .. code-block:: python @@ -1253,7 +1259,7 @@ class ChatOllama(BaseChatModel): # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' # } - .. dropdown:: Example: schema=Pydantic class, method="json_mode", include_raw=True + .. dropdown:: Example: ``schema=Pydantic`` class, ``method='json_mode'``, ``include_raw=True`` .. code-block:: diff --git a/libs/partners/ollama/langchain_ollama/embeddings.py b/libs/partners/ollama/langchain_ollama/embeddings.py index ac5619a3b06..5f6c96a2997 100644 --- a/libs/partners/ollama/langchain_ollama/embeddings.py +++ b/libs/partners/ollama/langchain_ollama/embeddings.py @@ -21,12 +21,12 @@ class OllamaEmbeddings(BaseModel, Embeddings): """Ollama embedding model integration. Set up a local Ollama instance: - Install the Ollama package and set up a local Ollama instance - using the instructions here: https://github.com/ollama/ollama . + `Install the Ollama package `__ and set up a + local Ollama instance. You will need to choose a model to serve. - You can view a list of available models via the model library (https://ollama.com/library). + You can view a list of available models via `the model library `__. To fetch a model from the Ollama model library use ``ollama pull ``. 
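To make the setup steps above concrete, a minimal usage sketch, assuming the chosen model has already been pulled locally:

```python
from langchain_ollama import OllamaEmbeddings

# Assumes e.g. `ollama pull llama3` has already been run locally.
embeddings = OllamaEmbeddings(model="llama3")

vector = embeddings.embed_query("What is the meaning of life?")
print(len(vector))  # dimensionality depends on the chosen model

vectors = embeddings.embed_documents(["Document one", "Document two"])
print(len(vectors))  # one vector per input document
```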
@@ -39,8 +39,8 @@ class OllamaEmbeddings(BaseModel, Embeddings): This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model. - * On Mac, the models will be downloaded to ~/.ollama/models - * On Linux (or WSL), the models will be stored at /usr/share/ollama/.ollama/models + * On Mac, the models will be downloaded to ``~/.ollama/models`` + * On Linux (or WSL), the models will be stored at ``/usr/share/ollama/.ollama/models`` You can specify the exact version of the model of interest as such ``ollama pull vicuna:13b-v1.5-16k-q4_0``. @@ -132,6 +132,7 @@ class OllamaEmbeddings(BaseModel, Embeddings): """Whether to validate the model exists in ollama locally on initialization. .. versionadded:: 0.3.4 + """ base_url: Optional[str] = None @@ -139,60 +140,62 @@ class OllamaEmbeddings(BaseModel, Embeddings): client_kwargs: Optional[dict] = {} """Additional kwargs to pass to the httpx clients. + These arguments are passed to both synchronous and async clients. - Use sync_client_kwargs and async_client_kwargs to pass different arguments + + Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments to synchronous and asynchronous clients. + """ async_client_kwargs: Optional[dict] = {} - """Additional kwargs to merge with client_kwargs before passing to the httpx + """Additional kwargs to merge with ``client_kwargs`` before passing to the httpx AsyncClient. For a full list of the params, see the `HTTPX documentation `__. + """ sync_client_kwargs: Optional[dict] = {} - """Additional kwargs to merge with client_kwargs before passing to the HTTPX Client. + """Additional kwargs to merge with ``client_kwargs`` before + passing to the HTTPX Client. For a full list of the params, see the `HTTPX documentation `__. + """ _client: Optional[Client] = PrivateAttr(default=None) - """ - The client to use for making requests. - """ + """The client to use for making requests.""" _async_client: Optional[AsyncClient] = PrivateAttr(default=None) - """ - The async client to use for making requests. - """ + """The async client to use for making requests.""" mirostat: Optional[int] = None """Enable Mirostat sampling for controlling perplexity. - (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)""" + (default: ``0``, ``0`` = disabled, ``1`` = Mirostat, ``2`` = Mirostat 2.0)""" mirostat_eta: Optional[float] = None """Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make - the algorithm more responsive. (Default: 0.1)""" + the algorithm more responsive. (Default: ``0.1``)""" mirostat_tau: Optional[float] = None """Controls the balance between coherence and diversity of the output. A lower value will result in more focused and - coherent text. (Default: 5.0)""" + coherent text. (Default: ``5.0``)""" num_ctx: Optional[int] = None """Sets the size of the context window used to generate the - next token. (Default: 2048) """ + next token. (Default: ``2048``) """ num_gpu: Optional[int] = None - """The number of GPUs to use. On macOS it defaults to 1 to - enable metal support, 0 to disable.""" + """The number of GPUs to use. 
On macOS it defaults to ``1`` to + enable metal support, ``0`` to disable.""" keep_alive: Optional[int] = None - """controls how long the model will stay loaded into memory - following the request (default: 5m) + """Controls how long the model will stay loaded into memory + following the request (default: ``5m``) """ num_thread: Optional[int] = None @@ -203,34 +206,34 @@ class OllamaEmbeddings(BaseModel, Embeddings): repeat_last_n: Optional[int] = None """Sets how far back for the model to look back to prevent - repetition. (Default: 64, 0 = disabled, -1 = num_ctx)""" + repetition. (Default: ``64``, ``0`` = disabled, ``-1`` = ``num_ctx``)""" repeat_penalty: Optional[float] = None - """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) - will penalize repetitions more strongly, while a lower value (e.g., 0.9) - will be more lenient. (Default: 1.1)""" + """Sets how strongly to penalize repetitions. A higher value (e.g., ``1.5``) + will penalize repetitions more strongly, while a lower value (e.g., ``0.9``) + will be more lenient. (Default: ``1.1``)""" temperature: Optional[float] = None """The temperature of the model. Increasing the temperature will - make the model answer more creatively. (Default: 0.8)""" + make the model answer more creatively. (Default: ``0.8``)""" stop: Optional[list[str]] = None """Sets the stop tokens to use.""" tfs_z: Optional[float] = None """Tail free sampling is used to reduce the impact of less probable - tokens from the output. A higher value (e.g., 2.0) will reduce the - impact more, while a value of 1.0 disables this setting. (default: 1)""" + tokens from the output. A higher value (e.g., ``2.0``) will reduce the + impact more, while a value of ``1.0`` disables this setting. (default: ``1``)""" top_k: Optional[int] = None - """Reduces the probability of generating nonsense. A higher value (e.g. 100) - will give more diverse answers, while a lower value (e.g. 10) - will be more conservative. (Default: 40)""" + """Reduces the probability of generating nonsense. A higher value (e.g. ``100``) + will give more diverse answers, while a lower value (e.g. ``10``) + will be more conservative. (Default: ``40``)""" top_p: Optional[float] = None - """Works together with top-k. A higher value (e.g., 0.95) will lead - to more diverse text, while a lower value (e.g., 0.5) will - generate more focused and conservative text. (Default: 0.9)""" + """Works together with top-k. A higher value (e.g., ``0.95``) will lead + to more diverse text, while a lower value (e.g., ``0.5``) will + generate more focused and conservative text. (Default: ``0.9``)""" model_config = ConfigDict( extra="forbid", @@ -257,7 +260,7 @@ class OllamaEmbeddings(BaseModel, Embeddings): @model_validator(mode="after") def _set_clients(self) -> Self: - """Set clients to use for ollama.""" + """Set clients to use for Ollama.""" client_kwargs = self.client_kwargs or {} sync_client_kwargs = client_kwargs diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py index b433606340d..5ca2c961a2d 100644 --- a/libs/partners/ollama/langchain_ollama/llms.py +++ b/libs/partners/ollama/langchain_ollama/llms.py @@ -61,26 +61,26 @@ class OllamaLLM(BaseLLM): mirostat: Optional[int] = None """Enable Mirostat sampling for controlling perplexity. 
- (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)""" + (default: ``0``, ``0`` = disabled, ``1`` = Mirostat, ``2`` = Mirostat 2.0)""" mirostat_eta: Optional[float] = None """Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make - the algorithm more responsive. (Default: 0.1)""" + the algorithm more responsive. (Default: ``0.1``)""" mirostat_tau: Optional[float] = None """Controls the balance between coherence and diversity of the output. A lower value will result in more focused and - coherent text. (Default: 5.0)""" + coherent text. (Default: ``5.0``)""" num_ctx: Optional[int] = None """Sets the size of the context window used to generate the - next token. (Default: 2048)""" + next token. (Default: ``2048``)""" num_gpu: Optional[int] = None - """The number of GPUs to use. On macOS it defaults to 1 to - enable metal support, 0 to disable.""" + """The number of GPUs to use. On macOS it defaults to ``1`` to + enable metal support, ``0`` to disable.""" num_thread: Optional[int] = None """Sets the number of threads to use during computation. @@ -90,20 +90,20 @@ class OllamaLLM(BaseLLM): num_predict: Optional[int] = None """Maximum number of tokens to predict when generating text. - (Default: 128, -1 = infinite generation, -2 = fill context)""" + (Default: ``128``, ``-1`` = infinite generation, ``-2`` = fill context)""" repeat_last_n: Optional[int] = None """Sets how far back for the model to look back to prevent - repetition. (Default: 64, 0 = disabled, -1 = num_ctx)""" + repetition. (Default: ``64``, ``0`` = disabled, ``-1`` = ``num_ctx``)""" repeat_penalty: Optional[float] = None - """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) - will penalize repetitions more strongly, while a lower value (e.g., 0.9) - will be more lenient. (Default: 1.1)""" + """Sets how strongly to penalize repetitions. A higher value (e.g., ``1.5``) + will penalize repetitions more strongly, while a lower value (e.g., ``0.9``) + will be more lenient. (Default: ``1.1``)""" temperature: Optional[float] = None """The temperature of the model. Increasing the temperature will - make the model answer more creatively. (Default: 0.8)""" + make the model answer more creatively. (Default: ``0.8``)""" seed: Optional[int] = None """Sets the random number seed to use for generation. Setting this @@ -115,21 +115,21 @@ class OllamaLLM(BaseLLM): tfs_z: Optional[float] = None """Tail free sampling is used to reduce the impact of less probable - tokens from the output. A higher value (e.g., 2.0) will reduce the - impact more, while a value of 1.0 disables this setting. (default: 1)""" + tokens from the output. A higher value (e.g., ``2.0``) will reduce the + impact more, while a value of 1.0 disables this setting. (default: ``1``)""" top_k: Optional[int] = None - """Reduces the probability of generating nonsense. A higher value (e.g. 100) - will give more diverse answers, while a lower value (e.g. 10) - will be more conservative. (Default: 40)""" + """Reduces the probability of generating nonsense. A higher value (e.g. ``100``) + will give more diverse answers, while a lower value (e.g. ``10``) + will be more conservative. (Default: ``40``)""" top_p: Optional[float] = None - """Works together with top-k. A higher value (e.g., 0.95) will lead - to more diverse text, while a lower value (e.g., 0.5) will - generate more focused and conservative text. 
(Default: 0.9)""" + """Works together with top-k. A higher value (e.g., ``0.95``) will lead + to more diverse text, while a lower value (e.g., ``0.5``) will + generate more focused and conservative text. (Default: ``0.9``)""" format: Literal["", "json"] = "" - """Specify the format of the output (options: json)""" + """Specify the format of the output (options: ``'json'``)""" keep_alive: Optional[Union[int, str]] = None """How long the model will stay loaded into memory.""" @@ -139,33 +139,35 @@ class OllamaLLM(BaseLLM): client_kwargs: Optional[dict] = {} """Additional kwargs to pass to the httpx clients. + These arguments are passed to both synchronous and async clients. - Use sync_client_kwargs and async_client_kwargs to pass different arguments + + Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments to synchronous and asynchronous clients. + """ async_client_kwargs: Optional[dict] = {} - """Additional kwargs to merge with client_kwargs before passing to the HTTPX + """Additional kwargs to merge with ``client_kwargs`` before passing to the HTTPX AsyncClient. For a full list of the params, see the `HTTPX documentation `__. + """ sync_client_kwargs: Optional[dict] = {} - """Additional kwargs to merge with client_kwargs before passing to the HTTPX Client. + """Additional kwargs to merge with ``client_kwargs`` before + passing to the HTTPX Client. For a full list of the params, see the `HTTPX documentation `__. + """ _client: Optional[Client] = PrivateAttr(default=None) - """ - The client to use for making requests. - """ + """The client to use for making requests.""" _async_client: Optional[AsyncClient] = PrivateAttr(default=None) - """ - The async client to use for making requests. - """ + """The async client to use for making requests.""" def _generate_params( self, diff --git a/libs/partners/ollama/langchain_ollama/v1/__init__.py b/libs/partners/ollama/langchain_ollama/v1/__init__.py new file mode 100644 index 00000000000..b4440f2d095 --- /dev/null +++ b/libs/partners/ollama/langchain_ollama/v1/__init__.py @@ -0,0 +1,5 @@ +from langchain_ollama.v1.chat_models import ( + ChatOllama, +) + +__all__ = ["ChatOllama"] diff --git a/libs/partners/ollama/langchain_ollama/v1/chat_models/__init__.py b/libs/partners/ollama/langchain_ollama/v1/chat_models/__init__.py new file mode 100644 index 00000000000..03906ab2608 --- /dev/null +++ b/libs/partners/ollama/langchain_ollama/v1/chat_models/__init__.py @@ -0,0 +1,17 @@ +from ollama import AsyncClient, Client + +from langchain_ollama._utils import validate_model +from langchain_ollama.v1.chat_models.base import ( + ChatOllama, + _parse_arguments_from_tool_call, + _parse_json_string, +) + +__all__ = [ + "AsyncClient", + "ChatOllama", + "Client", + "_parse_arguments_from_tool_call", + "_parse_json_string", + "validate_model", +] diff --git a/libs/partners/ollama/langchain_ollama/v1/chat_models/base.py b/libs/partners/ollama/langchain_ollama/v1/chat_models/base.py new file mode 100644 index 00000000000..1383970fd16 --- /dev/null +++ b/libs/partners/ollama/langchain_ollama/v1/chat_models/base.py @@ -0,0 +1,941 @@ +"""v1 Ollama implementation. + +Provides native support for v1 messages with standard content blocks. + +.. 
versionadded:: 1.0.0 + +""" + +from __future__ import annotations + +import ast +import json +import logging +from collections.abc import AsyncIterator, Iterator, Sequence +from operator import itemgetter +from typing import Any, Callable, Literal, Optional, Union, cast + +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import LanguageModelInput +from langchain_core.language_models.chat_models import LangSmithParams +from langchain_core.output_parsers import ( + JsonOutputKeyToolsParser, + JsonOutputParser, + PydanticOutputParser, + PydanticToolsParser, +) +from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough +from langchain_core.tools import BaseTool +from langchain_core.utils.function_calling import ( + convert_to_json_schema, + convert_to_openai_tool, +) +from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass +from langchain_core.v1.chat_models import ( + BaseChatModel, + agenerate_from_stream, + generate_from_stream, +) +from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1 +from ollama import AsyncClient, Client, Options +from pydantic import BaseModel, PrivateAttr, model_validator +from pydantic.json_schema import JsonSchemaValue +from pydantic.v1 import BaseModel as BaseModelV1 +from typing_extensions import Self, is_typeddict + +from langchain_ollama._compat import ( + _convert_chunk_to_v1, + _convert_from_v1_to_ollama_format, + _convert_to_v1_from_ollama_format, +) +from langchain_ollama._utils import validate_model + +log = logging.getLogger(__name__) + + +def _parse_json_string( + json_string: str, + *, + raw_tool_call: dict[str, Any], + skip: bool, +) -> Any: + """Attempt to parse a JSON string for tool calling. + + It first tries to use the standard ``json.loads``. If that fails, it falls + back to ``ast.literal_eval`` to safely parse Python literals, which is more + robust against models using single quotes or containing apostrophes. + + Args: + json_string: JSON string to parse. + raw_tool_call: Raw tool call to include in error message. + skip: Whether to ignore parsing errors and return the value anyways. + + Returns: + The parsed JSON string or Python literal. + + Raises: + OutputParserException: If the string is invalid and ``skip=False``. + + """ + try: + return json.loads(json_string) + except json.JSONDecodeError: + try: + # Use ast.literal_eval to safely parse Python-style dicts + # (e.g. with single quotes) + return ast.literal_eval(json_string) + except (SyntaxError, ValueError) as e: + # If both fail, and we're not skipping, raise an informative error. + if skip: + return json_string + msg = ( + f"Function {raw_tool_call['function']['name']} arguments:\n\n" + f"{raw_tool_call['function']['arguments']}" + "\n\nare not valid JSON or a Python literal. " + f"Received error: {e}" + ) + raise OutputParserException(msg) from e + except TypeError as e: + if skip: + return json_string + msg = ( + f"Function {raw_tool_call['function']['name']} arguments:\n\n" + f"{raw_tool_call['function']['arguments']}\n\nare not a string or a " + f"dictionary. Received TypeError {e}" + ) + raise OutputParserException(msg) from e + + +def _parse_arguments_from_tool_call( + raw_tool_call: dict[str, Any], +) -> Optional[dict[str, Any]]: + """Parse arguments by trying to parse any shallowly nested string-encoded JSON. 
+ + Band-aid fix for issue in Ollama with inconsistent tool call argument structure. + Should be removed/changed if fixed upstream. + + `See #6155 <https://github.com/ollama/ollama/issues/6155>`__. + + """ + if "function" not in raw_tool_call: + return None + arguments = raw_tool_call["function"]["arguments"] + parsed_arguments: dict = {} + if isinstance(arguments, dict): + for key, value in arguments.items(): + if isinstance(value, str): + parsed_value = _parse_json_string( + value, skip=True, raw_tool_call=raw_tool_call + ) + if isinstance(parsed_value, (dict, list)): + parsed_arguments[key] = parsed_value + else: + parsed_arguments[key] = value + else: + parsed_arguments[key] = value + else: + parsed_arguments = _parse_json_string( + arguments, skip=False, raw_tool_call=raw_tool_call + ) + return parsed_arguments + + +def _is_pydantic_class(obj: Any) -> bool: + return isinstance(obj, type) and is_basemodel_subclass(obj) + + +class ChatOllama(BaseChatModel): + r"""Ollama chat model with v1 message/content block support. + + This implementation provides native support for structured content blocks. + + .. dropdown:: Setup + :open: + + Install ``langchain-ollama`` and download any models you want to use from Ollama. + + .. code-block:: bash + + ollama pull mistral:v0.3 + pip install -U langchain-ollama + + Key init args — completion params: + model: str + Name of Ollama model to use. + reasoning: Optional[bool] + Controls the reasoning/thinking mode for + `supported models `__. + + - ``True``: Enables reasoning mode. The model's reasoning process will be + captured and returned as a ``ReasoningContentBlock`` in the response + message content. The main response content will not include the reasoning tags. + - ``False``: Disables reasoning mode. The model will not perform any reasoning, + and the response will not include any reasoning content. + - ``None`` (Default): The model will use its default reasoning behavior. Note + however, if the model's default behavior *is* to perform reasoning, think tags + (``<think>`` and ``</think>``) will be present within the main response ``TextContentBlock``s + unless you set ``reasoning`` to ``True``. + temperature: float + Sampling temperature. Ranges from ``0.0`` to ``1.0``. + num_predict: Optional[int] + Max number of tokens to generate. + + See full list of supported init args and their descriptions in the params section. + + Instantiate: + .. code-block:: python + + from langchain_ollama.v1 import ChatOllama + + llm = ChatOllama( + model = "llama3", + temperature = 0.8, + num_predict = 256, + # other params ... + ) + + Invoke: + .. code-block:: python + + from langchain_core.v1.messages import HumanMessage + from langchain_core.messages.content_blocks import TextContentBlock + + messages = [ + HumanMessage("Hello!") + ] + llm.invoke(messages) + + .. code-block:: python + + AIMessage(content=[{'type': 'text', 'text': 'Hello! How can I help you today?'}], ...) + + Stream: + .. code-block:: python + + from langchain_core.v1.messages import HumanMessage + from langchain_core.messages.content_blocks import TextContentBlock + + messages = [ + HumanMessage("Return the words Hello World!") + ] + for chunk in llm.stream(messages): + print(chunk.content, end="") + + .. code-block:: python + + AIMessageChunk(content=[{'type': 'text', 'text': 'Hello'}], ...) + AIMessageChunk(content=[{'type': 'text', 'text': ' World'}], ...) + AIMessageChunk(content=[{'type': 'text', 'text': '!'}], ...) + + Multi-modal input: + ..
code-block:: python + + from langchain_core.messages.content_blocks import ImageContentBlock + + response = llm.invoke([ + HumanMessage(content=[ + TextContentBlock(type="text", text="Describe this image:"), + ImageContentBlock( + type="image", + base64="base64_encoded_image", + ) + ]) + ]) + + Tool Calling: + .. code-block:: python + + from pydantic import BaseModel, Field + + class Multiply(BaseModel): + a: int = Field(..., description="First integer") + b: int = Field(..., description="Second integer") + + llm_with_tools = llm.bind_tools([Multiply]) + ans = llm_with_tools.invoke([ + HumanMessage("What is 45*67") + ]) + ans.tool_calls + + .. code-block:: python + + [ + { + 'name': 'Multiply', + 'args': {'a': 45, 'b': 67}, + 'id': '420c3f3b-df10-4188-945f-eb3abdb40622', + 'type': 'tool_call' + } + ] + + """ # noqa: E501 + + model: str + """Model name to use.""" + + streaming: bool = False + """Whether to use streaming for invocation. + + If True, invoke will use streaming internally. + + """ + + reasoning: Optional[bool] = None + """Controls the reasoning/thinking mode for supported models. + + - ``True``: Enables reasoning mode. The model's reasoning process will be + captured and returned as a ``ReasoningContentBlock`` in the response + message content. The main response content will not include the reasoning tags. + - ``False``: Disables reasoning mode. The model will not perform any reasoning, + and the response will not include any reasoning content. + - ``None`` (Default): The model will use its default reasoning behavior. Note + however, if the model's default behavior *is* to perform reasoning, think tags + (```` and ````) will be present within the main response content + unless you set ``reasoning`` to ``True``. + + """ + + validate_model_on_init: bool = False + """Whether to validate the model exists in Ollama locally on initialization.""" + + # Ollama-specific parameters + mirostat: Optional[int] = None + """Enable Mirostat sampling for controlling perplexity. + + (Default: ``0``, ``0`` = disabled, ``1`` = Mirostat, ``2`` = Mirostat 2.0) + + """ + + mirostat_eta: Optional[float] = None + """Influences how quickly the algorithm responds to feedback from generated text. + + A lower learning rate will result in slower adjustments, while a higher learning + rate will make the algorithm more responsive. + + (Default: ``0.1``) + + """ + + mirostat_tau: Optional[float] = None + """Controls the balance between coherence and diversity of the output. + + A lower value will result in more focused and coherent text. + + (Default: ``5.0``) + + """ + + num_ctx: Optional[int] = None + """Sets the size of the context window used to generate the next token. + + (Default: ``2048``) + + """ + + num_gpu: Optional[int] = None + """The number of GPUs to use. + + On macOS it defaults to ``1`` to enable metal support, ``0`` to disable. + + """ + + num_thread: Optional[int] = None + """Sets the number of threads to use during computation. + + By default, Ollama will detect this for optimal performance. It is recommended to + set this value to the number of physical CPU cores your system has (as opposed to + the logical number of cores). + + """ + + num_predict: Optional[int] = None + """Maximum number of tokens to predict when generating text. + + (Default: ``128``, ``-1`` = infinite generation, ``-2`` = fill context) + + """ + + repeat_last_n: Optional[int] = None + """Sets how far back for the model to look back to prevent repetition. 
+ + (Default: ``64``, ``0`` = disabled, ``-1`` = ``num_ctx``) + + """ + + repeat_penalty: Optional[float] = None + """Sets how strongly to penalize repetitions. + + A higher value (e.g., ``1.5``) will penalize repetitions more strongly, while a + lower value (e.g., ``0.9``) will be more lenient. + + (Default: ``1.1``) + + """ + + temperature: Optional[float] = None + """The temperature of the model. + + Increasing the temperature will make the model answer more creatively. + + (Default: ``0.8``)""" + + seed: Optional[int] = None + """Sets the random number seed to use for generation. + + Setting this to a specific number will make the model generate the same text for the + same prompt. + + """ + + tfs_z: Optional[float] = None + """Tail free sampling is used to reduce the impact of less probable tokens from the output. + + A higher value (e.g., ``2.0``) will reduce the impact more, while a value of ``1.0`` disables this setting. + + (Default: ``1``) + + """ # noqa: E501 + + top_k: Optional[int] = None + """Reduces the probability of generating nonsense. + + A higher value (e.g. ``100``) will give more diverse answers, while a lower value + (e.g. ``10``) will be more conservative. + + (Default: ``40``) + + """ + + top_p: Optional[float] = None + """Works together with top-k. + + A higher value (e.g., ``0.95``) will lead to more diverse text, while a lower value + (e.g., ``0.5``) will generate more focused and conservative text. + + (Default: ``0.9``) + + """ + + format: Optional[Union[Literal["", "json"], JsonSchemaValue]] = None + """Specify the format of the output (Options: ``'json'``, JSON schema).""" + + keep_alive: Optional[Union[int, str]] = None + """How long the model will stay loaded into memory.""" + + base_url: Optional[str] = None + """Base url the model is hosted under.""" + + client_kwargs: Optional[dict] = {} + """Additional kwargs to pass to the httpx clients. + + These arguments are passed to both synchronous and async clients. + + Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments + to synchronous and asynchronous clients. + + """ + + async_client_kwargs: Optional[dict] = {} + """Additional kwargs to merge with ``client_kwargs`` before + passing to the httpx AsyncClient. + + `Full list of params. `__ + + """ + + sync_client_kwargs: Optional[dict] = {} + """Additional kwargs to merge with ``client_kwargs`` before + passing to the httpx Client. + + `Full list of params. 
`__ + + """ + + _client: Client = PrivateAttr() + """The client to use for making requests.""" + + _async_client: AsyncClient = PrivateAttr() + """The async client to use for making requests.""" + + @model_validator(mode="after") + def _set_clients(self) -> Self: + """Set clients to use for ollama.""" + client_kwargs = self.client_kwargs or {} + + sync_client_kwargs = client_kwargs + if self.sync_client_kwargs: + sync_client_kwargs = {**sync_client_kwargs, **self.sync_client_kwargs} + + async_client_kwargs = client_kwargs + if self.async_client_kwargs: + async_client_kwargs = {**async_client_kwargs, **self.async_client_kwargs} + + self._client = Client(host=self.base_url, **sync_client_kwargs) + self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs) + if self.validate_model_on_init: + validate_model(self._client, self.model) + return self + + def _get_ls_params(self, **kwargs: Any) -> LangSmithParams: + """Get standard params for tracing.""" + params = self._get_invocation_params(**kwargs) + ls_params = LangSmithParams( + ls_provider="ollama", + ls_model_name=self.model, + ls_model_type="chat", + ls_temperature=params.get("temperature", self.temperature), + ) + if ls_stop := params.get("stop", None): + ls_params["ls_stop"] = ls_stop + return ls_params + + def _get_invocation_params(self, **kwargs: Any) -> dict[str, Any]: + """Get parameters for model invocation.""" + params = { + "model": self.model, + "mirostat": self.mirostat, + "mirostat_eta": self.mirostat_eta, + "mirostat_tau": self.mirostat_tau, + "num_ctx": self.num_ctx, + "num_gpu": self.num_gpu, + "num_thread": self.num_thread, + "num_predict": self.num_predict, + "repeat_last_n": self.repeat_last_n, + "repeat_penalty": self.repeat_penalty, + "temperature": self.temperature, + "seed": self.seed, + "tfs_z": self.tfs_z, + "top_k": self.top_k, + "top_p": self.top_p, + "format": self.format, + "keep_alive": self.keep_alive, + } + params.update(kwargs) + return params + + @property + def _llm_type(self) -> str: + """Return type of chat model.""" + return "chat-ollama-v1" + + def _chat_params( + self, + messages: list[MessageV1], + *, + stream: bool = True, + **kwargs: Any, + ) -> dict[str, Any]: + """Build parameters for Ollama chat API.""" + # Convert v1 messages to Ollama format + ollama_messages = [_convert_from_v1_to_ollama_format(msg) for msg in messages] + + options_dict = kwargs.pop( + "options", + { + "mirostat": self.mirostat, + "mirostat_eta": self.mirostat_eta, + "mirostat_tau": self.mirostat_tau, + "num_ctx": self.num_ctx, + "num_gpu": self.num_gpu, + "num_thread": self.num_thread, + "num_predict": self.num_predict, + "repeat_last_n": self.repeat_last_n, + "repeat_penalty": self.repeat_penalty, + "temperature": self.temperature, + "seed": self.seed, + "tfs_z": self.tfs_z, + "top_k": self.top_k, + "top_p": self.top_p, + }, + ) + + params = { + "messages": ollama_messages, + "stream": kwargs.pop("stream", stream), + "model": kwargs.pop("model", self.model), + "think": kwargs.pop("reasoning", self.reasoning), + "format": kwargs.pop("format", self.format), + "options": Options(**options_dict), + "keep_alive": kwargs.pop("keep_alive", self.keep_alive), + **kwargs, + } + + if tools := kwargs.get("tools"): + params["tools"] = tools + + return params + + def _generate_stream( + self, + messages: list[MessageV1], + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunk]: + """Generate streaming response with native v1 chunks.""" + chat_params = 
self._chat_params(messages, **kwargs) + + if chat_params["stream"]: + for part in self._client.chat(**chat_params): + if not isinstance(part, str): + # Skip empty load responses + if ( + part.get("done") is True + and part.get("done_reason") == "load" + and not part.get("message", {}).get("content", "").strip() + ): + log.warning( + "Ollama returned empty response with `done_reason='load'`. " + "Skipping this response." + ) + continue + + chunk = _convert_chunk_to_v1(part) + + if run_manager: + text_content = "".join( + str(block.get("text", "")) + for block in chunk.content + if block.get("type") == "text" + ) + run_manager.on_llm_new_token( + text_content, + chunk=chunk, + ) + yield chunk + else: + # Non-streaming case + response = self._client.chat(**chat_params) + ai_message = _convert_to_v1_from_ollama_format(response) + chunk = AIMessageChunk( + content=ai_message.content, + response_metadata=ai_message.response_metadata, + usage_metadata=ai_message.usage_metadata, + ) + yield chunk + + async def _agenerate_stream( + self, + messages: list[MessageV1], + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunk]: + """Generate async streaming response with native v1 chunks.""" + chat_params = self._chat_params(messages, **kwargs) + + if chat_params["stream"]: + async for part in await self._async_client.chat(**chat_params): + if not isinstance(part, str): + # Skip empty load responses + if ( + part.get("done") is True + and part.get("done_reason") == "load" + and not part.get("message", {}).get("content", "").strip() + ): + log.warning( + "Ollama returned empty response with `done_reason='load'`. " + "Skipping this response." + ) + continue + + chunk = _convert_chunk_to_v1(part) + + if run_manager: + text_content = "".join( + str(block.get("text", "")) + for block in chunk.content + if block.get("type") == "text" + ) + await run_manager.on_llm_new_token( + text_content, + chunk=chunk, + ) + yield chunk + else: + # Non-streaming case + response = await self._async_client.chat(**chat_params) + ai_message = _convert_to_v1_from_ollama_format(response) + chunk = AIMessageChunk( + content=ai_message.content, + response_metadata=ai_message.response_metadata, + usage_metadata=ai_message.usage_metadata, + ) + yield chunk + + def _invoke( + self, + messages: list[MessageV1], + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AIMessage: + """Invoke the model with v1 messages and return a complete response. + + Args: + messages: List of v1 format messages. + run_manager: Callback manager for the run. + kwargs: Additional parameters. + + Returns: + Complete AI message response. + + """ + if self.streaming: + stream_iter = self._stream(messages, run_manager=run_manager, **kwargs) + return generate_from_stream(stream_iter) + + chat_params = self._chat_params(messages, stream=False, **kwargs) + response = self._client.chat(**chat_params) + return _convert_to_v1_from_ollama_format(response) + + async def _ainvoke( + self, + messages: list[MessageV1], + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AIMessage: + """Async invoke the model with v1 messages and return a complete response. + + Args: + messages: List of v1 format messages. + run_manager: Async callback manager for the run. + kwargs: Additional parameters. + + Returns: + Complete AI message response. 
+ + """ + if self.streaming: + stream_iter = self._astream(messages, run_manager=run_manager, **kwargs) + return await agenerate_from_stream(stream_iter) + + # Non-streaming case: direct API call + chat_params = self._chat_params(messages, stream=False, **kwargs) + response = await self._async_client.chat(**chat_params) + return _convert_to_v1_from_ollama_format(response) + + def _stream( + self, + messages: list[MessageV1], + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunk]: + """Stream response chunks using the v1 format. + + Args: + messages: List of v1 format messages. + run_manager: Callback manager for the run. + kwargs: Additional parameters. + + Yields: + AI message chunks in v1 format. + + """ + yield from self._generate_stream(messages, run_manager=run_manager, **kwargs) + + async def _astream( + self, + messages: list[MessageV1], + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunk]: + """Async stream response chunks using the v1 format. + + Args: + messages: List of v1 format messages. + run_manager: Async callback manager for the run. + kwargs: Additional parameters. + + Yields: + AI message chunks in v1 format. + + """ + async for chunk in self._agenerate_stream( + messages, run_manager=run_manager, **kwargs + ): + yield chunk + + def bind_tools( + self, + tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]], + *, + tool_choice: Optional[Union[dict, str, bool]] = None, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, AIMessage]: + """Bind tool-like objects to this chat model. + + Args: + tools: A list of tool definitions to bind to this chat model. + tool_choice: Tool choice parameter (currently ignored by Ollama). + kwargs: Additional parameters passed to ``bind()``. + + """ + formatted_tools = [convert_to_openai_tool(tool) for tool in tools] + return super().bind(tools=formatted_tools, **kwargs) + + def with_structured_output( + self, + schema: Union[dict, type], + *, + method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema", + include_raw: bool = False, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]: + """Model wrapper that returns outputs formatted to match the given schema. + + Args: + schema: The output schema. Can be passed in as: + + - a Pydantic class, + - a JSON schema + - a TypedDict class + - an OpenAI function/tool schema. + + If ``schema`` is a Pydantic class then the model output will be a + Pydantic instance of that class, and the model-generated fields will be + validated by the Pydantic class. Otherwise the model output will be a + dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool` + for more on how to properly specify types and descriptions of + schema fields when specifying a Pydantic or TypedDict class. + + method: The method for steering model generation, one of: + + - ``'json_schema'``: + Uses Ollama's `structured output API `__ + - ``'function_calling'``: + Uses Ollama's tool-calling API + - ``'json_mode'``: + Specifies ``format='json'``. Note that if using JSON mode then you + must include instructions for formatting the output into the + desired schema into the model call. + + include_raw: + If False then only the parsed structured output is returned. If + an error occurs during model output parsing it will be raised. 
If True + then both the raw model response (a ``BaseMessage``) and the parsed model + response will be returned. If an error occurs during output parsing it + will be caught and returned as well. The final output is always a dict + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + + kwargs: Additional keyword args aren't supported. + + Returns: + A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. + + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + + If ``include_raw`` is True, then Runnable outputs a dict with keys: + + - ``'raw'``: ``BaseMessage`` + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] + + .. versionchanged:: 0.2.2 + + Added support for structured output API via ``format`` parameter. + + .. versionchanged:: 0.3.0 + + Updated default ``method`` to ``'json_schema'``. + + """ # noqa: E501 + _ = kwargs.pop("strict", None) + if kwargs: + msg = f"Received unsupported arguments {kwargs}" + raise ValueError(msg) + is_pydantic_schema = _is_pydantic_class(schema) + if method == "function_calling": + if schema is None: + msg = ( + "schema must be specified when method is not 'json_mode'. " + "Received None." + ) + raise ValueError(msg) + formatted_tool = convert_to_openai_tool(schema) + tool_name = formatted_tool["function"]["name"] + llm = self.bind_tools( + [schema], + tool_choice=tool_name, + ls_structured_output_format={ + "kwargs": {"method": method}, + "schema": formatted_tool, + }, + ) + if is_pydantic_schema: + output_parser: Runnable = PydanticToolsParser( + tools=[schema], # type: ignore[list-item] + first_tool_only=True, + ) + else: + output_parser = JsonOutputKeyToolsParser( + key_name=tool_name, first_tool_only=True + ) + elif method == "json_mode": + llm = self.bind( + format="json", + ls_structured_output_format={ + "kwargs": {"method": method}, + "schema": schema, + }, + ) + output_parser = ( + PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type] + if is_pydantic_schema + else JsonOutputParser() + ) + elif method == "json_schema": + if schema is None: + msg = ( + "schema must be specified when method is not 'json_mode'. " + "Received None." + ) + raise ValueError(msg) + if is_pydantic_schema: + schema = cast(TypeBaseModel, schema) + if issubclass(schema, BaseModelV1): + response_format = schema.schema() + else: + response_format = schema.model_json_schema() + llm = self.bind( + format=response_format, + ls_structured_output_format={ + "kwargs": {"method": method}, + "schema": schema, + }, + ) + output_parser = PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type] + else: + if is_typeddict(schema): + response_format = convert_to_json_schema(schema) + if "required" not in response_format: + response_format["required"] = list( + response_format["properties"].keys() + ) + else: + # is JSON schema + response_format = cast(dict, schema) + llm = self.bind( + format=response_format, + ls_structured_output_format={ + "kwargs": {"method": method}, + "schema": response_format, + }, + ) + output_parser = JsonOutputParser() + else: + msg = ( + f"Unrecognized method argument. Expected one of 'function_calling', " + f"'json_schema', or 'json_mode'. 
Received: '{method}'" + ) + raise ValueError(msg) + + if include_raw: + parser_assign = RunnablePassthrough.assign( + parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None + ) + parser_none = RunnablePassthrough.assign(parsed=lambda _: None) + parser_with_fallback = parser_assign.with_fallbacks( + [parser_none], exception_key="parsing_error" + ) + return RunnableMap(raw=llm) | parser_with_fallback + return llm | output_parser diff --git a/libs/partners/ollama/pyproject.toml b/libs/partners/ollama/pyproject.toml index 2cafc288491..6a63a4615a1 100644 --- a/libs/partners/ollama/pyproject.toml +++ b/libs/partners/ollama/pyproject.toml @@ -117,6 +117,7 @@ omit = ["tests/*"] addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5" markers = [ "compile: mark placeholder test used to compile integration tests without running them", + "allow_socket: mark test to allow socket access", ] asyncio_mode = "auto" diff --git a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models.py b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models.py index c24a75297f1..650f77f184d 100644 --- a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models.py +++ b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models.py @@ -15,7 +15,7 @@ DEFAULT_MODEL_NAME = "llama3.1" @pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")]) def test_structured_output(method: str) -> None: - """Test to verify structured output via tool calling and ``format`` parameter.""" + """Test to verify structured output via tool calling and `format` parameter.""" class Joke(BaseModel): """Joke to tell user.""" @@ -27,40 +27,44 @@ def test_structured_output(method: str) -> None: query = "Tell me a joke about cats." 
# Pydantic - structured_llm = llm.with_structured_output(Joke, method=method) # type: ignore[arg-type] - result = structured_llm.invoke(query) - assert isinstance(result, Joke) + if method == "function_calling": + structured_llm = llm.with_structured_output(Joke, method="function_calling") + result = structured_llm.invoke(query) + assert isinstance(result, Joke) - for chunk in structured_llm.stream(query): - assert isinstance(chunk, Joke) + for chunk in structured_llm.stream(query): + assert isinstance(chunk, Joke) # JSON Schema - structured_llm = llm.with_structured_output(Joke.model_json_schema(), method=method) # type: ignore[arg-type] - result = structured_llm.invoke(query) - assert isinstance(result, dict) - assert set(result.keys()) == {"setup", "punchline"} + if method == "json_schema": + structured_llm = llm.with_structured_output( + Joke.model_json_schema(), method="json_schema" + ) + result = structured_llm.invoke(query) + assert isinstance(result, dict) + assert set(result.keys()) == {"setup", "punchline"} - for chunk in structured_llm.stream(query): + for chunk in structured_llm.stream(query): + assert isinstance(chunk, dict) assert isinstance(chunk, dict) - assert isinstance(chunk, dict) - assert set(chunk.keys()) == {"setup", "punchline"} + assert set(chunk.keys()) == {"setup", "punchline"} - # Typed Dict - class JokeSchema(TypedDict): - """Joke to tell user.""" + # Typed Dict + class JokeSchema(TypedDict): + """Joke to tell user.""" - setup: Annotated[str, "question to set up a joke"] - punchline: Annotated[str, "answer to resolve the joke"] + setup: Annotated[str, "question to set up a joke"] + punchline: Annotated[str, "answer to resolve the joke"] - structured_llm = llm.with_structured_output(JokeSchema, method=method) # type: ignore[arg-type] - result = structured_llm.invoke(query) - assert isinstance(result, dict) - assert set(result.keys()) == {"setup", "punchline"} + structured_llm = llm.with_structured_output(JokeSchema, method="json_schema") + result = structured_llm.invoke(query) + assert isinstance(result, dict) + assert set(result.keys()) == {"setup", "punchline"} - for chunk in structured_llm.stream(query): + for chunk in structured_llm.stream(query): + assert isinstance(chunk, dict) assert isinstance(chunk, dict) - assert isinstance(chunk, dict) - assert set(chunk.keys()) == {"setup", "punchline"} + assert set(chunk.keys()) == {"setup", "punchline"} @pytest.mark.parametrize(("model"), [(DEFAULT_MODEL_NAME)]) diff --git a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py index 19e2106e9ce..82198050b40 100644 --- a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py +++ b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py @@ -1,29 +1,17 @@ -"""Ollama specific chat model integration tests for reasoning models.""" +"""Ollama integration tests for reasoning chat models.""" import pytest -from langchain_core.messages import ( - AIMessageChunk, - BaseMessageChunk, - HumanMessage, -) -from pydantic import BaseModel, Field +from langchain_core.messages import AIMessageChunk, BaseMessageChunk, HumanMessage from langchain_ollama import ChatOllama SAMPLE = "What is 3^3?" 
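# --- Editor's sketch (not part of the patch): the three `reasoning` settings that
# the tests in this module exercise against a DeepSeek R1 model. The model name
# matches the tests; responses are non-deterministic, so only the structure is shown.
from langchain_ollama import ChatOllama

prompt = "What is 3^3?"

# reasoning=True: the thinking trace is returned separately under
# additional_kwargs["reasoning_content"], and no <think> tags appear in content.
enabled = ChatOllama(model="deepseek-r1:1.5b", reasoning=True).invoke(prompt)
trace = enabled.additional_kwargs["reasoning_content"]

# reasoning=False: thinking is suppressed (the <think> tags are stripped) and no
# "reasoning_content" key is attached to the message.
disabled = ChatOllama(model="deepseek-r1:1.5b", reasoning=False).invoke(prompt)
assert "reasoning_content" not in disabled.additional_kwargs

# reasoning=None (default): the model's own default applies, so R1-style models
# leave <think>...</think> tags inline in the visible content.
default = ChatOllama(model="deepseek-r1:1.5b", reasoning=None).invoke(prompt)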
-class MathAnswer(BaseModel): - """A mathematical expression and its numerical answer.""" - - expression: str = Field(description="The mathematical expression to evaluate.") - answer: int = Field(description="The numerical answer to the expression.") - - @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) def test_stream_no_reasoning(model: str) -> None: """Test streaming with `reasoning=False`""" - llm = ChatOllama(model=model, num_ctx=2**12) + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) messages = [ { "role": "user", @@ -39,16 +27,14 @@ def test_stream_no_reasoning(model: str) -> None: result += chunk assert isinstance(result, AIMessageChunk) assert result.content - assert "reasoning_content" not in result.additional_kwargs assert "" not in result.content and "" not in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] + assert "reasoning_content" not in result.additional_kwargs @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) async def test_astream_no_reasoning(model: str) -> None: """Test async streaming with `reasoning=False`""" - llm = ChatOllama(model=model, num_ctx=2**12) + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) messages = [ { "role": "user", @@ -64,10 +50,8 @@ async def test_astream_no_reasoning(model: str) -> None: result += chunk assert isinstance(result, AIMessageChunk) assert result.content - assert "reasoning_content" not in result.additional_kwargs assert "" not in result.content and "" not in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] + assert "reasoning_content" not in result.additional_kwargs @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) @@ -89,10 +73,10 @@ def test_stream_reasoning_none(model: str) -> None: result += chunk assert isinstance(result, AIMessageChunk) assert result.content - assert "reasoning_content" not in result.additional_kwargs assert "" in result.content and "" in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] + assert "reasoning_content" not in result.additional_kwargs + assert "" not in result.additional_kwargs.get("reasoning_content", "") + assert "" not in result.additional_kwargs.get("reasoning_content", "") @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) @@ -114,10 +98,10 @@ async def test_astream_reasoning_none(model: str) -> None: result += chunk assert isinstance(result, AIMessageChunk) assert result.content - assert "reasoning_content" not in result.additional_kwargs assert "" in result.content and "" in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] + assert "reasoning_content" not in result.additional_kwargs + assert "" not in result.additional_kwargs.get("reasoning_content", "") + assert "" not in result.additional_kwargs.get("reasoning_content", "") @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) @@ -175,27 +159,23 @@ async def test_reasoning_astream(model: str) -> None: @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) def test_invoke_no_reasoning(model: str) -> None: """Test using invoke with `reasoning=False`""" - llm = ChatOllama(model=model, num_ctx=2**12) + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) message = 
HumanMessage(content=SAMPLE) result = llm.invoke([message]) assert result.content assert "reasoning_content" not in result.additional_kwargs assert "" not in result.content and "" not in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) async def test_ainvoke_no_reasoning(model: str) -> None: """Test using async invoke with `reasoning=False`""" - llm = ChatOllama(model=model, num_ctx=2**12) + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) message = HumanMessage(content=SAMPLE) result = await llm.ainvoke([message]) assert result.content assert "reasoning_content" not in result.additional_kwargs assert "" not in result.content and "" not in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) @@ -207,8 +187,8 @@ def test_invoke_reasoning_none(model: str) -> None: assert result.content assert "reasoning_content" not in result.additional_kwargs assert "" in result.content and "" in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] + assert "" not in result.additional_kwargs.get("reasoning_content", "") + assert "" not in result.additional_kwargs.get("reasoning_content", "") @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) @@ -220,8 +200,8 @@ async def test_ainvoke_reasoning_none(model: str) -> None: assert result.content assert "reasoning_content" not in result.additional_kwargs assert "" in result.content and "" in result.content - assert "" not in result.additional_kwargs["reasoning_content"] - assert "" not in result.additional_kwargs["reasoning_content"] + assert "" not in result.additional_kwargs.get("reasoning_content", "") + assert "" not in result.additional_kwargs.get("reasoning_content", "") @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) @@ -250,3 +230,43 @@ async def test_reasoning_ainvoke(model: str) -> None: assert "" not in result.content and "" not in result.content assert "" not in result.additional_kwargs["reasoning_content"] assert "" not in result.additional_kwargs["reasoning_content"] + + +@pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) +def test_think_tag_stripping_necessity(model: str) -> None: + """Test that demonstrates why ``_strip_think_tags`` is necessary. + + DeepSeek R1 models include reasoning/thinking as their default behavior. + When ``reasoning=False`` is set, the user explicitly wants no reasoning content, + but Ollama cannot disable thinking at the API level for these models. + Therefore, post-processing is required to strip the ```` tags. + + This test documents the specific behavior that necessitates the + ``_strip_think_tags`` function in the chat_models.py implementation. 
+ """ + # Test with reasoning=None (default behavior - should include think tags) + llm_default = ChatOllama(model=model, reasoning=None, num_ctx=2**12) + message = HumanMessage(content=SAMPLE) + + result_default = llm_default.invoke([message]) + + # With reasoning=None, the model's default behavior includes tags + # This demonstrates why we need the stripping logic + assert "" in result_default.content + assert "" in result_default.content + assert "reasoning_content" not in result_default.additional_kwargs + + # Test with reasoning=False (explicit disable - should NOT include think tags) + llm_disabled = ChatOllama(model=model, reasoning=False, num_ctx=2**12) + + result_disabled = llm_disabled.invoke([message]) + + # With reasoning=False, think tags should be stripped from content + # This verifies that _strip_think_tags is working correctly + assert "" not in result_disabled.content + assert "" not in result_disabled.content + assert "reasoning_content" not in result_disabled.additional_kwargs + + # Verify the difference: same model, different reasoning settings + # Default includes tags, disabled strips them + assert result_default.content != result_disabled.content diff --git a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py index d596011bfb4..95742e924f2 100644 --- a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py +++ b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py @@ -40,7 +40,7 @@ class TestChatOllama(ChatModelIntegrationTests): @property def has_tool_choice(self) -> bool: # TODO: update after Ollama implements - # https://github.com/ollama/ollama/blob/main/docs/openai.md + # https://github.com/ollama/ollama/blob/main/docs/openai.md#supported-request-fields return False @property @@ -168,7 +168,7 @@ class TestChatOllama(ChatModelIntegrationTests): with pytest.raises(ValidationError) as excinfo: ChatOllama(model="any-model", validate_model_on_init=True) - assert "not found in Ollama" in str(excinfo.value) + assert "Failed to connect to Ollama" in str(excinfo.value) @patch("langchain_ollama.chat_models.Client.list") def test_init_response_error(self, mock_list: MagicMock) -> None: diff --git a/libs/partners/ollama/tests/integration_tests/test_llms.py b/libs/partners/ollama/tests/integration_tests/test_llms.py index 0ee236e62e5..9f2209d4d95 100644 --- a/libs/partners/ollama/tests/integration_tests/test_llms.py +++ b/libs/partners/ollama/tests/integration_tests/test_llms.py @@ -7,18 +7,19 @@ from langchain_core.runnables import RunnableConfig from langchain_ollama.llms import OllamaLLM MODEL_NAME = "llama3.1" +REASONING_MODEL_NAME = "deepseek-r1:1.5b" SAMPLE = "What is 3^3?" 
def test_stream_text_tokens() -> None: - """Test streaming raw string tokens from OllamaLLM.""" + """Test streaming raw string tokens from `OllamaLLM`.""" llm = OllamaLLM(model=MODEL_NAME) for token in llm.stream("I'm Pickle Rick"): assert isinstance(token, str) -@pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) def test__stream_no_reasoning(model: str) -> None: """Test low-level chunk streaming of a simple prompt with `reasoning=False`.""" llm = OllamaLLM(model=model, num_ctx=2**12) @@ -39,7 +40,7 @@ def test__stream_no_reasoning(model: str) -> None: assert "reasoning_content" not in result_chunk.generation_info # type: ignore[operator] -@pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) def test__stream_with_reasoning(model: str) -> None: """Test low-level chunk streaming with `reasoning=True`.""" llm = OllamaLLM(model=model, num_ctx=2**12, reasoning=True) @@ -64,14 +65,14 @@ def test__stream_with_reasoning(model: str) -> None: async def test_astream_text_tokens() -> None: - """Test async streaming raw string tokens from OllamaLLM.""" + """Test async streaming raw string tokens from `OllamaLLM`.""" llm = OllamaLLM(model=MODEL_NAME) async for token in llm.astream("I'm Pickle Rick"): assert isinstance(token, str) -@pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) async def test__astream_no_reasoning(model: str) -> None: """Test low-level async chunk streaming with `reasoning=False`.""" llm = OllamaLLM(model=model, num_ctx=2**12) @@ -89,7 +90,7 @@ async def test__astream_no_reasoning(model: str) -> None: assert "reasoning_content" not in result_chunk.generation_info # type: ignore[operator] -@pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")]) +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) async def test__astream_with_reasoning(model: str) -> None: """Test low-level async chunk streaming with `reasoning=True`.""" llm = OllamaLLM(model=model, num_ctx=2**12, reasoning=True) @@ -109,7 +110,7 @@ async def test__astream_with_reasoning(model: str) -> None: async def test_abatch() -> None: - """Test batch sync token generation from OllamaLLM.""" + """Test batch sync token generation from `OllamaLLM`.""" llm = OllamaLLM(model=MODEL_NAME) result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"]) @@ -129,7 +130,7 @@ async def test_abatch_tags() -> None: def test_batch() -> None: - """Test batch token generation from OllamaLLM.""" + """Test batch token generation from `OllamaLLM`.""" llm = OllamaLLM(model=MODEL_NAME) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) diff --git a/libs/partners/ollama/tests/integration_tests/v1/__init__.py b/libs/partners/ollama/tests/integration_tests/v1/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_standard_v1.py b/libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_standard_v1.py new file mode 100644 index 00000000000..fb69084460b --- /dev/null +++ b/libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_standard_v1.py @@ -0,0 +1,258 @@ +"""Test chat model v1 integration using standard integration tests.""" + +from unittest.mock import MagicMock, patch + +import pytest +from httpx import ConnectError +from langchain_core.messages.content_blocks import 
ToolCallChunk, is_reasoning_block +from langchain_core.tools import tool +from langchain_core.v1.chat_models import BaseChatModel +from langchain_core.v1.messages import AIMessage, AIMessageChunk, HumanMessage +from langchain_tests.integration_tests.chat_models_v1 import ChatModelV1IntegrationTests +from ollama import ResponseError +from pydantic import ValidationError + +from langchain_ollama.v1.chat_models import ChatOllama + +DEFAULT_MODEL_NAME = "llama3.1" +REASONING_MODEL_NAME = "deepseek-r1:1.5b" + + +@tool +def get_current_weather(location: str) -> dict: + """Gets the current weather in a given location.""" + if "boston" in location.lower(): + return {"temperature": "15°F", "conditions": "snow"} + return {"temperature": "unknown", "conditions": "unknown"} + + +class TestChatOllamaV1(ChatModelV1IntegrationTests): + @property + def chat_model_class(self) -> type[ChatOllama]: + return ChatOllama + + @property + def chat_model_params(self) -> dict: + return {"model": DEFAULT_MODEL_NAME} + + @property + def supports_reasoning_content_blocks(self) -> bool: + """ChatOllama supports reasoning content blocks.""" + return True + + @property + def supports_image_content_blocks(self) -> bool: + """ChatOllama supports image content blocks.""" + return True + + @property + def has_tool_calling(self) -> bool: + """ChatOllama supports tool calling.""" + return True + + @property + def supports_invalid_tool_calls(self) -> bool: + """ChatOllama supports invalid tool call handling.""" + return True + + @property + def supports_non_standard_blocks(self) -> bool: + """ChatOllama does not support non-standard content blocks.""" + return False + + @property + def supports_json_mode(self) -> bool: + return True + + @property + def has_tool_choice(self) -> bool: + # TODO: update after Ollama implements + # https://github.com/ollama/ollama/blob/main/docs/openai.md#supported-request-fields + return False + + def test_tool_streaming(self, model: BaseChatModel) -> None: + """Test that the model can stream tool calls.""" + chat_model_with_tools = model.bind_tools([get_current_weather]) + + prompt = [HumanMessage("What is the weather today in Boston?")] + + # Flags and collectors for validation + tool_chunk_found = False + final_tool_calls = [] + collected_tool_chunks: list[ToolCallChunk] = [] + + # Stream the response and inspect the chunks + for chunk in chat_model_with_tools.stream(prompt): + assert isinstance(chunk, AIMessageChunk), "Expected AIMessageChunk type" + + if chunk.tool_call_chunks: + tool_chunk_found = True + collected_tool_chunks.extend(chunk.tool_call_chunks) + + if chunk.tool_calls: + final_tool_calls.extend(chunk.tool_calls) + + assert tool_chunk_found, "Tool streaming did not produce any tool_call_chunks." 
+ assert len(final_tool_calls) == 1, ( + f"Expected 1 final tool call, but got {len(final_tool_calls)}" + ) + + final_tool_call = final_tool_calls[0] + assert final_tool_call["name"] == "get_current_weather" + assert final_tool_call["args"] == {"location": "Boston"} + + assert len(collected_tool_chunks) > 0 + assert collected_tool_chunks[0]["name"] == "get_current_weather" + + # The ID should be consistent across chunks that have it + tool_call_id = collected_tool_chunks[0].get("id") + assert tool_call_id is not None + assert all( + chunk.get("id") == tool_call_id + for chunk in collected_tool_chunks + if chunk.get("id") + ) + assert final_tool_call["id"] == tool_call_id + + async def test_tool_astreaming(self, model: BaseChatModel) -> None: + """Test that the model can stream tool calls asynchronously.""" + chat_model_with_tools = model.bind_tools([get_current_weather]) + + prompt = [HumanMessage("What is the weather today in Boston?")] + + # Flags and collectors for validation + tool_chunk_found = False + final_tool_calls = [] + collected_tool_chunks: list[ToolCallChunk] = [] + + # Stream the response and inspect the chunks + async for chunk in chat_model_with_tools.astream(prompt): + assert isinstance(chunk, AIMessageChunk), "Expected AIMessageChunk type" + + if chunk.tool_call_chunks: + tool_chunk_found = True + collected_tool_chunks.extend(chunk.tool_call_chunks) + + if chunk.tool_calls: + final_tool_calls.extend(chunk.tool_calls) + + assert tool_chunk_found, "Tool streaming did not produce any tool_call_chunks." + assert len(final_tool_calls) == 1, ( + f"Expected 1 final tool call, but got {len(final_tool_calls)}" + ) + + final_tool_call = final_tool_calls[0] + assert final_tool_call["name"] == "get_current_weather" + assert final_tool_call["args"] == {"location": "Boston"} + + assert len(collected_tool_chunks) > 0 + assert collected_tool_chunks[0]["name"] == "get_current_weather" + + # The ID should be consistent across chunks that have it + tool_call_id = collected_tool_chunks[0].get("id") + assert tool_call_id is not None + assert all( + chunk.get("id") == tool_call_id + for chunk in collected_tool_chunks + if chunk.get("id") + ) + assert final_tool_call["id"] == tool_call_id + + @pytest.mark.xfail( + reason=("Ollama does not yet support tool_choice forcing, may be unreliable") + ) + def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None: + # TODO: shouldn't need to xfail this + super().test_tool_calling_with_no_arguments(model) + + @pytest.mark.xfail( + reason=( + "Ollama does not support tool_choice forcing, agent loop may be unreliable" + ) + ) + def test_agent_loop(self, model: BaseChatModel) -> None: + super().test_agent_loop(model) + + # TODO + # @pytest.mark.xfail( + # reason=( + # "No single Ollama model supports both multimodal content and reasoning. " + # "Override skips test due to model limitations." + # ) + # ) + # def test_multimodal_reasoning(self, model: BaseChatModel) -> None: + # """Test complex reasoning with multiple content types. + + # This test overrides the default model to use a reasoning-capable model + # with reasoning mode explicitly enabled. Note that this test requires + # both multimodal support AND reasoning support. + # """ + # if not self.supports_multimodal_reasoning: + # pytest.skip("Model does not support multimodal reasoning.") + + # pytest.skip( + # "TODO: Update this when we have a model that supports both multimodal and " # noqa: E501 + # "reasoning." 
+ # ) + + @pytest.mark.xfail( + reason=( + f"{DEFAULT_MODEL_NAME} does not support reasoning. Override uses " + "reasoning-capable model with `reasoning=True` enabled." + ), + strict=False, + ) + def test_reasoning_content_blocks_basic(self, model: BaseChatModel) -> None: + """Test that the model can generate ``ReasoningContentBlock``. + + This test overrides the default model to use a reasoning-capable model + with reasoning mode explicitly enabled. + """ + if not self.supports_reasoning_content_blocks: + pytest.skip("Model does not support ReasoningContentBlock.") + + reasoning_enabled_model = ChatOllama( + model=REASONING_MODEL_NAME, reasoning=True, validate_model_on_init=True + ) + + message = HumanMessage("Think step by step: What is 2 + 2?") + result = reasoning_enabled_model.invoke([message]) + assert isinstance(result, AIMessage) + if isinstance(result.content, list): + reasoning_blocks = [ + block + for block in result.content + if isinstance(block, dict) and is_reasoning_block(block) + ] + assert len(reasoning_blocks) > 0, ( + "Expected reasoning content blocks but found none. " + f"Content blocks: {[block.get('type') for block in result.content]}" + ) + + # Additional Ollama reasoning tests in v1/chat_models/test_chat_models_v1.py + + @patch("langchain_ollama.v1.chat_models.Client.list") + def test_init_model_not_found(self, mock_list: MagicMock) -> None: + """Test that a ValueError is raised when the model is not found.""" + mock_list.side_effect = ValueError("Test model not found") + with pytest.raises(ValueError) as excinfo: + ChatOllama(model="non-existent-model", validate_model_on_init=True) + assert "Test model not found" in str(excinfo.value) + + @patch("langchain_ollama.v1.chat_models.Client.list") + def test_init_connection_error(self, mock_list: MagicMock) -> None: + """Test that a ValidationError is raised on connect failure during init.""" + mock_list.side_effect = ConnectError("Test connection error") + + with pytest.raises(ValidationError) as excinfo: + ChatOllama(model="any-model", validate_model_on_init=True) + assert "Failed to connect to Ollama" in str(excinfo.value) + + @patch("langchain_ollama.v1.chat_models.Client.list") + def test_init_response_error(self, mock_list: MagicMock) -> None: + """Test that a ResponseError is raised.""" + mock_list.side_effect = ResponseError("Test response error") + + with pytest.raises(ValidationError) as excinfo: + ChatOllama(model="any-model", validate_model_on_init=True) + assert "Received an error from the Ollama API" in str(excinfo.value) diff --git a/libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_v1.py b/libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_v1.py new file mode 100644 index 00000000000..0b9869bfd2c --- /dev/null +++ b/libs/partners/ollama/tests/integration_tests/v1/chat_models/test_chat_models_v1.py @@ -0,0 +1,443 @@ +"""Ollama-specific v1 chat model integration tests. + +Standard tests are handled in `test_chat_models_v1_standard.py`. + +""" + +from __future__ import annotations + +from typing import Annotated, Optional + +import pytest +from langchain_core.messages.content_blocks import is_reasoning_block +from langchain_core.v1.messages import AIMessageChunk, HumanMessage +from pydantic import BaseModel, Field +from typing_extensions import TypedDict + +from langchain_ollama.v1.chat_models import ChatOllama + +DEFAULT_MODEL_NAME = "llama3.1" +REASONING_MODEL_NAME = "deepseek-r1:1.5b" + +SAMPLE = "What is 3^3?" 
+ + +@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")]) +def test_structured_output(method: str) -> None: + """Test to verify structured output via tool calling and `format` parameter.""" + + class Joke(BaseModel): + """Joke to tell user.""" + + setup: str = Field(description="question to set up a joke") + punchline: str = Field(description="answer to resolve the joke") + + llm = ChatOllama(model=DEFAULT_MODEL_NAME, temperature=0) + query = "Tell me a joke about cats." + + # Pydantic + if method == "function_calling": + structured_llm = llm.with_structured_output(Joke, method="function_calling") + result = structured_llm.invoke(query) + assert isinstance(result, Joke) + + for chunk in structured_llm.stream(query): + assert isinstance(chunk, Joke) + + # JSON Schema + if method == "json_schema": + structured_llm = llm.with_structured_output( + Joke.model_json_schema(), method="json_schema" + ) + result = structured_llm.invoke(query) + assert isinstance(result, dict) + assert set(result.keys()) == {"setup", "punchline"} + + for chunk in structured_llm.stream(query): + assert isinstance(chunk, dict) + assert isinstance(chunk, dict) + assert set(chunk.keys()) == {"setup", "punchline"} + + # Typed Dict + class JokeSchema(TypedDict): + """Joke to tell user.""" + + setup: Annotated[str, "question to set up a joke"] + punchline: Annotated[str, "answer to resolve the joke"] + + structured_llm = llm.with_structured_output(JokeSchema, method="json_schema") + result = structured_llm.invoke(query) + assert isinstance(result, dict) + assert set(result.keys()) == {"setup", "punchline"} + + for chunk in structured_llm.stream(query): + assert isinstance(chunk, dict) + assert isinstance(chunk, dict) + assert set(chunk.keys()) == {"setup", "punchline"} + + +@pytest.mark.parametrize(("model"), [(DEFAULT_MODEL_NAME)]) +def test_structured_output_deeply_nested(model: str) -> None: + """Test to verify structured output with a nested objects.""" + llm = ChatOllama(model=model, temperature=0) + + class Person(BaseModel): + """Information about a person.""" + + name: Optional[str] = Field(default=None, description="The name of the person") + hair_color: Optional[str] = Field( + default=None, description="The color of the person's hair if known" + ) + height_in_meters: Optional[str] = Field( + default=None, description="Height measured in meters" + ) + + class Data(BaseModel): + """Extracted data about people.""" + + people: list[Person] + + chat = llm.with_structured_output(Data) + text = ( + "Alan Smith is 6 feet tall and has blond hair." + "Alan Poe is 3 feet tall and has grey hair." 
+ ) + result = chat.invoke(text) + assert isinstance(result, Data) + + for chunk in chat.stream(text): + assert isinstance(chunk, Data) + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_stream_no_reasoning(model: str) -> None: + """Test streaming with `reasoning=False`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) + result = None + for chunk in llm.stream(SAMPLE): + assert isinstance(chunk, AIMessageChunk) + if result is None: + result = chunk + continue + result += chunk + assert isinstance(result, AIMessageChunk) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" not in content_types, ( + f"Expected no reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +async def test_astream_no_reasoning(model: str) -> None: + """Test async streaming with `reasoning=False`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) + result = None + async for chunk in llm.astream(SAMPLE): + assert isinstance(chunk, AIMessageChunk) + if result is None: + result = chunk + continue + result += chunk + assert isinstance(result, AIMessageChunk) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" not in content_types, ( + f"Expected no reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_stream_reasoning_none(model: str) -> None: + """Test streaming with `reasoning=None`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=None) + result = None + for chunk in llm.stream(SAMPLE): + assert isinstance(chunk, AIMessageChunk) + if result is None: + result = chunk + continue + result += chunk + assert isinstance(result, AIMessageChunk) + assert result.content + + assert "" in result.text and "" in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +async def test_astream_reasoning_none(model: str) -> None: + """Test async streaming with `reasoning=None`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=None) + result = None + async for chunk in llm.astream(SAMPLE): + assert isinstance(chunk, AIMessageChunk) + if result is None: + result = chunk + continue + result += chunk + assert isinstance(result, AIMessageChunk) + assert result.content + + assert "" in result.text and "" in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_reasoning_stream(model: str) -> None: + """Test streaming with `reasoning=True`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=True) + result = None + for chunk in llm.stream(SAMPLE): + assert isinstance(chunk, AIMessageChunk) + if result is None: + result = chunk + continue + result += chunk + assert isinstance(result, AIMessageChunk) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + 
content_types.add(type_) + + assert "reasoning" in content_types, ( + f"Expected reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + # Assert non-empty reasoning content in ReasoningContentBlock + reasoning_blocks = [block for block in result.content if is_reasoning_block(block)] + for block in reasoning_blocks: + assert block.get("reasoning"), "Expected non-empty reasoning content" + assert len(block.get("reasoning", "")) > 0, ( + "Expected reasoning content to be non-empty" + ) + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +async def test_reasoning_astream(model: str) -> None: + """Test async streaming with `reasoning=True`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=True) + result = None + async for chunk in llm.astream(SAMPLE): + assert isinstance(chunk, AIMessageChunk) + if result is None: + result = chunk + continue + result += chunk + assert isinstance(result, AIMessageChunk) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" in content_types, ( + f"Expected reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + # Assert non-empty reasoning content in ReasoningContentBlock + reasoning_blocks = [block for block in result.content if is_reasoning_block(block)] + for block in reasoning_blocks: + assert block.get("reasoning"), "Expected non-empty reasoning content" + assert len(block.get("reasoning", "")) > 0, ( + "Expected reasoning content to be non-empty" + ) + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_invoke_no_reasoning(model: str) -> None: + """Test using invoke with `reasoning=False`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) + message = HumanMessage(SAMPLE) + result = llm.invoke([message]) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" not in content_types, ( + f"Expected no reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +async def test_ainvoke_no_reasoning(model: str) -> None: + """Test using async invoke with `reasoning=False`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) + message = HumanMessage(content=SAMPLE) + result = await llm.ainvoke([message]) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" not in content_types, ( + f"Expected no reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_invoke_reasoning_none(model: str) -> 
None: + """Test using invoke with `reasoning=None`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=None) + message = HumanMessage(content=SAMPLE) + result = llm.invoke([message]) + assert result.content + + assert "" in result.text and "" in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +async def test_ainvoke_reasoning_none(model: str) -> None: + """Test using async invoke with `reasoning=None`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=None) + message = HumanMessage(content=SAMPLE) + result = await llm.ainvoke([message]) + assert result.content + + assert "" in result.text and "" in result.text + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_reasoning_invoke(model: str) -> None: + """Test invoke with `reasoning=True`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=True) + message = HumanMessage(content=SAMPLE) + result = llm.invoke([message]) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" in content_types, ( + f"Expected reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + # Assert non-empty reasoning content in ReasoningContentBlock + reasoning_blocks = [block for block in result.content if is_reasoning_block(block)] + for block in reasoning_blocks: + assert block.get("reasoning"), "Expected non-empty reasoning content" + assert len(block.get("reasoning", "")) > 0, ( + "Expected reasoning content to be non-empty" + ) + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +async def test_reasoning_ainvoke(model: str) -> None: + """Test invoke with `reasoning=True`""" + llm = ChatOllama(model=model, num_ctx=2**12, reasoning=True) + message = HumanMessage(content=SAMPLE) + result = await llm.ainvoke([message]) + assert result.content + + content_types = set() + for content_block in result.content: + type_ = content_block.get("type") + if type_: + content_types.add(type_) + + assert "reasoning" in content_types, ( + f"Expected reasoning content, got types: {content_types}" + ) + assert "non_standard" not in content_types, ( + f"Expected no non-standard content, got types: {content_types}" + ) + assert "" not in result.text and "" not in result.text + + # Assert non-empty reasoning content in ReasoningContentBlock + reasoning_blocks = [block for block in result.content if is_reasoning_block(block)] + for block in reasoning_blocks: + assert block.get("reasoning"), "Expected non-empty reasoning content" + assert len(block.get("reasoning", "")) > 0, ( + "Expected reasoning content to be non-empty" + ) + + +@pytest.mark.parametrize(("model"), [(REASONING_MODEL_NAME)]) +def test_think_tag_stripping_necessity(model: str) -> None: + """Test that demonstrates why ``_strip_think_tags`` is necessary. + + DeepSeek R1 models include reasoning/thinking as their default behavior. + When ``reasoning=False`` is set, the user explicitly wants no reasoning content, + but Ollama cannot disable thinking at the API level for these models. + Therefore, post-processing is required to strip the ```` tags. + + This test documents the specific behavior that necessitates the + ``_strip_think_tags`` function in the chat_models.py implementation. 
+ """ + # Test with reasoning=None (default behavior - should include think tags) + llm_default = ChatOllama(model=model, reasoning=None, num_ctx=2**12) + message = HumanMessage(content=SAMPLE) + + result_default = llm_default.invoke([message]) + + # With reasoning=None, the model's default behavior includes tags + # This demonstrates why we need the stripping logic + assert "" in result_default.text + assert "" in result_default.text + + # Test with reasoning=False (explicit disable - should NOT include think tags) + llm_disabled = ChatOllama(model=model, reasoning=False, num_ctx=2**12) + + result_disabled = llm_disabled.invoke([message]) + + # With reasoning=False, think tags should be stripped from content + # This verifies that _strip_think_tags is working correctly + assert "" not in result_disabled.text + assert "" not in result_disabled.text + + # Verify the difference: same model, different reasoning settings + # Default includes tags, disabled strips them + assert result_default.content != result_disabled.content diff --git a/libs/partners/ollama/tests/unit_tests/test_chat_models.py b/libs/partners/ollama/tests/unit_tests/test_chat_models.py index b32ad638cb8..3fb742fc4af 100644 --- a/libs/partners/ollama/tests/unit_tests/test_chat_models.py +++ b/libs/partners/ollama/tests/unit_tests/test_chat_models.py @@ -1,4 +1,4 @@ -"""Test chat model integration.""" +"""Unit tests for ChatOllama.""" import json import logging @@ -33,6 +33,16 @@ class TestChatOllama(ChatModelUnitTests): def test__parse_arguments_from_tool_call() -> None: + """Test that string arguments are preserved as strings in tool call parsing. + + This test verifies the fix for PR #30154 which addressed an issue where + string-typed tool arguments (like IDs or long strings) were being incorrectly + processed. The parser should preserve string values as strings rather than + attempting to parse them as JSON when they're already valid string arguments. + + The test uses a long string ID to ensure string arguments maintain their + original type after parsing, which is critical for tools expecting string inputs. 
+ """ raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501 raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"] response = _parse_arguments_from_tool_call(raw_tool_calls[0]) @@ -54,6 +64,7 @@ def _mock_httpx_client_stream( def test_arbitrary_roles_accepted_in_chatmessages( monkeypatch: pytest.MonkeyPatch, ) -> None: + """Test that `ChatOllama` accepts arbitrary roles in `ChatMessage`.""" monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream) llm = ChatOllama( model=MODEL_NAME, @@ -94,9 +105,6 @@ dummy_raw_tool_call = { } -# --- Regression tests for tool-call argument parsing (see #30910) --- - - @pytest.mark.parametrize( "input_string, expected_output", [ @@ -113,14 +121,14 @@ dummy_raw_tool_call = { def test_parse_json_string_success_cases( input_string: str, expected_output: Any ) -> None: - """Tests that _parse_json_string correctly parses valid and fixable strings.""" + """Tests that `_parse_json_string` correctly parses valid and fixable strings.""" raw_tool_call = {"function": {"name": "test_func", "arguments": input_string}} result = _parse_json_string(input_string, raw_tool_call=raw_tool_call, skip=False) assert result == expected_output def test_parse_json_string_failure_case_raises_exception() -> None: - """Tests that _parse_json_string raises an exception for truly malformed strings.""" + """Tests that `_parse_json_string` raises an exception for malformed strings.""" malformed_string = "{'key': 'value',,}" raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}} with pytest.raises(OutputParserException): @@ -132,7 +140,7 @@ def test_parse_json_string_failure_case_raises_exception() -> None: def test_parse_json_string_skip_returns_input_on_failure() -> None: - """Tests that skip=True returns the original string on parse failure.""" + """Tests that `skip=True` returns the original string on parse failure.""" malformed_string = "{'not': valid,,,}" raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}} result = _parse_json_string( diff --git a/libs/partners/ollama/tests/unit_tests/test_embeddings.py b/libs/partners/ollama/tests/unit_tests/test_embeddings.py index 93f996f59bc..feca762deb6 100644 --- a/libs/partners/ollama/tests/unit_tests/test_embeddings.py +++ b/libs/partners/ollama/tests/unit_tests/test_embeddings.py @@ -32,7 +32,7 @@ def test_validate_model_on_init(mock_validate_model: Any) -> None: @patch("langchain_ollama.embeddings.Client") def test_embed_documents_passes_options(mock_client_class: Any) -> None: - """Test that embed_documents method passes options including num_gpu.""" + """Test that `embed_documents()` passes options, including `num_gpu`.""" # Create a mock client instance mock_client = Mock() mock_client_class.return_value = mock_client diff --git a/libs/partners/ollama/tests/unit_tests/v1/__init__.py b/libs/partners/ollama/tests/unit_tests/v1/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/ollama/tests/unit_tests/v1/test_chat_models.py b/libs/partners/ollama/tests/unit_tests/v1/test_chat_models.py new file mode 100644 index 00000000000..e6b82b391c5 --- /dev/null +++ b/libs/partners/ollama/tests/unit_tests/v1/test_chat_models.py @@ -0,0 +1,621 @@ +"""Unit tests for ChatOllama.""" + +import json +import logging +from collections.abc import AsyncIterator, Generator, Iterator 
+from typing import Any +from unittest.mock import MagicMock, patch + +import pytest +from langchain_core.exceptions import OutputParserException +from langchain_core.messages.content_blocks import ( + create_image_block, + create_text_block, +) +from langchain_core.v1.messages import AIMessage, HumanMessage, MessageV1, SystemMessage +from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests + +from langchain_ollama._compat import ( + _convert_chunk_to_v1, + _convert_from_v1_to_ollama_format, + _convert_to_v1_from_ollama_format, +) +from langchain_ollama.v1.chat_models import ( + ChatOllama, + _parse_arguments_from_tool_call, + _parse_json_string, +) + +MODEL_NAME = "llama3.1" + + +class TestMessageConversion: + """Test v1 message conversion utilities.""" + + def test_convert_human_message_v1_text_only(self) -> None: + """Test converting HumanMessage with text content.""" + message = HumanMessage("Hello world") + + result = _convert_from_v1_to_ollama_format(message) + + assert result["role"] == "user" + assert result["content"] == "Hello world" + assert result["images"] == [] + + def test_convert_ai_message_v1(self) -> None: + """Test converting AIMessage with text content.""" + message = AIMessage("Hello! How can I help?") + + result = _convert_from_v1_to_ollama_format(message) + + assert result["role"] == "assistant" + assert result["content"] == "Hello! How can I help?" + + def test_convert_system_message_v1(self) -> None: + """Test converting SystemMessage.""" + message = SystemMessage("You are a helpful assistant.") + + result = _convert_from_v1_to_ollama_format(message) + + assert result["role"] == "system" + assert result["content"] == "You are a helpful assistant." + + def test_convert_human_message_v1_with_image(self) -> None: + """Test converting HumanMessage with text and image content. + + Each uses `_convert_from_v1_to_ollama_format` to ensure + that the conversion handles both text and image blocks correctly. Thus, we don't + need additional tests for other message types that also use this function. + + """ + message_a = HumanMessage( + content=[ + create_text_block("Describe this image:"), + create_image_block(base64="base64imagedata"), + ] + ) + + result_a = _convert_from_v1_to_ollama_format(message_a) + + assert result_a["role"] == "user" + assert result_a["content"] == "Describe this image:" + assert result_a["images"] == ["base64imagedata"] + + # Make sure multiple images are handled correctly + message_b = HumanMessage( + content=[ + create_text_block("Describe this image:"), + create_image_block(base64="base64imagedata"), + create_image_block(base64="base64dataimage"), + ] + ) + + result_b = _convert_from_v1_to_ollama_format(message_b) + + assert result_b["role"] == "user" + assert result_b["content"] == "Describe this image:" + assert result_b["images"] == ["base64imagedata", "base64dataimage"] + + def test_convert_from_ollama_format(self) -> None: + """Test converting Ollama response to `AIMessage`.""" + ollama_response = { + "model": MODEL_NAME, + "created_at": "2024-01-01T00:00:00Z", + "message": { + "role": "assistant", + "content": "Hello! How can I help you today?", + }, + "done": True, + "done_reason": "stop", + "total_duration": 1000000, + "prompt_eval_count": 10, + "eval_count": 20, + } + + result = _convert_to_v1_from_ollama_format(ollama_response) + + assert isinstance(result, AIMessage) + assert len(result.content) == 1 + assert result.content[0].get("type") == "text" + assert result.content[0].get("text") == "Hello! 
How can I help you today?" + assert result.response_metadata.get("model_name") == MODEL_NAME + assert result.response_metadata.get("done") is True + + def test_convert_from_ollama_format_with_context(self) -> None: + """Test converting Ollama response with context field to `AIMessage`.""" + test_context = [1, 2, 3, 4, 5] # Example tokenized context + ollama_response = { + "model": MODEL_NAME, + "created_at": "2024-01-01T00:00:00Z", + "message": { + "role": "assistant", + "content": "Hello! How can I help you today?", + }, + "done": True, + "done_reason": "stop", + "total_duration": 1000000, + "prompt_eval_count": 10, + "eval_count": 20, + "context": test_context, + } + + result = _convert_to_v1_from_ollama_format(ollama_response) + + assert isinstance(result, AIMessage) + assert len(result.content) == 1 + assert result.content[0].get("type") == "text" + assert result.content[0].get("text") == "Hello! How can I help you today?" + assert result.response_metadata.get("model_name") == MODEL_NAME + assert result.response_metadata.get("done") is True + assert result.response_metadata.get("context") == test_context + + def test_convert_chunk_to_v1(self) -> None: + """Test converting Ollama streaming chunk to `AIMessageChunkV1`.""" + chunk = { + "model": MODEL_NAME, + "created_at": "2024-01-01T00:00:00Z", + "message": {"role": "assistant", "content": "Hello"}, + "done": False, + } + + result = _convert_chunk_to_v1(chunk) + + assert len(result.content) == 1 + assert result.content[0].get("type") == "text" + assert result.content[0].get("text") == "Hello" + + def test_convert_chunk_to_v1_with_context(self) -> None: + """Test converting Ollama streaming chunk with context to `AIMessageChunkV1`.""" + test_context = [10, 20, 30, 40, 50] # Example tokenized context + chunk = { + "model": MODEL_NAME, + "created_at": "2024-01-01T00:00:00Z", + "message": {"role": "assistant", "content": "Hello"}, + "done": True, + "done_reason": "stop", + "context": test_context, + "prompt_eval_count": 5, + "eval_count": 3, + } + + result = _convert_chunk_to_v1(chunk) + + assert len(result.content) == 1 + assert result.content[0].get("type") == "text" + assert result.content[0].get("text") == "Hello" + assert result.response_metadata.get("context") == test_context + + def test_convert_empty_content(self) -> None: + """Test converting empty content blocks.""" + message = HumanMessage(content=[]) + + result = _convert_from_v1_to_ollama_format(message) + + assert result["role"] == "user" + assert result["content"] == "" + assert result["images"] == [] + + +class TestChatOllama(ChatModelV1UnitTests): + """Test `ChatOllama`.""" + + @property + def chat_model_class(self) -> type[ChatOllama]: + return ChatOllama + + @property + def chat_model_params(self) -> dict: + return {"model": MODEL_NAME} + + @property + def has_tool_calling(self) -> bool: + """`ChatOllama` supports tool calling (e.g., `qwen3` models).""" + return True + + @property + def has_tool_choice(self) -> bool: + """`ChatOllama` supports tool choice parameter.""" + return True + + @property + def has_structured_output(self) -> bool: + """`ChatOllama` supports structured output via `with_structured_output`.""" + return True + + @property + def supports_reasoning_content_blocks(self) -> bool: + """`ChatOllama` supports reasoning/thinking content blocks (e.g., `qwen3`).""" + return True + + @property + def supports_image_content_blocks(self) -> bool: + """`ChatOllama` supports image content blocks (e.g., `gemma3`).""" + return True + + @property + def 
supports_non_standard_blocks(self) -> bool: + """Override to indicate Ollama doesn't support non-standard content blocks. + + So far, everything returned by Ollama fits into the standard + `text`, `image`, and `thinking` content blocks. + + """ + return False + + # TODO: investigate type ignore, this feels hacky + @pytest.fixture + def model(self) -> Generator[ChatOllama, None, None]: # type: ignore[override] + """Create a ChatOllama instance for testing.""" + sync_patcher = patch("langchain_ollama.v1.chat_models.base.Client") + async_patcher = patch("langchain_ollama.v1.chat_models.base.AsyncClient") + + mock_sync_client_class = sync_patcher.start() + mock_async_client_class = async_patcher.start() + + mock_sync_client = MagicMock() + mock_async_client = MagicMock() + + mock_sync_client_class.return_value = mock_sync_client + mock_async_client_class.return_value = mock_async_client + + def mock_chat_response(*args: Any, **kwargs: Any) -> Iterator[dict[str, Any]]: + # Check request characteristics + request_data = kwargs.get("messages", []) + has_tools = "tools" in kwargs + + # Check if this is a reasoning request + is_reasoning_request = any( + isinstance(msg, dict) + and "Think step by step" in str(msg.get("content", "")) + for msg in request_data + ) + + # Basic response structure + base_response = { + "model": MODEL_NAME, + "created_at": "2024-01-01T00:00:00Z", + "done": True, + "done_reason": "stop", + "prompt_eval_count": 10, + "eval_count": 20, + } + + # Generate appropriate response based on request type + if has_tools: + # Mock tool call response + base_response["message"] = { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "function": { + "name": "sample_tool", + "arguments": '{"query": "test"}', + } + } + ], + } + elif is_reasoning_request: + # Mock response with reasoning content block + base_response["message"] = { + "role": "assistant", + "content": "The answer is 4.", + "thinking": "Let me think step by step: 2 + 2 = 4", + } + else: + # Regular text response + base_response["message"] = { + "role": "assistant", + "content": "Test response", + } + + return iter([base_response]) + + async def mock_async_chat_iterator( + *args: Any, **kwargs: Any + ) -> AsyncIterator[dict[str, Any]]: + for item in mock_chat_response(*args, **kwargs): + yield item + + mock_sync_client.chat.side_effect = mock_chat_response + mock_async_client.chat.side_effect = mock_async_chat_iterator + + model_instance = self.chat_model_class(**self.chat_model_params) + yield model_instance + sync_patcher.stop() + async_patcher.stop() + + def test_initialization(self) -> None: + """Test `ChatOllama` initialization.""" + with ( + patch("langchain_ollama.v1.chat_models.base.Client"), + patch("langchain_ollama.v1.chat_models.base.AsyncClient"), + ): + llm = ChatOllama(model=MODEL_NAME) + + assert llm.model == MODEL_NAME + assert llm._llm_type == "chat-ollama-v1" + + def test_chat_params(self) -> None: + """Test `_chat_params()`.""" + with ( + patch("langchain_ollama.v1.chat_models.base.Client"), + patch("langchain_ollama.v1.chat_models.base.AsyncClient"), + ): + llm = ChatOllama(model=MODEL_NAME, temperature=0.7) + + messages: list[MessageV1] = [HumanMessage("Hello")] + + params = llm._chat_params(messages) + + assert params["model"] == MODEL_NAME + assert len(params["messages"]) == 1 + assert params["messages"][0]["role"] == "user" + assert params["messages"][0]["content"] == "Hello" + + # Ensure options carry over + assert params["options"].temperature == 0.7 + + def test_ls_params(self) -> None: 
+ """Test LangSmith parameters.""" + with ( + patch("langchain_ollama.v1.chat_models.base.Client"), + patch("langchain_ollama.v1.chat_models.base.AsyncClient"), + ): + llm = ChatOllama(model=MODEL_NAME, temperature=0.5) + + ls_params = llm._get_ls_params() + + assert ls_params.get("ls_provider") == "ollama" + assert ls_params.get("ls_model_name") == MODEL_NAME + assert ls_params.get("ls_model_type") == "chat" + assert ls_params.get("ls_temperature") == 0.5 + + def test_bind_tools_basic(self) -> None: + """Test basic tool binding functionality.""" + with ( + patch("langchain_ollama.v1.chat_models.base.Client"), + patch("langchain_ollama.v1.chat_models.base.AsyncClient"), + ): + llm = ChatOllama(model=MODEL_NAME) + + def test_tool(query: str) -> str: + """A test tool.""" + return f"Result for: {query}" + + bound_llm = llm.bind_tools([test_tool]) + + # Should return a bound model + assert bound_llm is not None + + +# Missing: `test_arbitrary_roles_accepted_in_chatmessages` +# Not brought over since it would appear that it's just a workaround to `think=True` +# But can be added if needed in the future. + + +@pytest.mark.allow_socket +@patch("langchain_ollama.v1.chat_models.base.validate_model") +@patch("langchain_ollama.v1.chat_models.base.Client") +def test_validate_model_on_init( + mock_client_class: Any, mock_validate_model: Any +) -> None: + """Test that local model presence is validated on initialization when requested.""" + mock_client = MagicMock() + mock_client_class.return_value = mock_client + + # Test that validate_model is called when validate_model_on_init=True + ChatOllama(model=MODEL_NAME, validate_model_on_init=True) + mock_validate_model.assert_called_once() + mock_validate_model.reset_mock() + + # Test that validate_model is NOT called when validate_model_on_init=False + ChatOllama(model=MODEL_NAME, validate_model_on_init=False) + mock_validate_model.assert_not_called() + + # Test that validate_model is NOT called by default + ChatOllama(model=MODEL_NAME) + mock_validate_model.assert_not_called() + + +# Define a dummy raw_tool_call for the function signature +dummy_raw_tool_call = { + "function": {"name": "test_func", "arguments": ""}, +} + + +@pytest.mark.parametrize( + "input_string, expected_output", + [ + # Case 1: Standard double-quoted JSON + ('{"key": "value", "number": 123}', {"key": "value", "number": 123}), + # Case 2: Single-quoted string (the original bug) + ("{'key': 'value', 'number': 123}", {"key": "value", "number": 123}), + # Case 3: String with an internal apostrophe + ('{"text": "It\'s a great test!"}', {"text": "It's a great test!"}), + # Case 4: Mixed quotes that ast can handle + ("{'text': \"It's a great test!\"}", {"text": "It's a great test!"}), + ], +) +def test_parse_json_string_success_cases( + input_string: str, expected_output: Any +) -> None: + """Tests that `_parse_json_string` correctly parses valid and fixable strings.""" + raw_tool_call = {"function": {"name": "test_func", "arguments": input_string}} + result = _parse_json_string(input_string, raw_tool_call=raw_tool_call, skip=False) + assert result == expected_output + + +def test_parse_json_string_failure_case_raises_exception() -> None: + """Tests that `_parse_json_string` raises an exception for malformed strings.""" + malformed_string = "{'key': 'value',,}" + raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}} + with pytest.raises(OutputParserException): + _parse_json_string( + malformed_string, + raw_tool_call=raw_tool_call, + skip=False, + ) + + +def 
test_parse_json_string_skip_returns_input_on_failure() -> None: + """Tests that `skip=True` returns the original string on parse failure.""" + malformed_string = "{'not': valid,,,}" + raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}} + result = _parse_json_string( + malformed_string, + raw_tool_call=raw_tool_call, + skip=True, + ) + assert result == malformed_string + + +def test__parse_arguments_from_tool_call() -> None: + """Test that string arguments are preserved as strings in tool call parsing. + + This test verifies the fix for PR #30154 which addressed an issue where + string-typed tool arguments (like IDs or long strings) were being incorrectly + processed. The parser should preserve string values as strings rather than + attempting to parse them as JSON when they're already valid string arguments. + + The test uses a long string ID to ensure string arguments maintain their + original type after parsing, which is critical for tools expecting string inputs. + """ + raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501 + raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"] + response = _parse_arguments_from_tool_call(raw_tool_calls[0]) + assert response is not None + assert isinstance(response["arg_1"], str) + + +@pytest.mark.allow_socket +def test_load_response_with_empty_content_is_skipped( + caplog: pytest.LogCaptureFixture, +) -> None: + """Test that load responses with empty content log a warning and are skipped.""" + load_only_response = [ + { + "model": "test-model", + "created_at": "2025-01-01T00:00:00.000000000Z", + "done": True, + "done_reason": "load", + "message": {"role": "assistant", "content": ""}, + } + ] + + with patch("langchain_ollama.v1.chat_models.base.Client") as mock_client_class: + mock_client = MagicMock() + mock_client_class.return_value = mock_client + mock_client.chat.return_value = iter(load_only_response) + + llm = ChatOllama(model="test-model", streaming=True) + + with ( + caplog.at_level(logging.WARNING), + pytest.raises(ValueError, match="No generations found in stream"), + ): + llm.invoke([HumanMessage("Hello")]) + + assert "Ollama returned empty response with `done_reason='load'`" in caplog.text + + +@pytest.mark.allow_socket +def test_load_response_with_whitespace_content_is_skipped( + caplog: pytest.LogCaptureFixture, +) -> None: + """Test load responses w/ only whitespace content log a warning and are skipped.""" + load_whitespace_response = [ + { + "model": "test-model", + "created_at": "2025-01-01T00:00:00.000000000Z", + "done": True, + "done_reason": "load", + "message": {"role": "assistant", "content": " \n \t "}, + } + ] + + with patch("langchain_ollama.v1.chat_models.base.Client") as mock_client_class: + mock_client = MagicMock() + mock_client_class.return_value = mock_client + mock_client.chat.return_value = iter(load_whitespace_response) + + llm = ChatOllama(model="test-model", streaming=True) + + with ( + caplog.at_level(logging.WARNING), + pytest.raises(ValueError, match="No generations found in stream"), + ): + llm.invoke([HumanMessage("Hello")]) + assert "Ollama returned empty response with `done_reason='load'`" in caplog.text + + +@pytest.mark.allow_socket +def test_load_followed_by_content_response( + caplog: pytest.LogCaptureFixture, +) -> None: + """Test load responses log a warning and are skipped when followed by 
content.""" + load_then_content_response = [ + { + "model": "test-model", + "created_at": "2025-01-01T00:00:00.000000000Z", + "done": True, + "done_reason": "load", + "message": {"role": "assistant", "content": ""}, + }, + { + "model": "test-model", + "created_at": "2025-01-01T00:00:01.000000000Z", + "done": True, + "done_reason": "stop", + "message": { + "role": "assistant", + "content": "Hello! How can I help you today?", + }, + }, + ] + + with patch("langchain_ollama.v1.chat_models.base.Client") as mock_client_class: + mock_client = MagicMock() + mock_client_class.return_value = mock_client + mock_client.chat.return_value = iter(load_then_content_response) + + llm = ChatOllama(model="test-model", streaming=True) + + with caplog.at_level(logging.WARNING): + result = llm.invoke([HumanMessage("Hello")]) + + assert "Ollama returned empty response with `done_reason='load'`" in caplog.text + assert len(result.content) == 1 + assert result.text == "Hello! How can I help you today?" + assert result.response_metadata.get("done_reason") == "stop" + + +@pytest.mark.allow_socket +def test_load_response_with_actual_content_is_not_skipped( + caplog: pytest.LogCaptureFixture, +) -> None: + """Test load responses with actual content are NOT skipped and log no warning.""" + load_with_content_response = [ + { + "model": "test-model", + "created_at": "2025-01-01T00:00:00.000000000Z", + "done": True, + "done_reason": "load", + "message": {"role": "assistant", "content": "This is actual content"}, + } + ] + + with patch("langchain_ollama.v1.chat_models.base.Client") as mock_client_class: + mock_client = MagicMock() + mock_client_class.return_value = mock_client + mock_client.chat.return_value = iter(load_with_content_response) + + llm = ChatOllama(model="test-model", streaming=True) + + with caplog.at_level(logging.WARNING): + result = llm.invoke([HumanMessage("Hello")]) + + assert len(result.content) == 1 + assert result.text == "This is actual content" + assert result.response_metadata.get("done_reason") == "load" + assert not caplog.text diff --git a/libs/partners/ollama/tests/unit_tests/v1/test_imports.py b/libs/partners/ollama/tests/unit_tests/v1/test_imports.py new file mode 100644 index 00000000000..e2965ed769c --- /dev/null +++ b/libs/partners/ollama/tests/unit_tests/v1/test_imports.py @@ -0,0 +1,9 @@ +from langchain_ollama.v1 import __all__ + +EXPECTED_ALL = [ + "ChatOllama", +] + + +def test_all_imports() -> None: + assert sorted(EXPECTED_ALL) == sorted(__all__) diff --git a/libs/partners/ollama/uv.lock b/libs/partners/ollama/uv.lock index bb34f95c883..aaec31a6c26 100644 --- a/libs/partners/ollama/uv.lock +++ b/libs/partners/ollama/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.9" resolution-markers = [ "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", @@ -305,7 +305,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.70" +version = "0.4.0.dev0" source = { editable = "../../core" } dependencies = [ { name = "jsonpatch" }, @@ -1022,113 +1022,126 @@ wheels = [ [[package]] name = "pydantic" -version = "2.10.6" +version = "2.11.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, { name = "pydantic-core" }, { name = "typing-extensions" }, + { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = 
"sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681, upload-time = "2025-01-24T01:42:12.693Z" } +sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696, upload-time = "2025-01-24T01:42:10.371Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, ] [[package]] name = "pydantic-core" -version = "2.27.2" +version = "2.33.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443, upload-time = "2024-12-18T11:31:54.917Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3a/bc/fed5f74b5d802cf9a03e83f60f18864e90e3aed7223adaca5ffb7a8d8d64/pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa", size = 1895938, upload-time = "2024-12-18T11:27:14.406Z" }, - { url = "https://files.pythonhosted.org/packages/71/2a/185aff24ce844e39abb8dd680f4e959f0006944f4a8a0ea372d9f9ae2e53/pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c", size = 1815684, upload-time = "2024-12-18T11:27:16.489Z" }, - { url = "https://files.pythonhosted.org/packages/c3/43/fafabd3d94d159d4f1ed62e383e264f146a17dd4d48453319fd782e7979e/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a", size = 1829169, upload-time = "2024-12-18T11:27:22.16Z" }, - { url = "https://files.pythonhosted.org/packages/a2/d1/f2dfe1a2a637ce6800b799aa086d079998959f6f1215eb4497966efd2274/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5", size = 1867227, upload-time = "2024-12-18T11:27:25.097Z" }, - { url = "https://files.pythonhosted.org/packages/7d/39/e06fcbcc1c785daa3160ccf6c1c38fea31f5754b756e34b65f74e99780b5/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c", size = 2037695, upload-time = "2024-12-18T11:27:28.656Z" }, - { url = 
"https://files.pythonhosted.org/packages/7a/67/61291ee98e07f0650eb756d44998214231f50751ba7e13f4f325d95249ab/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7", size = 2741662, upload-time = "2024-12-18T11:27:30.798Z" }, - { url = "https://files.pythonhosted.org/packages/32/90/3b15e31b88ca39e9e626630b4c4a1f5a0dfd09076366f4219429e6786076/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a", size = 1993370, upload-time = "2024-12-18T11:27:33.692Z" }, - { url = "https://files.pythonhosted.org/packages/ff/83/c06d333ee3a67e2e13e07794995c1535565132940715931c1c43bfc85b11/pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236", size = 1996813, upload-time = "2024-12-18T11:27:37.111Z" }, - { url = "https://files.pythonhosted.org/packages/7c/f7/89be1c8deb6e22618a74f0ca0d933fdcb8baa254753b26b25ad3acff8f74/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962", size = 2005287, upload-time = "2024-12-18T11:27:40.566Z" }, - { url = "https://files.pythonhosted.org/packages/b7/7d/8eb3e23206c00ef7feee17b83a4ffa0a623eb1a9d382e56e4aa46fd15ff2/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9", size = 2128414, upload-time = "2024-12-18T11:27:43.757Z" }, - { url = "https://files.pythonhosted.org/packages/4e/99/fe80f3ff8dd71a3ea15763878d464476e6cb0a2db95ff1c5c554133b6b83/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af", size = 2155301, upload-time = "2024-12-18T11:27:47.36Z" }, - { url = "https://files.pythonhosted.org/packages/2b/a3/e50460b9a5789ca1451b70d4f52546fa9e2b420ba3bfa6100105c0559238/pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4", size = 1816685, upload-time = "2024-12-18T11:27:50.508Z" }, - { url = "https://files.pythonhosted.org/packages/57/4c/a8838731cb0f2c2a39d3535376466de6049034d7b239c0202a64aaa05533/pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31", size = 1982876, upload-time = "2024-12-18T11:27:53.54Z" }, - { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421, upload-time = "2024-12-18T11:27:55.409Z" }, - { url = "https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998, upload-time = "2024-12-18T11:27:57.252Z" }, - { url = "https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167, upload-time = 
"2024-12-18T11:27:59.146Z" }, - { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071, upload-time = "2024-12-18T11:28:02.625Z" }, - { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244, upload-time = "2024-12-18T11:28:04.442Z" }, - { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470, upload-time = "2024-12-18T11:28:07.679Z" }, - { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291, upload-time = "2024-12-18T11:28:10.297Z" }, - { url = "https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613, upload-time = "2024-12-18T11:28:13.362Z" }, - { url = "https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355, upload-time = "2024-12-18T11:28:16.587Z" }, - { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661, upload-time = "2024-12-18T11:28:18.407Z" }, - { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261, upload-time = "2024-12-18T11:28:21.471Z" }, - { url = "https://files.pythonhosted.org/packages/72/9d/a241db83f973049a1092a079272ffe2e3e82e98561ef6214ab53fe53b1c7/pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c", size = 1812361, upload-time = "2024-12-18T11:28:23.53Z" }, - { url = "https://files.pythonhosted.org/packages/e8/ef/013f07248041b74abd48a385e2110aa3a9bbfef0fbd97d4e6d07d2f5b89a/pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc", size = 1982484, upload-time = "2024-12-18T11:28:25.391Z" }, - { url = "https://files.pythonhosted.org/packages/10/1c/16b3a3e3398fd29dca77cea0a1d998d6bde3902fa2706985191e2313cc76/pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4", 
size = 1867102, upload-time = "2024-12-18T11:28:28.593Z" }, - { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127, upload-time = "2024-12-18T11:28:30.346Z" }, - { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340, upload-time = "2024-12-18T11:28:32.521Z" }, - { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900, upload-time = "2024-12-18T11:28:34.507Z" }, - { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177, upload-time = "2024-12-18T11:28:36.488Z" }, - { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046, upload-time = "2024-12-18T11:28:39.409Z" }, - { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386, upload-time = "2024-12-18T11:28:41.221Z" }, - { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060, upload-time = "2024-12-18T11:28:44.709Z" }, - { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870, upload-time = "2024-12-18T11:28:46.839Z" }, - { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822, upload-time = "2024-12-18T11:28:48.896Z" }, - { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364, upload-time = "2024-12-18T11:28:50.755Z" }, - { url = 
"https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303, upload-time = "2024-12-18T11:28:54.122Z" }, - { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064, upload-time = "2024-12-18T11:28:56.074Z" }, - { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046, upload-time = "2024-12-18T11:28:58.107Z" }, - { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092, upload-time = "2024-12-18T11:29:01.335Z" }, - { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709, upload-time = "2024-12-18T11:29:03.193Z" }, - { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273, upload-time = "2024-12-18T11:29:05.306Z" }, - { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027, upload-time = "2024-12-18T11:29:07.294Z" }, - { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888, upload-time = "2024-12-18T11:29:09.249Z" }, - { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738, upload-time = "2024-12-18T11:29:11.23Z" }, - { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138, upload-time = "2024-12-18T11:29:16.396Z" }, - { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025, upload-time = 
"2024-12-18T11:29:20.25Z" }, - { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633, upload-time = "2024-12-18T11:29:23.877Z" }, - { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404, upload-time = "2024-12-18T11:29:25.872Z" }, - { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130, upload-time = "2024-12-18T11:29:29.252Z" }, - { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946, upload-time = "2024-12-18T11:29:31.338Z" }, - { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387, upload-time = "2024-12-18T11:29:33.481Z" }, - { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453, upload-time = "2024-12-18T11:29:35.533Z" }, - { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" }, - { url = "https://files.pythonhosted.org/packages/27/97/3aef1ddb65c5ccd6eda9050036c956ff6ecbfe66cb7eb40f280f121a5bb0/pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993", size = 1896475, upload-time = "2024-12-18T11:30:18.316Z" }, - { url = "https://files.pythonhosted.org/packages/ad/d3/5668da70e373c9904ed2f372cb52c0b996426f302e0dee2e65634c92007d/pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308", size = 1772279, upload-time = "2024-12-18T11:30:20.547Z" }, - { url = "https://files.pythonhosted.org/packages/8a/9e/e44b8cb0edf04a2f0a1f6425a65ee089c1d6f9c4c2dcab0209127b6fdfc2/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4", size = 1829112, upload-time = "2024-12-18T11:30:23.255Z" }, - { url = "https://files.pythonhosted.org/packages/1c/90/1160d7ac700102effe11616e8119e268770f2a2aa5afb935f3ee6832987d/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf", size = 1866780, upload-time = 
"2024-12-18T11:30:25.742Z" }, - { url = "https://files.pythonhosted.org/packages/ee/33/13983426df09a36d22c15980008f8d9c77674fc319351813b5a2739b70f3/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76", size = 2037943, upload-time = "2024-12-18T11:30:28.036Z" }, - { url = "https://files.pythonhosted.org/packages/01/d7/ced164e376f6747e9158c89988c293cd524ab8d215ae4e185e9929655d5c/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118", size = 2740492, upload-time = "2024-12-18T11:30:30.412Z" }, - { url = "https://files.pythonhosted.org/packages/8b/1f/3dc6e769d5b7461040778816aab2b00422427bcaa4b56cc89e9c653b2605/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630", size = 1995714, upload-time = "2024-12-18T11:30:34.358Z" }, - { url = "https://files.pythonhosted.org/packages/07/d7/a0bd09bc39283530b3f7c27033a814ef254ba3bd0b5cfd040b7abf1fe5da/pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54", size = 1997163, upload-time = "2024-12-18T11:30:37.979Z" }, - { url = "https://files.pythonhosted.org/packages/2d/bb/2db4ad1762e1c5699d9b857eeb41959191980de6feb054e70f93085e1bcd/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f", size = 2005217, upload-time = "2024-12-18T11:30:40.367Z" }, - { url = "https://files.pythonhosted.org/packages/53/5f/23a5a3e7b8403f8dd8fc8a6f8b49f6b55c7d715b77dcf1f8ae919eeb5628/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362", size = 2127899, upload-time = "2024-12-18T11:30:42.737Z" }, - { url = "https://files.pythonhosted.org/packages/c2/ae/aa38bb8dd3d89c2f1d8362dd890ee8f3b967330821d03bbe08fa01ce3766/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96", size = 2155726, upload-time = "2024-12-18T11:30:45.279Z" }, - { url = "https://files.pythonhosted.org/packages/98/61/4f784608cc9e98f70839187117ce840480f768fed5d386f924074bf6213c/pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e", size = 1817219, upload-time = "2024-12-18T11:30:47.718Z" }, - { url = "https://files.pythonhosted.org/packages/57/82/bb16a68e4a1a858bb3768c2c8f1ff8d8978014e16598f001ea29a25bf1d1/pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67", size = 1985382, upload-time = "2024-12-18T11:30:51.871Z" }, - { url = "https://files.pythonhosted.org/packages/46/72/af70981a341500419e67d5cb45abe552a7c74b66326ac8877588488da1ac/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e", size = 1891159, upload-time = "2024-12-18T11:30:54.382Z" }, - { url = "https://files.pythonhosted.org/packages/ad/3d/c5913cccdef93e0a6a95c2d057d2c2cba347815c845cda79ddd3c0f5e17d/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8", size = 1768331, 
upload-time = "2024-12-18T11:30:58.178Z" }, - { url = "https://files.pythonhosted.org/packages/f6/f0/a3ae8fbee269e4934f14e2e0e00928f9346c5943174f2811193113e58252/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3", size = 1822467, upload-time = "2024-12-18T11:31:00.6Z" }, - { url = "https://files.pythonhosted.org/packages/d7/7a/7bbf241a04e9f9ea24cd5874354a83526d639b02674648af3f350554276c/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f", size = 1979797, upload-time = "2024-12-18T11:31:07.243Z" }, - { url = "https://files.pythonhosted.org/packages/4f/5f/4784c6107731f89e0005a92ecb8a2efeafdb55eb992b8e9d0a2be5199335/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133", size = 1987839, upload-time = "2024-12-18T11:31:09.775Z" }, - { url = "https://files.pythonhosted.org/packages/6d/a7/61246562b651dff00de86a5f01b6e4befb518df314c54dec187a78d81c84/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc", size = 1998861, upload-time = "2024-12-18T11:31:13.469Z" }, - { url = "https://files.pythonhosted.org/packages/86/aa/837821ecf0c022bbb74ca132e117c358321e72e7f9702d1b6a03758545e2/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50", size = 2116582, upload-time = "2024-12-18T11:31:17.423Z" }, - { url = "https://files.pythonhosted.org/packages/81/b0/5e74656e95623cbaa0a6278d16cf15e10a51f6002e3ec126541e95c29ea3/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9", size = 2151985, upload-time = "2024-12-18T11:31:19.901Z" }, - { url = "https://files.pythonhosted.org/packages/63/37/3e32eeb2a451fddaa3898e2163746b0cffbbdbb4740d38372db0490d67f3/pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151", size = 2004715, upload-time = "2024-12-18T11:31:22.821Z" }, - { url = "https://files.pythonhosted.org/packages/29/0e/dcaea00c9dbd0348b723cae82b0e0c122e0fa2b43fa933e1622fd237a3ee/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656", size = 1891733, upload-time = "2024-12-18T11:31:26.876Z" }, - { url = "https://files.pythonhosted.org/packages/86/d3/e797bba8860ce650272bda6383a9d8cad1d1c9a75a640c9d0e848076f85e/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278", size = 1768375, upload-time = "2024-12-18T11:31:29.276Z" }, - { url = "https://files.pythonhosted.org/packages/41/f7/f847b15fb14978ca2b30262548f5fc4872b2724e90f116393eb69008299d/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb", size = 1822307, upload-time = "2024-12-18T11:31:33.123Z" }, - { url = 
"https://files.pythonhosted.org/packages/9c/63/ed80ec8255b587b2f108e514dc03eed1546cd00f0af281e699797f373f38/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd", size = 1979971, upload-time = "2024-12-18T11:31:35.755Z" }, - { url = "https://files.pythonhosted.org/packages/a9/6d/6d18308a45454a0de0e975d70171cadaf454bc7a0bf86b9c7688e313f0bb/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc", size = 1987616, upload-time = "2024-12-18T11:31:38.534Z" }, - { url = "https://files.pythonhosted.org/packages/82/8a/05f8780f2c1081b800a7ca54c1971e291c2d07d1a50fb23c7e4aef4ed403/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b", size = 1998943, upload-time = "2024-12-18T11:31:41.853Z" }, - { url = "https://files.pythonhosted.org/packages/5e/3e/fe5b6613d9e4c0038434396b46c5303f5ade871166900b357ada4766c5b7/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b", size = 2116654, upload-time = "2024-12-18T11:31:44.756Z" }, - { url = "https://files.pythonhosted.org/packages/db/ad/28869f58938fad8cc84739c4e592989730bfb69b7c90a8fff138dff18e1e/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2", size = 2152292, upload-time = "2024-12-18T11:31:48.613Z" }, - { url = "https://files.pythonhosted.org/packages/a1/0c/c5c5cd3689c32ed1fe8c5d234b079c12c281c051759770c05b8bed6412b5/pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35", size = 2004961, upload-time = "2024-12-18T11:31:52.446Z" }, + { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" }, + { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" }, + { url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" }, + { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" }, + { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" }, + { url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" }, + { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" }, + { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" }, + { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" }, + { url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" }, + { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" }, + { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" }, + { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" }, + { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" }, + { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" }, + { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 
1952909, upload-time = "2025-04-23T18:31:22.371Z" }, + { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" }, + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = 
"sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, + { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, + { url = 
"https://files.pythonhosted.org/packages/53/ea/bbe9095cdd771987d13c82d104a9c8559ae9aec1e29f139e286fd2e9256e/pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d", size = 2028677, upload-time = "2025-04-23T18:32:27.227Z" }, + { url = "https://files.pythonhosted.org/packages/49/1d/4ac5ed228078737d457a609013e8f7edc64adc37b91d619ea965758369e5/pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954", size = 1864735, upload-time = "2025-04-23T18:32:29.019Z" }, + { url = "https://files.pythonhosted.org/packages/23/9a/2e70d6388d7cda488ae38f57bc2f7b03ee442fbcf0d75d848304ac7e405b/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb", size = 1898467, upload-time = "2025-04-23T18:32:31.119Z" }, + { url = "https://files.pythonhosted.org/packages/ff/2e/1568934feb43370c1ffb78a77f0baaa5a8b6897513e7a91051af707ffdc4/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7", size = 1983041, upload-time = "2025-04-23T18:32:33.655Z" }, + { url = "https://files.pythonhosted.org/packages/01/1a/1a1118f38ab64eac2f6269eb8c120ab915be30e387bb561e3af904b12499/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4", size = 2136503, upload-time = "2025-04-23T18:32:35.519Z" }, + { url = "https://files.pythonhosted.org/packages/5c/da/44754d1d7ae0f22d6d3ce6c6b1486fc07ac2c524ed8f6eca636e2e1ee49b/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b", size = 2736079, upload-time = "2025-04-23T18:32:37.659Z" }, + { url = "https://files.pythonhosted.org/packages/4d/98/f43cd89172220ec5aa86654967b22d862146bc4d736b1350b4c41e7c9c03/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3", size = 2006508, upload-time = "2025-04-23T18:32:39.637Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cc/f77e8e242171d2158309f830f7d5d07e0531b756106f36bc18712dc439df/pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a", size = 2113693, upload-time = "2025-04-23T18:32:41.818Z" }, + { url = "https://files.pythonhosted.org/packages/54/7a/7be6a7bd43e0a47c147ba7fbf124fe8aaf1200bc587da925509641113b2d/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782", size = 2074224, upload-time = "2025-04-23T18:32:44.033Z" }, + { url = "https://files.pythonhosted.org/packages/2a/07/31cf8fadffbb03be1cb520850e00a8490c0927ec456e8293cafda0726184/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9", size = 2245403, upload-time = "2025-04-23T18:32:45.836Z" }, + { url = "https://files.pythonhosted.org/packages/b6/8d/bbaf4c6721b668d44f01861f297eb01c9b35f612f6b8e14173cb204e6240/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e", 
size = 2242331, upload-time = "2025-04-23T18:32:47.618Z" }, + { url = "https://files.pythonhosted.org/packages/bb/93/3cc157026bca8f5006250e74515119fcaa6d6858aceee8f67ab6dc548c16/pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9", size = 1910571, upload-time = "2025-04-23T18:32:49.401Z" }, + { url = "https://files.pythonhosted.org/packages/5b/90/7edc3b2a0d9f0dda8806c04e511a67b0b7a41d2187e2003673a996fb4310/pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3", size = 1956504, upload-time = "2025-04-23T18:32:51.287Z" }, + { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" }, + { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" }, + { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" }, + { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" }, + { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" }, + { url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" }, + { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" }, + { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" }, + { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" }, + { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" }, + { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" }, + { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" }, + { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" }, + { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" }, + { url = "https://files.pythonhosted.org/packages/08/98/dbf3fdfabaf81cda5622154fda78ea9965ac467e3239078e0dcd6df159e7/pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101", size = 2024034, upload-time = "2025-04-23T18:33:32.843Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/99/7810aa9256e7f2ccd492590f86b79d370df1e9292f1f80b000b6a75bd2fb/pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64", size = 1858578, upload-time = "2025-04-23T18:33:34.912Z" }, + { url = "https://files.pythonhosted.org/packages/d8/60/bc06fa9027c7006cc6dd21e48dbf39076dc39d9abbaf718a1604973a9670/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d", size = 1892858, upload-time = "2025-04-23T18:33:36.933Z" }, + { url = "https://files.pythonhosted.org/packages/f2/40/9d03997d9518816c68b4dfccb88969756b9146031b61cd37f781c74c9b6a/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535", size = 2068498, upload-time = "2025-04-23T18:33:38.997Z" }, + { url = "https://files.pythonhosted.org/packages/d8/62/d490198d05d2d86672dc269f52579cad7261ced64c2df213d5c16e0aecb1/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d", size = 2108428, upload-time = "2025-04-23T18:33:41.18Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ec/4cd215534fd10b8549015f12ea650a1a973da20ce46430b68fc3185573e8/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6", size = 2069854, upload-time = "2025-04-23T18:33:43.446Z" }, + { url = "https://files.pythonhosted.org/packages/1a/1a/abbd63d47e1d9b0d632fee6bb15785d0889c8a6e0a6c3b5a8e28ac1ec5d2/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca", size = 2237859, upload-time = "2025-04-23T18:33:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/80/1c/fa883643429908b1c90598fd2642af8839efd1d835b65af1f75fba4d94fe/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039", size = 2239059, upload-time = "2025-04-23T18:33:47.735Z" }, + { url = "https://files.pythonhosted.org/packages/d4/29/3cade8a924a61f60ccfa10842f75eb12787e1440e2b8660ceffeb26685e7/pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27", size = 2066661, upload-time = "2025-04-23T18:33:49.995Z" }, ] [[package]] @@ -1443,6 +1456,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, ] +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + [[package]] name = "urllib3" version = "1.26.20" diff --git a/libs/standard-tests/QUICK_START.md b/libs/standard-tests/QUICK_START.md new file mode 100644 index 00000000000..e826638e2c4 --- /dev/null +++ b/libs/standard-tests/QUICK_START.md @@ -0,0 +1,446 @@ +# Standard Tests V1 - Quick Start Guide + +This guide shows you how to quickly get started with the new content blocks v1 test suite. + +## 🚀 Quick Usage + +### 1. Basic Setup + +New imports: + +```python +# v0 +from langchain_tests.unit_tests.chat_models import ChatModelUnitTests + +# v1 +from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests +``` + +### 2. Minimal Configuration + +```python +class TestMyChatModelV1(ChatModelV1UnitTests): + @property + def chat_model_class(self): + return MyChatModelV1 + + # Enable content blocks support + @property + def supports_content_blocks_v1(self): + return True + + # The rest should be the same + @property + def chat_model_params(self): + return {"api_key": "test-key"} +``` + +### 3. Run Tests + +```bash +uv run --group test pytest tests/unit_tests/test_my_model_v1.py -v +``` + +## ⚙️ Feature Configuration + +Like before, only enable the features your model supports: + +```python +class TestAdvancedModelV1(ChatModelV1UnitTests): + # REQUIRED + @property + def supports_content_blocks_v1(self): + return True + + # Multimodal features + @property + def supports_image_content_blocks(self): + return True # ✅ Enable if supported + + @property + def supports_video_content_blocks(self): + return False # ❌ Disable if not supported, but will default to False if not explicitly set + + # Advanced features + @property + def supports_reasoning_content_blocks(self): + """Model generates reasoning steps""" + return True + + @property + def supports_citations(self): + """Model provides source citations""" + return True + + @property + def supports_tool_calls(self): + """Tool calling with metadata""" + return True +``` + +## 📋 Feature Reference + +| Property | Description | Default | +|----------|-------------|---------| +| `supports_content_blocks_v1` | Core content blocks support | `True` | +| `supports_text_content_blocks` | Basic text blocks | `True` | +| `supports_image_content_blocks` | Image content blocks (v1) | `False` | +| `supports_video_content_blocks` | Video content blocks (v1) | `False` | +| `supports_audio_content_blocks` | Audio content blocks (v1) | `False` | +| `supports_file_content_blocks` | File content blocks | `False` | +| `supports_reasoning_content_blocks` | Reasoning/thinking blocks | `False` | +| `supports_citations` | Citation annotations | `False` | +| `supports_web_search_blocks` | Web search integration | `False` | +| `supports_enhanced_tool_calls` | Tool calling | `False` | +| `supports_non_standard_blocks` | Custom content blocks | `True` | + +**Note:** These defaults are provided by the base test class. You only need to override properties where your model's capabilities differ from the default. 
+ +## 🔧 Common Patterns + +### For Text-Only Models + +```python +@property +def supports_content_blocks_v1(self): + return True + +# All multimodal features inherit False defaults from base class +# No need to override them unless your model supports them +``` + +### For Multimodal Models + +Set the v1 content block features that your model supports: + +- `supports_image_content_blocks` +- `supports_video_content_blocks` +- `supports_audio_content_blocks` + +### For Advanced AI Models + +Set the features that your model supports, including reasoning and citations: + +- `supports_reasoning_content_blocks` +- `supports_citations` +- `supports_web_search_blocks` + +## 🚨 Troubleshooting + +### Tests Failing? + +1. **Check feature flags** - Only enable what your model actually supports +2. **Verify API keys** - Integration tests may need credentials +3. **Check model parameters** - Make sure initialization params are correct + +### Tests Skipping? + +This is normal! Tests skip automatically when features aren't supported. Only tests for enabled features will run. + +## 🏃‍♂️ Migration Checklist + +- [ ] Update test base class imports +- [ ] Add `supports_content_blocks_v1 = True` +- [ ] Configure feature flags based on model capabilities +- [ ] Run tests to verify configuration +- [ ] Adjust any failing/skipping tests as needed + +## 📚 Next Steps + +- Read `README_V1.md` for complete feature documentation +- Look at `tests/unit_tests/test_chat_models_v1.py` for working examples + +# Example Files + +## Unit Tests + +```python +"""Example test implementation using ``ChatModelV1UnitTests``. + +This file demonstrates how to use the new content blocks v1 test suite +for testing chat models that support the enhanced content blocks system. +""" + +from typing import Any + +from langchain_core.language_models.v1.chat_models import BaseChatModelV1 +from langchain_core.language_models import GenericFakeChatModel +from langchain_core.messages import BaseMessage +from langchain_core.messages.content_blocks import TextContentBlock + +from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests + + +class FakeChatModelV1(GenericFakeChatModel): + """Fake chat model that supports content blocks v1 format. + + This is a test implementation that demonstrates content blocks support. 
+ """ + + def _call(self, messages: Any, stop: Any = None, **kwargs: Any) -> BaseMessage: + """Override to handle content blocks format.""" + # Process messages and handle content blocks + response = super()._call(messages, stop, **kwargs) + + # Convert response to content blocks format if needed + if isinstance(response.content, str): + # Convert string response to TextContentBlock format + from langchain_core.messages import AIMessage + + text_block: TextContentBlock = {"type": "text", "text": response.content} + return AIMessage(content=[text_block]) + + return response + + +class TestFakeChatModelV1(ChatModelV1UnitTests): + """Test implementation using the new content blocks v1 test suite.""" + + @property + def chat_model_class(self) -> type[BaseChatModelV1]: + """Return the fake chat model class for testing.""" + return FakeChatModelV1 + + @property + def chat_model_params(self) -> dict[str, Any]: + """Parameters for initializing the fake chat model.""" + return { + "messages": iter( + [ + "This is a test response with content blocks support.", + "Another test response for validation.", + "Final test response for comprehensive testing.", + ] + ) + } + + # Content blocks v1 support configuration + @property + def supports_content_blocks_v1(self) -> bool: + """This fake model supports content blocks v1.""" + return True + + @property + def supports_text_content_blocks(self) -> bool: + """This fake model supports TextContentBlock.""" + return True + + @property + def supports_reasoning_content_blocks(self) -> bool: + """This fake model does not support ReasoningContentBlock.""" + return False + + @property + def supports_citations(self) -> bool: + """This fake model does not support citations.""" + return False + + @property + def supports_tool_calls(self) -> bool: + """This fake model supports tool calls.""" + return True + + @property + def has_tool_calling(self) -> bool: + """Enable tool calling tests.""" + return True + + @property + def supports_image_content_blocks(self) -> bool: + """This fake model does not support image content blocks.""" + return False + + @property + def supports_non_standard_blocks(self) -> bool: + """This fake model supports non-standard blocks.""" + return True +``` + +## Integration Tests + +```python +"""Example integration test implementation using ChatModelV1IntegrationTests. + +This file demonstrates how to use the new content blocks v1 integration test suite +for testing real chat models that support the enhanced content blocks system. + +Note: This is a template/example. Real implementations should replace +FakeChatModelV1 with actual chat model classes. +""" + +import os +from typing import Any + +import pytest +from langchain_core.language_models import BaseChatModel, GenericFakeChatModel + +from langchain_tests.integration_tests.chat_models_v1 import ChatModelV1IntegrationTests + + +# Example fake model for demonstration (replace with real model in practice) +class FakeChatModelV1Integration(GenericFakeChatModel): + """Fake chat model for integration testing demonstration.""" + + @property + def _llm_type(self) -> str: + return "fake_chat_model_v1_integration" + + +class TestFakeChatModelV1Integration(ChatModelV1IntegrationTests): + """Example integration test using content blocks v1 test suite. + + In practice, this would test a real chat model that supports content blocks. + Replace FakeChatModelV1Integration with your actual chat model class. 
+ """ + + @property + def chat_model_class(self) -> type[BaseChatModel]: + """Return the chat model class to test.""" + return FakeChatModelV1Integration + + @property + def chat_model_params(self) -> dict[str, Any]: + """Parameters for initializing the chat model.""" + return { + "messages": iter( + [ + "Integration test response with content blocks.", + "Multimodal content analysis response.", + "Tool calling response with structured output.", + "Citation-enhanced response with sources.", + "Web search integration response.", + ] + ) + } + + # Content blocks v1 support configuration + @property + def supports_content_blocks_v1(self) -> bool: + """Enable content blocks v1 testing.""" + return True + + @property + def supports_text_content_blocks(self) -> bool: + """Enable TextContentBlock testing.""" + return True + + @property + def supports_reasoning_content_blocks(self) -> bool: + """Disable reasoning blocks for this fake model.""" + return False + + @property + def supports_citations(self) -> bool: + """Disable citations for this fake model.""" + return False + + @property + def supports_web_search_blocks(self) -> bool: + """Disable web search for this fake model.""" + return False + + @property + def supports_tool_calls(self) -> bool: + """Enable tool calling tests.""" + return True + + @property + def has_tool_calling(self) -> bool: + """Enable tool calling tests.""" + return True + + @property + def supports_image_inputs(self) -> bool: + """Disable image inputs for this fake model.""" + return False + + @property + def supports_video_inputs(self) -> bool: + """Disable video inputs for this fake model.""" + return False + + @property + def supports_audio_inputs(self) -> bool: + """Disable audio inputs for this fake model.""" + return False + + @property + def supports_file_content_blocks(self) -> bool: + """Disable file content blocks for this fake model.""" + return False + + @property + def supports_non_standard_blocks(self) -> bool: + """Enable non-standard blocks support.""" + return True + + +# Example of a more realistic integration test configuration +# that would require API keys and external services +class TestRealChatModelV1IntegrationTemplate(ChatModelV1IntegrationTests): + """Template for testing real chat models with content blocks v1. + + This class shows how you would configure tests for a real model + that requires API keys and supports various content block features. 
+ """ + + @pytest.fixture(scope="class", autouse=True) + def check_api_key(self) -> None: + """Check that required API key is available.""" + if not os.getenv("YOUR_MODEL_API_KEY"): + pytest.skip("YOUR_MODEL_API_KEY not set, skipping integration tests") + + @property + def chat_model_class(self) -> type[BaseChatModel]: + """Return your actual chat model class.""" + # Replace with your actual model, e.g.: + # from your_package import YourChatModel + # return YourChatModel + return FakeChatModelV1Integration # Placeholder + + @property + def chat_model_params(self) -> dict[str, Any]: + """Parameters for your actual chat model.""" + return { + # "api_key": os.getenv("YOUR_MODEL_API_KEY"), + # "model": "your-model-name", + # "temperature": 0.1, + # Add your model's specific parameters + } + + # Configure which features your model supports + @property + def supports_content_blocks_v1(self) -> bool: + return True # Set based on your model's capabilities + + @property + def supports_image_inputs(self) -> bool: + return True # Set based on your model's capabilities + + @property + def supports_reasoning_content_blocks(self) -> bool: + return True # Set based on your model's capabilities + + @property + def supports_citations(self) -> bool: + return True # Set based on your model's capabilities + + @property + def supports_web_search_blocks(self) -> bool: + return False # Set based on your model's capabilities + + @property + def supports_enhanced_tool_calls(self) -> bool: + return True # Set based on your model's capabilities + + @property + def has_tool_calling(self) -> bool: + return True # Set based on your model's capabilities + + # Add any model-specific test overrides or skips + @pytest.mark.skip(reason="Template class - not for actual testing") + def test_all_inherited_tests(self) -> None: + """This template class should not run actual tests.""" + pass + +``` diff --git a/libs/standard-tests/README.md b/libs/standard-tests/README.md index 77f6780062a..8355f3c4f23 100644 --- a/libs/standard-tests/README.md +++ b/libs/standard-tests/README.md @@ -14,62 +14,68 @@ also break your CI if we introduce tests that your integration doesn't pass. Pip: - ```bash - pip install -U langchain-tests - ``` +```bash +pip install -U langchain-tests +``` Poetry: - ```bash - poetry add langchain-tests - ``` +```bash +poetry add langchain-tests +``` + +uv: + +```bash +uv add langchain-tests +``` ## Usage -To add standard tests to an integration package's e.g. ChatModel, you need to create +To add standard tests to an integration package (e.g., for a ChatModel), you need to create -1. A unit test class that inherits from ChatModelUnitTests -2. An integration test class that inherits from ChatModelIntegrationTests +1. A unit test class that inherits from `ChatModelUnitTests` +2. 
An integration test class that inherits from `ChatModelIntegrationTests` `tests/unit_tests/test_standard.py`: - ```python - """Standard LangChain interface tests""" +```python +"""Standard LangChain interface tests""" - from typing import Type +from typing import Type - import pytest - from langchain_core.language_models import BaseChatModel - from langchain_tests.unit_tests import ChatModelUnitTests +import pytest +from langchain_core.language_models import BaseChatModel +from langchain_tests.unit_tests import ChatModelUnitTests - from langchain_parrot_chain import ChatParrotChain +from langchain_parrot_chain import ChatParrotChain - class TestParrotChainStandard(ChatModelUnitTests): - @pytest.fixture - def chat_model_class(self) -> Type[BaseChatModel]: - return ChatParrotChain - ``` +class TestParrotChainStandard(ChatModelUnitTests): + @pytest.fixture + def chat_model_class(self) -> Type[BaseChatModel]: + return ChatParrotChain +``` `tests/integration_tests/test_standard.py`: - ```python - """Standard LangChain interface tests""" +```python +"""Standard LangChain interface tests""" - from typing import Type +from typing import Type - import pytest - from langchain_core.language_models import BaseChatModel - from langchain_tests.integration_tests import ChatModelIntegrationTests +import pytest +from langchain_core.language_models import BaseChatModel +from langchain_tests.integration_tests import ChatModelIntegrationTests - from langchain_parrot_chain import ChatParrotChain +from langchain_parrot_chain import ChatParrotChain - class TestParrotChainStandard(ChatModelIntegrationTests): - @pytest.fixture - def chat_model_class(self) -> Type[BaseChatModel]: - return ChatParrotChain - ``` +class TestParrotChainStandard(ChatModelIntegrationTests): + @pytest.fixture + def chat_model_class(self) -> Type[BaseChatModel]: + return ChatParrotChain +``` ## Reference @@ -80,3 +86,10 @@ as required is optional. - `chat_model_params`: The keyword arguments to pass to the chat model constructor - `chat_model_has_tool_calling`: Whether the chat model can call tools. By default, this is set to `hasattr(chat_model_class, 'bind_tools)` - `chat_model_has_structured_output`: Whether the chat model can structured output. By default, this is set to `hasattr(chat_model_class, 'with_structured_output')` + +## Content Blocks V1 Support + +For chat models that support the new content blocks v1 format (multimodal content, reasoning blocks, citations, etc.), use the v1 test suite instead: + +- See `QUICK_START.md` and `README_V1.md` for v1 testing documentation +- Use `ChatModelV1Tests` from `langchain_tests.unit_tests.chat_models_v1` diff --git a/libs/standard-tests/README_V1.md b/libs/standard-tests/README_V1.md new file mode 100644 index 00000000000..11c653dc3c1 --- /dev/null +++ b/libs/standard-tests/README_V1.md @@ -0,0 +1,166 @@ +# Standard Tests V1 - Content Blocks Support + +## Overview + +The standard tests v1 package provides comprehensive testing for chat models that support the new content blocks format. 
This includes: + +- **Streaming support**: Content blocks in streaming responses +- **Multimodal content**: `Text`, `Image`, `Video`, `Audio`, and `File` `ContentBlock`s +- **Reasoning content**: Reasoning steps as `ReasoningContentBlock` +- **Provider-specific extensions**: `NonStandardContentBlock` for unique provider features + +## Usage + +### Basic Unit Tests + +```python +from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests +from your_package import YourChatModel + +class TestYourChatModelV1(ChatModelV1UnitTests): + @property + def chat_model_class(self): + return YourChatModel + + @property + def chat_model_params(self): + return {"api_key": "test-key", "model": "your-model"} + + # Configure supported features + @property + def supports_content_blocks_v1(self): + return True + + @property + def supports_image_content_blocks(self): + return True + + @property + def supports_reasoning_content_blocks(self): + return True +``` + +### Integration Tests + +```python +from langchain_tests.integration_tests.chat_models_v1 import ChatModelV1IntegrationTests +from your_package import YourChatModel + +class TestYourChatModelV1Integration(ChatModelV1IntegrationTests): + @property + def chat_model_class(self): + return YourChatModel + + @property + def chat_model_params(self): + return { + "api_key": os.getenv("YOUR_API_KEY"), + "model": "your-model-name" + } + + # Configure which features to test + @property + def supports_citations(self): + return True + + @property + def supports_web_search_blocks(self): + return False # If your model doesn't support this +``` + +## Configuration Properties + +### Core Content Blocks Support + +- `supports_content_blocks_v1`: Enable content blocks v1 testing **(required)** +- `supports_text_content_blocks`: `TextContentBlock` support - very unlikely this will be set to `False` +- `supports_reasoning_content_blocks`: `ReasoningContentBlock` support, e.g. for reasoning models + +### Multimodal Support + +- `supports_image_content_blocks`: `ImageContentBlock`s (v1 format) +- `supports_video_content_blocks`: `VideoContentBlock`s (v1 format) +- `supports_audio_content_blocks`: `AudioContentBlock`s (v1 format) +- `supports_plaintext_content_blocks`: `PlainTextContentBlock`s (plaintext from documents) +- `supports_file_content_blocks`: `FileContentBlock`s + +### Tool Calling + +- `supports_tool_calls`: Tool calling with content blocks +- `supports_invalid_tool_calls`: Error handling for invalid tool calls +- `supports_tool_call_chunks`: Streaming tool call support + +### Advanced Features + +- `supports_citations`: Citation annotations +- `supports_web_search_blocks`: Built-in web search +- `supports_code_interpreter`: Code execution blocks +- `supports_non_standard_blocks`: Custom content blocks + +## Test Categories + +### Unit Tests (`ChatModelV1Tests`) + +- Content block format validation +- Ser/deserialization +- Multimodal content handling +- Tool calling with content blocks +- Error handling for invalid blocks +- Backward compatibility with string content + +### Integration Tests (`ChatModelV1IntegrationTests`) + +- Real multimodal content processing +- Advanced reasoning with content blocks +- Citation generation with external sources +- Web search integration +- File processing and analysis +- Performance benchmarking +- Streaming content blocks +- Asynchronous processing + +## Migration from Standard Tests + +### For Test Authors + +1. 
**Inherit from new base classes**: + + ```python + # v0 + from langchain_tests.unit_tests.chat_models import ChatModelUnitTests + + # v1 + from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests + ``` + +2. **Configure content blocks support**: + + ```python + @property + def supports_content_blocks_v1(self): + return True # Enable v1 features + ``` + +3. **Set feature flags** based on your chat model's capabilities + +## Examples + +See the test files in `tests/unit_tests/test_chat_models_v1.py` and `tests/integration_tests/test_chat_models_v1.py` for complete examples of how to implement tests for your chat model. + +## Best Practices + +1. **Start with basic content blocks** (text) and gradually enable advanced features +2. **Test error handling** for unsupported content block types +3. **Validate serialization** to persist message histories (passing back in content blocks) +4. **Test streaming** if your model supports it with content blocks + +## Contributing + +When new content block types or features are added: + +1. Add the content block type to the imports +2. Create test helper methods for the new type +3. Add configuration properties for the feature +4. Implement corresponding test methods +5. Update this documentation +6. Add examples in the test files (`tests/unit_tests/test_chat_models_v1.py` and `tests/integration_tests/test_chat_models_v1.py`) diff --git a/libs/standard-tests/langchain_tests/__init__.py b/libs/standard-tests/langchain_tests/__init__.py index b03553e9cd7..3677a77a697 100644 --- a/libs/standard-tests/langchain_tests/__init__.py +++ b/libs/standard-tests/langchain_tests/__init__.py @@ -3,4 +3,7 @@ To learn how to use these classes, see the `integration standard testing `__ guide. + +This package provides both the original test suites and the v1 test suites that support +the new content blocks system introduced in ``langchain_core.messages.content_blocks``. 
""" diff --git a/libs/standard-tests/langchain_tests/base.py b/libs/standard-tests/langchain_tests/base.py index bc262b31c19..c54b84055a7 100644 --- a/libs/standard-tests/langchain_tests/base.py +++ b/libs/standard-tests/langchain_tests/base.py @@ -9,7 +9,7 @@ class BaseStandardTests(ABC): :private: """ - # find path to standard test implementations + # Find path to standard test implementations comparison_class = None def explore_bases(cls: type) -> None: diff --git a/libs/standard-tests/langchain_tests/integration_tests/__init__.py b/libs/standard-tests/langchain_tests/integration_tests/__init__.py index fbe4888d1e5..02979aa789f 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/__init__.py +++ b/libs/standard-tests/langchain_tests/integration_tests/__init__.py @@ -20,6 +20,7 @@ for module in modules: from .base_store import BaseStoreAsyncTests, BaseStoreSyncTests from .cache import AsyncCacheTestSuite, SyncCacheTestSuite from .chat_models import ChatModelIntegrationTests +from .chat_models_v1 import ChatModelV1IntegrationTests from .embeddings import EmbeddingsIntegrationTests from .retrievers import RetrieversIntegrationTests from .tools import ToolsIntegrationTests @@ -30,6 +31,7 @@ __all__ = [ "BaseStoreAsyncTests", "BaseStoreSyncTests", "ChatModelIntegrationTests", + "ChatModelV1IntegrationTests", "EmbeddingsIntegrationTests", "RetrieversIntegrationTests", "SyncCacheTestSuite", diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py index 090b1f6dc73..d6666ed55a6 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py @@ -32,9 +32,7 @@ from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-unt from typing_extensions import TypedDict from vcr.cassette import Cassette -from langchain_tests.unit_tests.chat_models import ( - ChatModelTests, -) +from langchain_tests.unit_tests.chat_models import ChatModelTests from langchain_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION @@ -110,6 +108,7 @@ def magic_function_no_args() -> int: def _validate_tool_call_message(message: BaseMessage) -> None: assert isinstance(message, AIMessage) assert len(message.tool_calls) == 1 + tool_call = message.tool_calls[0] assert tool_call["name"] == "magic_function" assert tool_call["args"] == {"input": 3} @@ -120,6 +119,7 @@ def _validate_tool_call_message(message: BaseMessage) -> None: def _validate_tool_call_message_no_args(message: BaseMessage) -> None: assert isinstance(message, AIMessage) assert len(message.tool_calls) == 1 + tool_call = message.tool_calls[0] assert tool_call["name"] == "magic_function_no_args" assert tool_call["args"] == {} @@ -137,6 +137,7 @@ def unicode_customer(customer_name: str, description: str) -> str: Returns: A confirmation message about the customer creation. + """ return f"Created customer: {customer_name} - {description}" @@ -173,7 +174,7 @@ class ChatModelIntegrationTests(ChatModelTests): API references for individual test methods include troubleshooting tips. - Test subclasses must implement the following two properties: + Test subclasses **must** implement the following two properties: chat_model_class The chat model class to test, e.g., ``ChatParrotLink``. @@ -426,10 +427,10 @@ class ChatModelIntegrationTests(ChatModelTests): .. 
dropdown:: returns_usage_metadata Boolean property indicating whether the chat model returns usage metadata - on invoke and streaming responses. + on invoke and streaming responses. Defaults to ``True``. - ``usage_metadata`` is an optional dict attribute on AIMessages that track input - and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html + ``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input + and output tokens. `See more. `__ Example: @@ -440,7 +441,7 @@ class ChatModelIntegrationTests(ChatModelTests): return False Models supporting ``usage_metadata`` should also return the name of the - underlying model in the ``response_metadata`` of the AIMessage. + underlying model in the ``response_metadata`` of the ``AIMessage``. .. dropdown:: supports_anthropic_inputs @@ -525,8 +526,8 @@ class ChatModelIntegrationTests(ChatModelTests): Property controlling what usage metadata details are emitted in both invoke and stream. - ``usage_metadata`` is an optional dict attribute on AIMessages that track input - and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html + ``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input + and output tokens. `See more. `__ It includes optional keys ``input_token_details`` and ``output_token_details`` that can track usage details associated with special types of tokens, such as @@ -682,13 +683,13 @@ class ChatModelIntegrationTests(ChatModelTests): return {} def test_invoke(self, model: BaseChatModel) -> None: - """Test to verify that `model.invoke(simple_message)` works. + """Test to verify that ``model.invoke(simple_message)`` works. This should pass for all integrations. .. dropdown:: Troubleshooting - If this test fails, you should make sure your _generate method + If this test fails, you should make sure your ``_generate`` method does not raise any exceptions, and that it returns a valid :class:`~langchain_core.outputs.chat_result.ChatResult` like so: @@ -708,7 +709,7 @@ class ChatModelIntegrationTests(ChatModelTests): assert len(result.content) > 0 async def test_ainvoke(self, model: BaseChatModel) -> None: - """Test to verify that `await model.ainvoke(simple_message)` works. + """Test to verify that ``await model.ainvoke(simple_message)`` works. This should pass for all integrations. Passing this test does not indicate a "natively async" implementation, but rather that the model can be used @@ -718,7 +719,7 @@ class ChatModelIntegrationTests(ChatModelTests): First, debug :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`. - because `ainvoke` has a default implementation that calls `invoke` in an + because ``ainvoke`` has a default implementation that calls ``invoke`` in an async context. If that test passes but not this one, you should make sure your _agenerate @@ -741,7 +742,7 @@ class ChatModelIntegrationTests(ChatModelTests): assert len(result.content) > 0 def test_stream(self, model: BaseChatModel) -> None: - """Test to verify that `model.stream(simple_message)` works. + """Test to verify that ``model.stream(simple_message)`` works. This should pass for all integrations. 
Passing this test does not indicate a "streaming" implementation, but rather that the model can be used in a @@ -751,10 +752,10 @@ class ChatModelIntegrationTests(ChatModelTests): First, debug :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`. - because `stream` has a default implementation that calls `invoke` and yields - the result as a single chunk. + because ``stream`` has a default implementation that calls ``invoke`` and + yields the result as a single chunk. - If that test passes but not this one, you should make sure your _stream + If that test passes but not this one, you should make sure your ``_stream`` method does not raise any exceptions, and that it yields valid :class:`~langchain_core.outputs.chat_generation.ChatGenerationChunk` objects like so: @@ -770,11 +771,12 @@ class ChatModelIntegrationTests(ChatModelTests): for chunk in model.stream("Hello"): assert chunk is not None assert isinstance(chunk, AIMessageChunk) + assert isinstance(chunk.content, (str, list)) num_chunks += 1 assert num_chunks > 0 async def test_astream(self, model: BaseChatModel) -> None: - """Test to verify that `await model.astream(simple_message)` works. + """Test to verify that ``await model.astream(simple_message)`` works. This should pass for all integrations. Passing this test does not indicate a "natively async" or "streaming" implementation, but rather that the model can @@ -786,11 +788,11 @@ class ChatModelIntegrationTests(ChatModelTests): :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_stream`. and :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_ainvoke`. - because `astream` has a default implementation that calls `_stream` in an - async context if it is implemented, or `ainvoke` and yields the result as a - single chunk if not. + because ``astream`` has a default implementation that calls ``_stream`` in + an async context if it is implemented, or ``ainvoke`` and yields the result + as a single chunk if not. - If those tests pass but not this one, you should make sure your _astream + If those tests pass but not this one, you should make sure your ``_astream`` method does not raise any exceptions, and that it yields valid :class:`~langchain_core.outputs.chat_generation.ChatGenerationChunk` objects like so: @@ -811,7 +813,7 @@ class ChatModelIntegrationTests(ChatModelTests): assert num_chunks > 0 def test_batch(self, model: BaseChatModel) -> None: - """Test to verify that `model.batch([messages])` works. + """Test to verify that ``model.batch([messages])`` works. This should pass for all integrations. Tests the model's ability to process multiple prompts in a single batch. @@ -820,12 +822,13 @@ class ChatModelIntegrationTests(ChatModelTests): First, debug :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke` - because `batch` has a default implementation that calls `invoke` for each - message in the batch. + because ``batch`` has a default implementation that calls ``invoke`` for + each message in the batch. - If that test passes but not this one, you should make sure your `batch` + If that test passes but not this one, you should make sure your ``batch`` method does not raise any exceptions, and that it returns a list of valid :class:`~langchain_core.messages.AIMessage` objects. 
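+
+            A passing implementation behaves roughly like this (illustrative sketch of
+            the assertions made below):
+
+            .. code-block:: python
+
+                results = model.batch(["Hello", "Hey"])
+                assert len(results) == 2
+                assert all(isinstance(r, AIMessage) for r in results)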
+ """ batch_results = model.batch(["Hello", "Hey"]) assert batch_results is not None @@ -838,7 +841,7 @@ class ChatModelIntegrationTests(ChatModelTests): assert len(result.content) > 0 async def test_abatch(self, model: BaseChatModel) -> None: - """Test to verify that `await model.abatch([messages])` works. + """Test to verify that ``await model.abatch([messages])`` works. This should pass for all integrations. Tests the model's ability to process multiple prompts in a single batch asynchronously. @@ -849,12 +852,13 @@ class ChatModelIntegrationTests(ChatModelTests): :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_batch` and :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_ainvoke` - because `abatch` has a default implementation that calls `ainvoke` for each - message in the batch. + because ``abatch`` has a default implementation that calls ``ainvoke`` for + each message in the batch. - If those tests pass but not this one, you should make sure your `abatch` + If those tests pass but not this one, you should make sure your ``abatch`` method does not raise any exceptions, and that it returns a list of valid :class:`~langchain_core.messages.AIMessage` objects. + """ batch_results = await model.abatch(["Hello", "Hey"]) assert batch_results is not None @@ -877,18 +881,20 @@ class ChatModelIntegrationTests(ChatModelTests): First, debug :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke` - because this test also uses `model.invoke()`. + because this test also uses ``model.invoke()``. If that test passes but not this one, you should verify that: 1. Your model correctly processes the message history 2. The model maintains appropriate context from previous messages 3. The response is a valid :class:`~langchain_core.messages.AIMessage` + """ messages = [ HumanMessage("hello"), AIMessage("hello"), HumanMessage("how are you"), ] + result = model.invoke(messages) assert result is not None assert isinstance(result, AIMessage) @@ -906,17 +912,17 @@ class ChatModelIntegrationTests(ChatModelTests): First, debug :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke` - because this test also uses `model.invoke()`. + because this test also uses ``model.invoke()``. Second, debug :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_conversation` because this test is the "basic case" without double messages. If that test passes those but not this one, you should verify that: - 1. Your model API can handle double messages, or the integration should - merge messages before sending them to the API. + 1. Your model API can handle double messages, or the integration should merge messages before sending them to the API. 2. The response is a valid :class:`~langchain_core.messages.AIMessage` - """ + + """ # noqa: E501 messages = [ SystemMessage("hello"), SystemMessage("hello"), @@ -926,6 +932,7 @@ class ChatModelIntegrationTests(ChatModelTests): AIMessage("hello"), HumanMessage("how are you"), ] + result = model.invoke(messages) assert result is not None assert isinstance(result, AIMessage) @@ -940,13 +947,14 @@ class ChatModelIntegrationTests(ChatModelTests): .. versionchanged:: 0.3.17 - Additionally check for the presence of `model_name` in the response + Additionally check for the presence of ``model_name`` in the response metadata, which is needed for usage tracking in callback handlers. .. 
dropdown:: Configuration By default, this test is run. - To disable this feature, set `returns_usage_metadata` to False in your + + To disable this feature, set ``returns_usage_metadata`` to ``False`` in your test class: .. code-block:: python @@ -957,7 +965,7 @@ class ChatModelIntegrationTests(ChatModelTests): return False This test can also check the format of specific kinds of usage metadata - based on the `supported_usage_metadata_details` property. This property + based on the ``supported_usage_metadata_details`` property. This property should be configured as follows with the types of tokens that the model supports tracking: @@ -988,7 +996,7 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, first verify that your model returns :class:`~langchain_core.messages.ai.UsageMetadata` dicts - attached to the returned AIMessage object in `_generate`: + attached to the returned AIMessage object in ``_generate``: .. code-block:: python @@ -1020,9 +1028,11 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.returns_usage_metadata: pytest.skip("Not implemented.") + result = model.invoke("Hello") assert result is not None assert isinstance(result, AIMessage) + assert result.usage_metadata is not None assert isinstance(result.usage_metadata["input_tokens"], int) assert isinstance(result.usage_metadata["output_tokens"], int) @@ -1106,13 +1116,13 @@ class ChatModelIntegrationTests(ChatModelTests): .. versionchanged:: 0.3.17 - Additionally check for the presence of `model_name` in the response + Additionally check for the presence of ``model_name`` in the response metadata, which is needed for usage tracking in callback handlers. .. dropdown:: Configuration By default, this test is run. - To disable this feature, set `returns_usage_metadata` to False in your + To disable this feature, set ``returns_usage_metadata`` to ``False`` in your test class: .. code-block:: python @@ -1123,7 +1133,7 @@ class ChatModelIntegrationTests(ChatModelTests): return False This test can also check the format of specific kinds of usage metadata - based on the `supported_usage_metadata_details` property. This property + based on the ``supported_usage_metadata_details`` property. This property should be configured as follows with the types of tokens that the model supports tracking: @@ -1153,16 +1163,16 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, first verify that your model yields :class:`~langchain_core.messages.ai.UsageMetadata` dicts - attached to the returned AIMessage object in `_stream` + attached to the returned AIMessage object in ``_stream`` that sum up to the total usage metadata. - Note that `input_tokens` should only be included on one of the chunks - (typically the first or the last chunk), and the rest should have 0 or None - to avoid counting input tokens multiple times. + Note that ``input_tokens`` should only be included on one of the chunks + (typically the first or the last chunk), and the rest should have ``0`` or + ``None`` to avoid counting input tokens multiple times. - `output_tokens` typically count the number of tokens in each chunk, not the - sum. This test will pass as long as the sum of `output_tokens` across all - chunks is not 0. + ``output_tokens`` typically count the number of tokens in each chunk, not + the sum. This test will pass as long as the sum of ``output_tokens`` across + all chunks is not ``0``. .. 
code-block:: python @@ -1198,6 +1208,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.returns_usage_metadata: pytest.skip("Not implemented.") + full: Optional[AIMessageChunk] = None for chunk in model.stream("Write me 2 haikus. Only include the haikus."): assert isinstance(chunk, AIMessageChunk) @@ -1262,7 +1273,7 @@ class ChatModelIntegrationTests(ChatModelTests): """Test that model does not fail when invoked with the ``stop`` parameter, which is a standard parameter for stopping generation at a certain token. - More on standard parameters here: https://python.langchain.com/docs/concepts/chat_models/#standard-parameters + `More on standard parameters `__ This should pass for all integrations. @@ -1336,6 +1347,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + tool_choice_value = None if not self.has_tool_choice else "any" # Emit warning if tool_choice_value property is overridden if inspect.getattr_static( @@ -1410,6 +1422,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + tool_choice_value = None if not self.has_tool_choice else "any" model_with_tools = model.bind_tools( [magic_function], tool_choice=tool_choice_value @@ -1519,10 +1532,10 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that: - 1. The model can correctly handle message histories that include AIMessage objects with ``""`` content. - 2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format. - 3. The model can correctly handle ToolMessage objects with string content and arbitrary string values for ``tool_call_id``. - assert tool_call.get("type") == "tool_call" + 1. The model can correctly handle message histories that include ``AIMessage`` objects with ``""`` content. + 2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format. + 3. The model can correctly handle ``ToolMessage`` objects with string content and arbitrary string values for ``tool_call_id``. + You can ``xfail`` the test if tool calling is implemented but this format is not supported. @@ -1535,6 +1548,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ # noqa: E501 if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + model_with_tools = model.bind_tools([my_adder_tool]) function_name = "my_adder_tool" function_args = {"a": "1", "b": "2"} @@ -1570,7 +1584,7 @@ class ChatModelIntegrationTests(ChatModelTests): """Test that message histories are compatible with list tool contents (e.g. Anthropic format). - These message histories will include AIMessage objects with "tool use" and + These message histories will include ``AIMessage`` objects with "tool use" and content blocks, e.g., .. code-block:: python @@ -1604,8 +1618,8 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that: - 1. The model can correctly handle message histories that include AIMessage objects with list content. - 2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format. + 1. The model can correctly handle message histories that include ``AIMessage`` objects with list content. + 2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format. 3. 
The model can correctly handle ToolMessage objects with string content and arbitrary string values for ``tool_call_id``. You can ``xfail`` the test if tool calling is implemented but this format @@ -1620,6 +1634,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ # noqa: E501 if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + model_with_tools = model.bind_tools([my_adder_tool]) function_name = "my_adder_tool" function_args = {"a": 1, "b": 2} @@ -1692,7 +1707,7 @@ class ChatModelIntegrationTests(ChatModelTests): pytest.skip("Test requires tool choice.") @tool - def get_weather(location: str) -> str: # pylint: disable=unused-argument + def get_weather(location: str) -> str: """Get weather at a location.""" return "It's sunny." @@ -1750,6 +1765,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ # noqa: E501 if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + tool_choice_value = None if not self.has_tool_choice else "any" model_with_tools = model.bind_tools( [magic_function_no_args], tool_choice=tool_choice_value @@ -1767,7 +1783,7 @@ class ChatModelIntegrationTests(ChatModelTests): def test_tool_message_error_status( self, model: BaseChatModel, my_adder_tool: BaseTool ) -> None: - """Test that ToolMessage with ``status="error"`` can be handled. + """Test that ``ToolMessage`` with ``status="error"`` can be handled. These messages may take the form: @@ -1806,6 +1822,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + model_with_tools = model.bind_tools([my_adder_tool]) messages = [ HumanMessage("What is 1 + 2"), @@ -1860,8 +1877,9 @@ class ChatModelIntegrationTests(ChatModelTests): .. dropdown:: Troubleshooting - This test uses a utility function in ``langchain_core`` to generate a - sequence of messages representing "few-shot" examples: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.tool_example_to_messages.html + This test uses `a utility function `__ + in ``langchain_core`` to generate a sequence of messages representing + "few-shot" examples. If this test fails, check that the model can correctly handle this sequence of messages. @@ -1878,6 +1896,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.has_tool_calling: pytest.skip("Test requires tool calling.") + model_with_tools = model.bind_tools([my_adder_tool], tool_choice="any") function_result = json.dumps({"result": 3}) @@ -1921,10 +1940,12 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, ensure that the model's ``bind_tools`` method properly handles both JSON Schema and Pydantic V2 models. - ``langchain_core`` implements a utility function that will accommodate - most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html - See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. + + See `example implementation `__ + of ``with_structured_output``. """ if not self.has_structured_output: @@ -2000,10 +2021,12 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, ensure that the model's ``bind_tools`` method properly handles both JSON Schema and Pydantic V2 models. 
- ``langchain_core`` implements a utility function that will accommodate - most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html - See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. + + See `example implementation `__ + of ``with_structured_output``. """ if not self.has_structured_output: @@ -2052,10 +2075,9 @@ class ChatModelIntegrationTests(ChatModelTests): @pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Test requires pydantic 2.") def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None: - """Test to verify we can generate structured output using - pydantic.v1.BaseModel. + """Test to verify we can generate structured output using ``pydantic.v1.BaseModel``. - pydantic.v1.BaseModel is available in the pydantic 2 package. + ``pydantic.v1.BaseModel`` is available in the Pydantic 2 package. This test is optional and should be skipped if the model does not support structured output (see Configuration below). @@ -2079,12 +2101,14 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, ensure that the model's ``bind_tools`` method properly handles both JSON Schema and Pydantic V1 models. - ``langchain_core`` implements a utility function that will accommodate - most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html - See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. - """ + See `example implementation `__ + of ``with_structured_output``. + + """ # noqa: E501 if not self.has_structured_output: pytest.skip("Test requires structured output.") @@ -2141,10 +2165,12 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, ensure that the model's ``bind_tools`` method properly handles Pydantic V2 models with optional parameters. - ``langchain_core`` implements a utility function that will accommodate - most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html - See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. + + See `example implementation `__ + of ``with_structured_output``. """ if not self.has_structured_output: @@ -2225,7 +2251,7 @@ class ChatModelIntegrationTests(ChatModelTests): # Type ignoring since the interface only officially supports pydantic 1 # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2. # We'll need to do a pass updating the type signatures. - chat = model.with_structured_output(Joke, method="json_mode") # type: ignore[arg-type] + chat = model.with_structured_output(Joke, method="json_mode") msg = ( "Tell me a joke about cats. Return the result as a JSON with 'setup' and " "'punchline' keys. Return nothing other than JSON." 
@@ -2288,6 +2314,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.supports_pdf_inputs: pytest.skip("Model does not support PDF inputs.") + url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf" pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8") @@ -2364,6 +2391,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.supports_audio_inputs: pytest.skip("Model does not support audio inputs.") + url = "https://upload.wikimedia.org/wikipedia/commons/3/3d/Alcal%C3%A1_de_Henares_%28RPS_13-04-2024%29_canto_de_ruise%C3%B1or_%28Luscinia_megarhynchos%29_en_el_Soto_del_Henares.wav" audio_data = base64.b64encode(httpx.get(url).content).decode("utf-8") @@ -2465,6 +2493,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.supports_image_inputs: pytest.skip("Model does not support image message.") + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") @@ -2572,6 +2601,7 @@ class ChatModelIntegrationTests(ChatModelTests): """ if not self.supports_image_tool_message: pytest.skip("Model does not support image tool message.") + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") @@ -2687,7 +2717,7 @@ class ChatModelIntegrationTests(ChatModelTests): 1. The model can correctly handle message histories that include message objects with list content. 2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format. - 3. HumanMessages with "tool_result" content blocks are correctly handled. + 3. ``HumanMessage``s with "tool_result" content blocks are correctly handled. Otherwise, if Anthropic tool call and result formats are not supported, set the ``supports_anthropic_inputs`` property to False. @@ -2793,7 +2823,7 @@ class ChatModelIntegrationTests(ChatModelTests): assert isinstance(response, AIMessage) def test_message_with_name(self, model: BaseChatModel) -> None: - """Test that HumanMessage with values for the ``name`` field can be handled. + """Test that ``HumanMessage`` with values for the ``name`` field can be handled. These messages may take the form: @@ -2842,7 +2872,7 @@ class ChatModelIntegrationTests(ChatModelTests): chat model. Check also that all required information (e.g., tool calling identifiers) - from AIMessage objects is propagated correctly to model payloads. + from ``AIMessage`` objects is propagated correctly to model payloads. This test may fail if the chat model does not consistently generate tool calls in response to an appropriate query. In these cases you can ``xfail`` @@ -2859,7 +2889,7 @@ class ChatModelIntegrationTests(ChatModelTests): pytest.skip("Test requires tool calling.") @tool - def get_weather(location: str) -> str: # pylint: disable=unused-argument + def get_weather(location: str) -> str: """Call to surf the web.""" return "It's sunny." 
@@ -2953,12 +2983,13 @@ class ChatModelIntegrationTests(ChatModelTests): Args: model: The chat model to test - tool_choice: Tool choice parameter to pass to bind_tools (provider-specific) - force_tool_call: Whether to force a tool call (use tool_choice=True if None) + tool_choice: Tool choice parameter to pass to ``bind_tools()`` (provider-specific) + force_tool_call: Whether to force a tool call (use ``tool_choice=True`` if None) Tests that Unicode characters in tool call arguments are preserved correctly, - not escaped as \\uXXXX sequences. - """ + not escaped as ``\\uXXXX`` sequences. + + """ # noqa: E501 if not self.has_tool_calling: pytest.skip("Test requires tool calling support.") diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models_v1.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models_v1.py new file mode 100644 index 00000000000..693896a8746 --- /dev/null +++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models_v1.py @@ -0,0 +1,3015 @@ +"""Integration tests for v1 chat models. + +This module provides comprehensive integration tests for the new messages and standard +content block system introduced in ``langchain_core.v1.messages`` and +``langchain_core.messages.content_blocks``. +""" + +import base64 +import json +from typing import Annotated, Any, Literal, Optional, TypedDict, Union, cast +from unittest.mock import MagicMock + +import httpx +import langchain_core.messages.content_blocks as types +import pytest +from langchain_core.callbacks import BaseCallbackHandler +from langchain_core.language_models.fake_chat_models import GenericFakeChatModel +from langchain_core.messages.content_blocks import ( + AudioContentBlock, + Citation, + CodeInterpreterCall, + CodeInterpreterOutput, + CodeInterpreterResult, + FileContentBlock, + ImageContentBlock, + InvalidToolCall, + NonStandardContentBlock, + PlainTextContentBlock, + ReasoningContentBlock, + TextContentBlock, + ToolCall, + ToolCallChunk, + VideoContentBlock, + WebSearchCall, + WebSearchResult, + create_audio_block, + create_file_block, + create_image_block, + create_non_standard_block, + create_plaintext_block, + create_text_block, + create_tool_call, + is_reasoning_block, + is_text_block, + is_tool_call_block, +) +from langchain_core.output_parsers.string import StrOutputParser +from langchain_core.prompts.chat import ChatPromptTemplate +from langchain_core.tools import tool +from langchain_core.tools.base import BaseTool +from langchain_core.utils.function_calling import ( + convert_to_json_schema, + tool_example_to_messages, +) +from langchain_core.v1.chat_models import BaseChatModel +from langchain_core.v1.messages import ( + AIMessage, + AIMessageChunk, + HumanMessage, + SystemMessage, + ToolMessage, +) +from pydantic import BaseModel, Field +from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] +from vcr.cassette import Cassette + +from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1Tests + +# Content block type definitions for testing +ContentBlock = Union[ + TextContentBlock, + ImageContentBlock, + VideoContentBlock, + AudioContentBlock, + PlainTextContentBlock, + FileContentBlock, + ReasoningContentBlock, + NonStandardContentBlock, + ToolCall, + InvalidToolCall, + ToolCallChunk, + WebSearchCall, + WebSearchResult, + Citation, + CodeInterpreterCall, + CodeInterpreterOutput, + CodeInterpreterResult, +] + + +def _get_joke_class( + schema_type: Literal["pydantic", "typeddict", "json_schema"], +) -> Any: + """:private:""" + 
+ class Joke(BaseModel): + """Joke to tell user.""" + + setup: str = Field(description="question to set up a joke") + punchline: str = Field(description="answer to resolve the joke") + + def validate_joke(result: Any) -> bool: + return isinstance(result, Joke) + + class JokeDict(TypedDict): + """Joke to tell user.""" + + setup: Annotated[str, ..., "question to set up a joke"] + punchline: Annotated[str, ..., "answer to resolve the joke"] + + def validate_joke_dict(result: Any) -> bool: + return all(key in ["setup", "punchline"] for key in result) + + if schema_type == "pydantic": + return Joke, validate_joke + + if schema_type == "typeddict": + return JokeDict, validate_joke_dict + + if schema_type == "json_schema": + return Joke.model_json_schema(), validate_joke_dict + msg = "Invalid schema type" + raise ValueError(msg) + + +class _TestCallbackHandler(BaseCallbackHandler): + options: list[Optional[dict]] + + def __init__(self) -> None: + super().__init__() + self.options = [] + + def on_chat_model_start( + self, + serialized: Any, + messages: Any, + *, + options: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + self.options.append(options) + + +class _MagicFunctionSchema(BaseModel): + input: int = Field(..., gt=-1000, lt=1000) + + +@tool(args_schema=_MagicFunctionSchema) +def magic_function(_input: int) -> int: + """Applies a magic function to an input.""" + return _input + 2 + + +@tool +def magic_function_no_args() -> int: + """Calculates a magic function.""" + return 5 + + +def _validate_tool_call_message(message: AIMessage) -> None: + """Validate that a message contains tool calls in content blocks format.""" + + if isinstance(message.content, list): + tool_call_blocks = [ + block + for block in message.content + if isinstance(block, dict) and is_tool_call_block(block) + ] + assert len(tool_call_blocks) >= 1 + + for tool_call in tool_call_blocks: + # Ensure each tool call has the required fields + assert "name" in tool_call + assert "args" in tool_call + assert "id" in tool_call + # (No fallback, since the tools attribute makes the same search as the list + # comprehension above) + + +def _validate_tool_call_message_no_args(message: AIMessage) -> None: + """Validate that a message contains a single tool call with no arguments. + + Used for testing tool calls without arguments, such as + ``magic_function_no_args``. + """ + assert len(message.tool_calls) == 1 + tool_call = message.tool_calls[0] + assert tool_call["name"] == "magic_function_no_args" + assert tool_call["args"] == {} + assert tool_call["id"] is not None + + +@tool +def unicode_customer(customer_name: str, description: str) -> str: + """Tool for creating a customer with a name containing Unicode characters. + + Args: + customer_name: The customer's name in their native language. + description: Description of the customer. + + Returns: + A confirmation message about the customer creation. + """ + return f"Created customer: {customer_name} - {description}" + + +class ChatModelV1IntegrationTests(ChatModelV1Tests): + """Base class for v1 chat model integration tests. + + TODO: verify this entire docstring! + + Test subclasses must implement the ``chat_model_class`` and + ``chat_model_params`` properties to specify what model to test and its + initialization parameters. + + Example: + + .. 
code-block:: python + + from typing import Type + + from langchain_tests.integration_tests import ChatModelV1IntegrationTests + from my_package.chat_models import MyChatModel + + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def chat_model_class(self) -> Type[MyV1ChatModel]: + # Return the chat model class to test here + return MyChatModel + + @property + def chat_model_params(self) -> dict: + # Return initialization parameters for the v1 model. + return {"model": "model-001", "temperature": 0} + + .. note:: + API references for individual test methods include troubleshooting tips. + + + Test subclasses **must** implement the following two properties: + + chat_model_class + The chat model class to test, e.g., ``ChatParrotLinkV1``. + + Example: + + .. code-block:: python + + @property + def chat_model_class(self) -> Type[ChatParrotLinkV1]: + return ChatParrotLinkV1 + + chat_model_params + Initialization parameters for the chat model. + + Example: + + .. code-block:: python + + @property + def chat_model_params(self) -> dict: + return {"model": "bird-brain-001", "temperature": 0} + + In addition, test subclasses can control what features are tested (such as tool + calling or multi-modality) by selectively overriding the following properties. + Expand to see details: + + .. dropdown:: has_tool_calling + + TODO + + .. dropdown:: tool_choice_value + + TODO + + .. dropdown:: has_tool_choice + + TODO + + .. dropdown:: has_structured_output + + TODO + + .. dropdown:: structured_output_kwargs + + TODO + + .. dropdown:: supports_json_mode + + TODO + + .. dropdown:: returns_usage_metadata + + TODO + + .. dropdown:: supports_anthropic_inputs + + TODO + + .. dropdown:: supports_image_tool_message + + TODO + + .. dropdown:: supported_usage_metadata_details + + TODO + + .. dropdown:: enable_vcr_tests + + Property controlling whether to enable select tests that rely on + `VCR `_ caching of HTTP calls, such + as benchmarking tests. + + To enable these tests, follow these steps: + + 1. Override the ``enable_vcr_tests`` property to return ``True``: + + .. code-block:: python + + @property + def enable_vcr_tests(self) -> bool: + return True + + 2. Configure VCR to exclude sensitive headers and other information from cassettes. + + .. important:: + VCR will by default record authentication headers and other sensitive + information in cassettes. Read below for how to configure what + information is recorded in cassettes. + + To add configuration to VCR, add a ``conftest.py`` file to the ``tests/`` + directory and implement the ``vcr_config`` fixture there. + + ``langchain-tests`` excludes the headers ``'authorization'``, + ``'x-api-key'``, and ``'api-key'`` from VCR cassettes. To pick up this + configuration, you will need to add ``conftest.py`` as shown below. You can + also exclude additional headers, override the default exclusions, or apply + other customizations to the VCR configuration. See example below: + + .. code-block:: python + :caption: tests/conftest.py + + import pytest + from langchain_tests.conftest import _base_vcr_config as _base_vcr_config + + _EXTRA_HEADERS = [ + # Specify additional headers to redact + ("user-agent", "PLACEHOLDER"), + ] + + + def remove_response_headers(response: dict) -> dict: + # If desired, remove or modify headers in the response. 
+ response["headers"] = {} + return response + + + @pytest.fixture(scope="session") + def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811 + \"\"\"Extend the default configuration from langchain_tests.\"\"\" + config = _base_vcr_config.copy() + config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS) + config["before_record_response"] = remove_response_headers + + return config + + .. dropdown:: Compressing cassettes + + ``langchain-tests`` includes a custom VCR serializer that compresses + cassettes using gzip. To use it, register the ``yaml.gz`` serializer + to your VCR fixture and enable this serializer in the config. See + example below: + + .. code-block:: python + :caption: tests/conftest.py + + import pytest + from langchain_tests.conftest import CustomPersister, CustomSerializer + from langchain_tests.conftest import _base_vcr_config as _base_vcr_config + from vcr import VCR + + _EXTRA_HEADERS = [ + # Specify additional headers to redact + ("user-agent", "PLACEHOLDER"), + ] + + + def remove_response_headers(response: dict) -> dict: + # If desired, remove or modify headers in the response. + response["headers"] = {} + return response + + + @pytest.fixture(scope="session") + def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811 + \"\"\"Extend the default configuration from langchain_tests.\"\"\" + config = _base_vcr_config.copy() + config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS) + config["before_record_response"] = remove_response_headers + # New: enable serializer and set file extension + config["serializer"] = "yaml.gz" + config["path_transformer"] = VCR.ensure_suffix(".yaml.gz") + + return config + + + def pytest_recording_configure(config: dict, vcr: VCR) -> None: + vcr.register_persister(CustomPersister()) + vcr.register_serializer("yaml.gz", CustomSerializer()) + + + You can inspect the contents of the compressed cassettes (e.g., to + ensure no sensitive information is recorded) using + + .. code-block:: bash + + gunzip -k /path/to/tests/cassettes/TestClass_test.yaml.gz + + or by using the serializer: + + .. code-block:: python + + from langchain_tests.conftest import CustomPersister, CustomSerializer + + cassette_path = "/path/to/tests/cassettes/TestClass_test.yaml.gz" + requests, responses = CustomPersister().load_cassette(path, CustomSerializer()) + + 3. Run tests to generate VCR cassettes. + + Example: + + .. code-block:: bash + + uv run python -m pytest tests/integration_tests/test_chat_models.py::TestMyModel::test_stream_time + + This will generate a VCR cassette for the test in + ``tests/integration_tests/cassettes/``. + + .. important:: + You should inspect the generated cassette to ensure that it does not + contain sensitive information. If it does, you can modify the + ``vcr_config`` fixture to exclude headers or modify the response + before it is recorded. + + You can then commit the cassette to your repository. Subsequent test runs + will use the cassette instead of making HTTP calls. + + """ # noqa: E501 + + @property + def standard_chat_model_params(self) -> dict: + """:private:""" + return {} + + def test_invoke(self, model: BaseChatModel) -> None: + """Test to verify that ``model.invoke(simple_message)`` works. + + A model should be able to produce a non-empty ``AIMessage`` in response to + ``"Hello"``. The message should at least contain a ``TextContentBlock`` with + text populated. + + .. important:: + This should pass for all integrations! + + .. 
dropdown:: Troubleshooting + + TODO + + """ + result = model.invoke("Hello") + assert result is not None + assert isinstance(result, AIMessage) + assert result.text + + async def test_ainvoke(self, model: BaseChatModel) -> None: + """Test to verify that ``await model.ainvoke(simple_message)`` works. + + A model should be able to produce a non-empty ``AIMessage`` in response to + ``"Hello"``. The message should at least contain a ``TextContentBlock`` with + text populated. + + .. important:: + This should pass for all integrations! + + Passing this test does not indicate a "natively async" implementation, but + rather that the model can be used in an async context. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_invoke`. + because ``ainvoke`` has a default implementation that calls ``invoke`` in an + async context. + + """ + result = await model.ainvoke("Hello") + assert result is not None + assert isinstance(result, AIMessage) + assert result.text + + def test_stream(self, model: BaseChatModel) -> None: + """Test to verify that ``model.stream(simple_message)`` works. + + .. important:: + This should pass for all integrations! + + Passing this test does not indicate a "streaming" implementation, but rather + that the model can be used in a streaming context. For instance, a model + that yields at least one chunk in response to ``"Hello"``. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_invoke`. + because ``stream`` has a default implementation that calls ``invoke`` and + yields the result as a single chunk. + + """ + num_chunks = 0 + for chunk in model.stream("Hello"): + assert chunk is not None + assert isinstance(chunk, AIMessageChunk) + assert isinstance(chunk.content, list) + num_chunks += 1 + assert num_chunks > 0 + + async def test_astream(self, model: BaseChatModel) -> None: + """Test to verify that ``await model.astream(simple_message)`` works. + + .. important:: + This should pass for all integrations! + + Passing this test does not indicate a "natively async" or "streaming" + implementation, but rather that the model can be used in an async streaming + context. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_stream`. + and + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_ainvoke`. + because ``astream`` has a default implementation that calls ``_stream`` in + an async context if it is implemented, or ``ainvoke`` and yields the result + as a single ``AIMessageChunk`` chunk if not. + + """ + num_chunks = 0 + async for chunk in model.astream("Hello"): + assert chunk is not None + assert isinstance(chunk, AIMessageChunk) + assert isinstance(chunk.content, list) + num_chunks += 1 + assert num_chunks > 0 + + def test_batch(self, model: BaseChatModel) -> None: + """Test to verify that ``model.batch([messages])`` works. + + .. important:: + This should pass for all integrations! + + Tests the model's ability to process multiple prompts in a single batch. We + expect that the ``TextContentBlock`` of each response is populated with text. + + Passing this test does not indicate a "natively batching" or "batching" + implementation, but rather that the model can be used in a batching context. 
For + instance, your model may internally call ``invoke`` for each message in the + batch, even if the model provider does not support batching natively. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_invoke` + because ``batch`` has a default implementation that calls ``invoke`` for + each message in the batch. + + If that test passes but not this one, you should make sure your ``batch`` + method does not raise any exceptions, and that it returns a list of valid + :class:`~langchain_core.v1.messages.AIMessage` objects. + + """ + batch_results = model.batch(["Hello", "Hey"]) + assert batch_results is not None + assert isinstance(batch_results, list) + assert len(batch_results) == 2 + for result in batch_results: + assert result is not None + assert isinstance(result, AIMessage) + assert result.text + + async def test_abatch(self, model: BaseChatModel) -> None: + """Test to verify that ``await model.abatch([messages])`` works. + + .. important:: + This should pass for all integrations! + + Tests the model's ability to process multiple prompts in a single batch + asynchronously. We expect that the ``TextContentBlock`` of each response is + populated with text. + + Passing this test does not indicate a "natively batching" or "batching" + implementation, but rather that the model can be used in a batching context. For + instance, your model may internally call ``ainvoke`` for each message in the + batch, even if the model provider does not support batching natively. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_batch` + and + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_ainvoke` + because ``abatch`` has a default implementation that calls ``ainvoke`` for + each message in the batch. + + If those tests pass but not this one, you should make sure your ``abatch`` + method does not raise any exceptions, and that it returns a list of valid + :class:`~langchain_core.v1.messages.AIMessage` objects. + + """ + batch_results = await model.abatch(["Hello", "Hey"]) + assert batch_results is not None + assert isinstance(batch_results, list) + assert len(batch_results) == 2 + for result in batch_results: + assert result is not None + assert isinstance(result, AIMessage) + assert result.text + + def test_conversation(self, model: BaseChatModel) -> None: + """Test to verify that the model can handle multi-turn conversations. + + .. important:: + This should pass for all integrations! + + Tests the model's ability to process a sequence of alternating human and AI + messages as context for generating the next response. We expect that the + ``TextContentBlock`` of each response is populated with text. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_invoke` + because this test also uses ``model.invoke()``. + + If that test passes but not this one, you should verify that: + 1. Your model correctly processes the message history + 2. The model maintains appropriate context from previous messages + 3. 
The response is a valid :class:`~langchain_core.v1.messages.AIMessage` + + """ + messages = [ + HumanMessage("hello"), + AIMessage("hello"), + HumanMessage("how are you"), + ] + result = model.invoke(messages) # type: ignore[arg-type] + assert result is not None + assert isinstance(result, AIMessage) + assert result.text + + def test_double_messages_conversation(self, model: BaseChatModel) -> None: + """Test to verify that the model can handle double-message conversations. + + .. important:: + This should pass for all integrations! + + Tests the model's ability to process a sequence of double-system, double-human, + and double-ai messages as context for generating the next response. We expect + that the ``TextContentBlock`` of each response is populated with text. + + .. dropdown:: Troubleshooting + + First, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_invoke` + because this test also uses ``model.invoke()``. + + Second, debug + :meth:`~langchain_tests.integration_tests.chat_models_v1.ChatModelV1IntegrationTests.test_conversation` + because this test is the "basic case" without double messages. + + If that test passes those but not this one, you should verify that: + 1. Your model API can handle double messages, or the integration should merge messages before sending them to the API. + 2. The response is a valid :class:`~langchain_core.v1.messages.AIMessage` + + """ # noqa: E501 + messages = [ + SystemMessage("hello"), + SystemMessage("hello"), + HumanMessage("hello"), + HumanMessage("hello"), + AIMessage("hello"), + AIMessage("hello"), + HumanMessage("how are you"), + ] + result = model.invoke(messages) # type: ignore[arg-type] + assert result is not None + assert isinstance(result, AIMessage) + assert result.text + + def test_usage_metadata(self, model: BaseChatModel) -> None: + """Test to verify that the model returns correct usage metadata. + + This test is optional and should be skipped if the model does not return + usage metadata (see Configuration below). + + .. versionchanged:: 0.3.17 + + Additionally check for the presence of ``model_name`` in the response + metadata, which is needed for usage tracking in callback handlers. + + .. dropdown:: Configuration + + By default, this test is run. + + To disable this feature, set the ``returns_usage_metadata`` property to + ``False`` in your test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def returns_usage_metadata(self) -> bool: + return False + + This test can also check the format of specific kinds of usage metadata + based on the ``supported_usage_metadata_details`` property. This property + should be configured as follows with the types of tokens that the model + supports tracking: + + TODO: check this! + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def supported_usage_metadata_details(self) -> dict: + return { + "invoke": [ + "audio_input", + "audio_output", + "reasoning_output", + "cache_read_input", + "cache_creation_input", + ], + "stream": [ + "audio_input", + "audio_output", + "reasoning_output", + "cache_read_input", + "cache_creation_input", + ], + } + + + .. 
dropdown:: Troubleshooting + + TODO + + """ + if not self.returns_usage_metadata: + pytest.skip("Not implemented.") + + result = model.invoke("Hello") + assert result is not None + assert isinstance(result, AIMessage) + + assert result.usage_metadata is not None + assert isinstance(result.usage_metadata["input_tokens"], int) + assert isinstance(result.usage_metadata["output_tokens"], int) + assert isinstance(result.usage_metadata["total_tokens"], int) + + # Check model_name is in response_metadata + # (Needed for langchain_core.callbacks.usage) + model_name = result.response_metadata.get("model_name") + assert isinstance(model_name, str) + assert model_name != "", "model_name is empty" + + # TODO: check these + # `input_tokens` is the total, possibly including other unclassified or + # system-level tokens. + if "audio_input" in self.supported_usage_metadata_details["invoke"]: + # Checks if the specific chat model integration being tested has declared + # that it supports reporting token counts specifically for `audio_input` + msg = self.invoke_with_audio_input() # To be implemented in test subclass + assert (usage_metadata := msg.usage_metadata) is not None + assert ( + input_token_details := usage_metadata.get("input_token_details") + ) is not None + assert isinstance(input_token_details.get("audio"), int) + # Asserts that total input tokens are at least the sum of the token counts + total_detailed_tokens = sum( + v for v in input_token_details.values() if isinstance(v, int) + ) + assert usage_metadata.get("input_tokens", 0) >= total_detailed_tokens + if "audio_output" in self.supported_usage_metadata_details["invoke"]: + msg = self.invoke_with_audio_output() + assert (usage_metadata := msg.usage_metadata) is not None + assert ( + output_token_details := usage_metadata.get("output_token_details") + ) is not None + assert isinstance(output_token_details.get("audio"), int) + # Asserts that total output tokens are at least the sum of the token counts + total_detailed_tokens = sum( + v for v in output_token_details.values() if isinstance(v, int) + ) + assert usage_metadata.get("output_tokens", 0) >= total_detailed_tokens + if "reasoning_output" in self.supported_usage_metadata_details["invoke"]: + msg = self.invoke_with_reasoning_output() + assert (usage_metadata := msg.usage_metadata) is not None + assert ( + output_token_details := usage_metadata.get("output_token_details") + ) is not None + assert isinstance(output_token_details.get("reasoning"), int) + # Asserts that total output tokens are at least the sum of the token counts + total_detailed_tokens = sum( + v for v in output_token_details.values() if isinstance(v, int) + ) + assert usage_metadata.get("output_tokens", 0) >= total_detailed_tokens + if "cache_read_input" in self.supported_usage_metadata_details["invoke"]: + msg = self.invoke_with_cache_read_input() + assert (usage_metadata := msg.usage_metadata) is not None + assert ( + input_token_details := usage_metadata.get("input_token_details") + ) is not None + assert isinstance(input_token_details.get("cache_read"), int) + # Asserts that total input tokens are at least the sum of the token counts + total_detailed_tokens = sum( + v for v in input_token_details.values() if isinstance(v, int) + ) + assert usage_metadata.get("input_tokens", 0) >= total_detailed_tokens + if "cache_creation_input" in self.supported_usage_metadata_details["invoke"]: + msg = self.invoke_with_cache_creation_input() + assert (usage_metadata := msg.usage_metadata) is not None + assert ( + input_token_details 
:= usage_metadata.get("input_token_details") + ) is not None + assert isinstance(input_token_details.get("cache_creation"), int) + # Asserts that total input tokens are at least the sum of the token counts + total_detailed_tokens = sum( + v for v in input_token_details.values() if isinstance(v, int) + ) + assert usage_metadata.get("input_tokens", 0) >= total_detailed_tokens + + def test_usage_metadata_streaming(self, model: BaseChatModel) -> None: + """Test usage metadata in streaming mode. + + Test to verify that the model returns correct usage metadata in streaming mode. + + .. versionchanged:: 0.3.17 + + Additionally check for the presence of ``model_name`` in the response + metadata, which is needed for usage tracking in callback handlers. + + .. dropdown:: Configuration + + By default, this test is run. + To disable this feature, set ``returns_usage_metadata`` to ``False`` in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def returns_usage_metadata(self) -> bool: + return False + + This test can also check the format of specific kinds of usage metadata + based on the ``supported_usage_metadata_details`` property. This property + should be configured as follows with the types of tokens that the model + supports tracking: + + TODO: check this! + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def supported_usage_metadata_details(self) -> dict: + return { + "invoke": [ + "audio_input", + "audio_output", + "reasoning_output", + "cache_read_input", + "cache_creation_input", + ], + "stream": [ + "audio_input", + "audio_output", + "reasoning_output", + "cache_read_input", + "cache_creation_input", + ], + } + + .. dropdown:: Troubleshooting + + TODO + + """ + if not self.returns_usage_metadata: + pytest.skip("Not implemented.") + + full: Optional[AIMessageChunk] = None + for chunk in model.stream("Write me 2 haikus. 
Only include the haikus."): + assert isinstance(chunk, AIMessageChunk) + # Only one chunk is allowed to set usage_metadata.input_tokens + # if multiple do, it's likely a bug that will result in overcounting + # input tokens (since the total number of input tokens applies to the full + # generation, not individual chunks) + if full and full.usage_metadata and full.usage_metadata["input_tokens"]: + assert ( + not chunk.usage_metadata or not chunk.usage_metadata["input_tokens"] + ), ( + "Only one chunk should set input_tokens," + " the rest should be 0 or None" + ) + full = chunk if full is None else cast("AIMessageChunk", full + chunk) + + assert isinstance(full, AIMessageChunk) + assert full.usage_metadata is not None + assert isinstance(full.usage_metadata["input_tokens"], int) + assert isinstance(full.usage_metadata["output_tokens"], int) + assert isinstance(full.usage_metadata["total_tokens"], int) + + # Check model_name is in response_metadata + # (Needed for langchain_core.callbacks.usage) + model_name = full.response_metadata.get("model_name") + assert isinstance(model_name, str) + assert model_name != "", "model_name is empty" + + # TODO: check these + if "audio_input" in self.supported_usage_metadata_details["stream"]: + msg = self.invoke_with_audio_input(stream=True) + assert msg.usage_metadata is not None + assert isinstance( + msg.usage_metadata.get("input_token_details", {}).get("audio"), int + ) + if "audio_output" in self.supported_usage_metadata_details["stream"]: + msg = self.invoke_with_audio_output(stream=True) + assert msg.usage_metadata is not None + assert isinstance( + msg.usage_metadata.get("output_token_details", {}).get("audio"), int + ) + if "reasoning_output" in self.supported_usage_metadata_details["stream"]: + msg = self.invoke_with_reasoning_output(stream=True) + assert msg.usage_metadata is not None + assert isinstance( + msg.usage_metadata.get("output_token_details", {}).get("reasoning"), int + ) + if "cache_read_input" in self.supported_usage_metadata_details["stream"]: + msg = self.invoke_with_cache_read_input(stream=True) + assert msg.usage_metadata is not None + assert isinstance( + msg.usage_metadata.get("input_token_details", {}).get("cache_read"), int + ) + if "cache_creation_input" in self.supported_usage_metadata_details["stream"]: + msg = self.invoke_with_cache_creation_input(stream=True) + assert msg.usage_metadata is not None + assert isinstance( + msg.usage_metadata.get("input_token_details", {}).get("cache_creation"), + int, + ) + + def test_stop_sequence(self, model: BaseChatModel) -> None: + """Test that model does not fail when invoked with the ``stop`` parameter, + which is a standard parameter for stopping generation at a certain token. + + `More on standard parameters `__ + + .. important:: + This should pass for all integrations! + + .. dropdown:: Troubleshooting + + TODO + + """ + result = model.invoke("hi", stop=["you"]) + assert isinstance(result, AIMessage) + + custom_model = self.chat_model_class( + **{ + **self.chat_model_params, + "stop": ["you"], + } + ) + result = custom_model.invoke("hi") + assert isinstance(result, AIMessage) + + def test_tool_calling(self, model: BaseChatModel) -> None: + """Test that the model generates tool calls. This test is skipped if the + ``has_tool_calling`` property on the test class is set to False. + + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. 
dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that ``bind_tools`` is implemented to correctly + translate LangChain tool objects into the appropriate schema for your + chat model. + + This test may fail if the chat model does not support a ``tool_choice`` + parameter. This parameter can be used to force a tool call. If + ``tool_choice`` is not supported and the model consistently fails this + test, you can ``xfail`` the test: + + .. code-block:: python + + @pytest.mark.xfail(reason=("Does not support tool_choice.")) + def test_tool_calling(self, model: BaseChatModelV1) -> None: + super().test_tool_calling(model) + + Otherwise, in the case that only one tool is bound, ensure that + ``tool_choice`` supports the string ``'any'`` to force calling that tool. + + """ + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + tool_choice_value = None if not self.has_tool_choice else "any" + + model_with_tools = model.bind_tools( + [magic_function], tool_choice=tool_choice_value + ) + query = "What is the value of magic_function(3)? Use the tool." + result = model_with_tools.invoke(query) + _validate_tool_call_message(result) + + # Test stream() + full: Optional[AIMessageChunk] = None + for chunk in model_with_tools.stream(query): + full = chunk if full is None else full + chunk # type: ignore[assignment] + assert isinstance(full, AIMessage) + _validate_tool_call_message(full) + + async def test_tool_calling_async(self, model: BaseChatModel) -> None: + """Test that the model generates tool calls. This test is skipped if the + ``has_tool_calling`` property on the test class is set to False. + + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that ``bind_tools`` is implemented to correctly + translate LangChain tool objects into the appropriate schema for your + chat model. + + This test may fail if the chat model does not support a ``tool_choice`` + parameter. This parameter can be used to force a tool call. If + ``tool_choice`` is not supported and the model consistently fails this + test, you can ``xfail`` the test: + + .. code-block:: python + + @pytest.mark.xfail(reason=("Does not support tool_choice.")) + async def test_tool_calling_async(self, model: BaseChatModelV1) -> None: + await super().test_tool_calling_async(model) + + Otherwise, in the case that only one tool is bound, ensure that + ``tool_choice`` supports the string ``'any'`` to force calling that tool. + + """ + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + tool_choice_value = None if not self.has_tool_choice else "any" + model_with_tools = model.bind_tools( + [magic_function], tool_choice=tool_choice_value + ) + query = "What is the value of magic_function(3)? Use the tool." 
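+ # Check ainvoke() first; astream() is exercised below. For this prompt the
+ # bound model is expected to emit a tool call for `magic_function`.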
+ result = await model_with_tools.ainvoke(query) + _validate_tool_call_message(result) + + # Test astream() + full: Optional[AIMessageChunk] = None + async for chunk in model_with_tools.astream(query): + full = chunk if full is None else full + chunk # type: ignore[assignment] + assert isinstance(full, AIMessage) + _validate_tool_call_message(full) + + def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None: + """Test that the model generates tool calls for tools that are derived from + LangChain runnables. This test is skipped if the ``has_tool_calling`` property + on the test class is set to False. + + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that ``bind_tools`` is implemented to correctly + translate LangChain tool objects into the appropriate schema for your + chat model. + + This test may fail if the chat model does not support a ``tool_choice`` + parameter. This parameter can be used to force a tool call. If + ``tool_choice`` is not supported and the model consistently fails this + test, you can ``xfail`` the test: + + .. code-block:: python + + @pytest.mark.xfail(reason=("Does not support tool_choice.")) + def test_bind_runnables_as_tools(self, model: BaseChatModelV1) -> None: + super().test_bind_runnables_as_tools(model) + + Otherwise, ensure that the ``tool_choice_value`` property is correctly + specified on the test class. + + """ + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + prompt = ChatPromptTemplate.from_messages( + [("human", "Hello. Please respond in the style of {answer_style}.")] + ) + llm = GenericFakeChatModel(messages=iter(["hello matey"])) + chain = prompt | llm | StrOutputParser() + tool_ = chain.as_tool( + name="greeting_generator", + description="Generate a greeting in a particular style of speaking.", + ) + + if self.has_tool_choice: + tool_choice: Optional[str] = "any" + else: + tool_choice = None + + model_with_tools = model.bind_tools([tool_], tool_choice=tool_choice) + query = "Using the tool, generate a Pirate greeting." + result = model_with_tools.invoke(query) + assert isinstance(result, AIMessage) + assert result.tool_calls + tool_call = result.tool_calls[0] + assert tool_call["args"].get( + "answer_style" + ) # TODO: do we need to handle if args is str? # noqa: E501 + assert is_tool_call_block(tool_call) + + def test_tool_message_histories_list_content( + self, + model: BaseChatModel, + my_adder_tool: BaseTool, + ) -> None: + """Test that message histories are compatible with list tool contents + (e.g. Anthropic format). + + These message histories will include AIMessage objects with "tool use" and + content blocks, e.g., + + .. code-block:: python + + [ + {"type": "text", "text": "Hmm let me think about that"}, + { + "type": "tool_use", + "input": {"fav_color": "green"}, + "id": "foo", + "name": "color_picker", + }, + ] + + This test should be skipped if the model does not support tool calling + (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. 
code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that: + + 1. The model can correctly handle message histories that include ``AIMessage`` objects with list content. + 2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format. + 3. The model can correctly handle ``ToolMessage`` objects with string content and arbitrary string values for ``tool_call_id``. + + You can ``xfail`` the test if tool calling is implemented but this format + is not supported. + + .. code-block:: python + + @pytest.mark.xfail(reason=("Not implemented.")) + def test_tool_message_histories_list_content(self, *args: Any) -> None: + super().test_tool_message_histories_list_content(*args) + + """ # noqa: E501 + pytest.xfail("Test not implemented yet.") + + # TODO + # if not self.has_tool_calling: + # pytest.skip("Test requires tool calling.") + + # model_with_tools = model.bind_tools([my_adder_tool]) + # function_name = "my_adder_tool" + # function_args = {"a": 1, "b": 2} + + # messages_list_content = [ + # HumanMessage("What is 1 + 2"), + # # List content (e.g., Anthropic) + # AIMessage( + # [ + # {"type": "text", "text": "some text"}, + # { + # "type": "tool_use", + # "id": "abc123", + # "name": function_name, + # "input": function_args, + # }, + # ], + # tool_calls=[ + # { + # "name": function_name, + # "args": function_args, + # "id": "abc123", + # "type": "tool_call", + # }, + # ], + # ), + # ToolMessage( + # json.dumps({"result": 3}), + # name=function_name, + # tool_call_id="abc123", + # ), + # ] + # result_list_content = model_with_tools.invoke(messages_list_content) + # assert isinstance(result_list_content, AIMessage) + + def test_tool_choice(self, model: BaseChatModel) -> None: + """Test that the model can force tool calling via the ``tool_choice`` + parameter. This test is skipped if the ``has_tool_choice`` property on the + test class is set to False. + + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_choice`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_choice(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check whether the ``test_tool_calling`` test is passing. + If it is not, refer to the troubleshooting steps in that test first. + + If ``test_tool_calling`` is passing, check that the underlying model + supports forced tool calling. If it does, ``bind_tools`` should accept a + ``tool_choice`` parameter that can be used to force a tool call. + + It should accept: + + 1. The string ``'any'`` to force calling the bound tool, and, + 2. The string name of the tool to force calling that tool. + + """ + if not self.has_tool_choice or not self.has_tool_calling: + pytest.skip("Test requires tool choice.") + + @tool + def get_weather(location: str) -> str: + """Get weather at a location.""" + return "It's sunny." 
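+ # Force a tool call two ways: "any" (the model must call some bound tool)
+ # and by tool name (the model must call that specific tool).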
+ + for tool_choice in ["any", "magic_function"]: + model_with_tools = model.bind_tools( + [magic_function, get_weather], tool_choice=tool_choice + ) + result = model_with_tools.invoke("Hello!") + assert isinstance(result, AIMessage) + assert result.tool_calls + if tool_choice == "magic_function": + assert result.tool_calls[0]["name"] == "magic_function" + + def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None: + """Test that the model generates tool calls for tools with no arguments. + This test is skipped if the ``has_tool_calling`` property on the test class + is set to False. + + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that ``bind_tools`` is implemented to correctly + translate LangChain tool objects into the appropriate schema for your + chat model. It should correctly handle the case where a tool has no + arguments. + + This test may fail if the chat model does not support a ``tool_choice`` + parameter. This parameter can be used to force a tool call. It may also + fail if a provider does not support this form of tool. In these cases, + you can ``xfail`` the test: + + .. code-block:: python + + @pytest.mark.xfail(reason=("Does not support tool_choice.")) + def test_tool_calling_with_no_arguments(self, model: BaseChatModelV1) -> None: + super().test_tool_calling_with_no_arguments(model) + + Otherwise, in the case that only one tool is bound, ensure that + ``tool_choice`` supports the string ``'any'`` to force calling that tool. + + """ # noqa: E501 + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + tool_choice_value = None if not self.has_tool_choice else "any" + model_with_tools = model.bind_tools( + [magic_function_no_args], tool_choice=tool_choice_value + ) + query = "What is the value of magic_function_no_args()? You must use the tool." + + # Invoke + result = model_with_tools.invoke(query) + _validate_tool_call_message_no_args(result) + + # Stream + full: Optional[AIMessageChunk] = None + for chunk in model_with_tools.stream(query): + full = chunk if full is None else full + chunk # type: ignore[assignment] + assert isinstance(full, AIMessage) + _validate_tool_call_message_no_args(full) + + def test_tool_message_error_status( + self, model: BaseChatModel, my_adder_tool: BaseTool + ) -> None: + """Test that ``ToolMessage`` with ``status="error"`` can be handled. + + These messages may take the form: + + .. code-block:: python + + ToolMessage( + content="Error: Missing required argument 'b'.", + status="error", + ) + + If possible, the ``status`` field should be parsed and passed appropriately + to the model. + + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. 
dropdown:: Troubleshooting + + If this test fails, check that the ``status`` field on ``ToolMessage`` + objects is either ignored or passed to the model appropriately. + + """ + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + model_with_tools = model.bind_tools([my_adder_tool]) + messages = [ + HumanMessage("What is 1 + 2?"), + create_tool_call( + "my_adder_tool", {"a": 1}, id="abc123" + ), # Missing required argument 'b' + ToolMessage( + "Error: Missing required argument 'b'.", + tool_call_id="abc123", + status="error", + ), + ] + result = model_with_tools.invoke(messages) # type: ignore[arg-type] + assert isinstance(result, AIMessage) + + def test_structured_few_shot_examples( + self, model: BaseChatModel, my_adder_tool: BaseTool + ) -> None: + """Test that the model can process few-shot examples with tool calls. + + These are represented as a sequence of messages of the following form: + + - ``HumanMessage`` with ``TextContentBlock`` content; + - ``AIMessage`` with the ``tool_calls`` attribute populated; + - ``ToolMessage`` with string content; + - ``ToolMessage`` with content block content; + - ``AIMessage`` with ``TextContentBlock`` content (an answer); + - ``HumanMessage`` with ``TextContentBlock`` content (a follow-up question). + + This test should be skipped if the model does not support tool calling + (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + This test uses `a utility function `__ + in ``langchain_core`` to generate a sequence of messages representing + "few-shot" examples. + + If this test fails, check that the model can correctly handle this + sequence of messages. + + You can ``xfail`` the test if tool calling is implemented but this format + is not supported. + + .. code-block:: python + + @pytest.mark.xfail(reason=("Not implemented.")) + def test_structured_few_shot_examples(self, *args: Any) -> None: + super().test_structured_few_shot_examples(*args) + + """ + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + model_with_tools = model.bind_tools([my_adder_tool], tool_choice="any") + function_result = json.dumps({"result": 3}) + + tool_schema = my_adder_tool.args_schema + assert isinstance(tool_schema, type) + assert issubclass(tool_schema, BaseModel) + # TODO verify this is correct + few_shot_messages = tool_example_to_messages( + "What is 1 + 2", + [tool_schema(a=1, b=2)], + tool_outputs=[function_result], + ai_response=function_result, + ) + + messages = [*few_shot_messages, HumanMessage("What is 3 + 4")] + result = model_with_tools.invoke(messages) # type: ignore[arg-type] + assert isinstance(result, AIMessage) + + @pytest.mark.parametrize("schema_type", ["pydantic", "typeddict", "json_schema"]) + def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None: + """Test to verify structured output is generated both on ``invoke()`` and ``stream()``. + + This test is optional and should be skipped if the model does not support + structured output (see Configuration below). + + .. dropdown:: Configuration + + To disable structured output tests, set ``has_structured_output`` to False + in your test class: + + .. 
code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_structured_output(self) -> bool: + return False + + By default, ``has_structured_output`` is True if a model overrides the + ``with_structured_output`` or ``bind_tools`` methods. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that the model's ``bind_tools`` method + properly handles both JSON Schema and Pydantic V2 models. + + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. + + See `example implementation `__ + of ``with_structured_output``. + + """ # noqa: E501 + if not self.has_structured_output: + pytest.skip("Test requires structured output.") + + schema, validation_function = _get_joke_class(schema_type) # type: ignore[arg-type] + chat = model.with_structured_output(schema, **self.structured_output_kwargs) + mock_callback = MagicMock() + mock_callback.on_chat_model_start = MagicMock() + + invoke_callback = _TestCallbackHandler() + + result = chat.invoke( + "Tell me a joke about cats.", config={"callbacks": [invoke_callback]} + ) + validation_function(result) + + assert len(invoke_callback.options) == 1, ( + "Expected on_chat_model_start to be called once" + ) + assert isinstance(invoke_callback.options[0], dict) + assert isinstance( + invoke_callback.options[0]["ls_structured_output_format"]["schema"], dict + ) + assert invoke_callback.options[0]["ls_structured_output_format"][ + "schema" + ] == convert_to_json_schema(schema) + + stream_callback = _TestCallbackHandler() + + for chunk in chat.stream( + "Tell me a joke about cats.", config={"callbacks": [stream_callback]} + ): + validation_function(chunk) + assert chunk + + assert len(stream_callback.options) == 1, ( + "Expected on_chat_model_start to be called once" + ) + assert isinstance(stream_callback.options[0], dict) + assert isinstance( + stream_callback.options[0]["ls_structured_output_format"]["schema"], dict + ) + assert stream_callback.options[0]["ls_structured_output_format"][ + "schema" + ] == convert_to_json_schema(schema) + + @pytest.mark.parametrize("schema_type", ["pydantic", "typeddict", "json_schema"]) + async def test_structured_output_async( + self, model: BaseChatModel, schema_type: str + ) -> None: + """Test to verify structured output is generated both on ``invoke()`` and ``stream()``. + + This test is optional and should be skipped if the model does not support + structured output (see Configuration below). + + .. dropdown:: Configuration + + To disable structured output tests, set ``has_structured_output`` to False + in your test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_structured_output(self) -> bool: + return False + + By default, ``has_structured_output`` is True if a model overrides the + ``with_structured_output`` or ``bind_tools`` methods. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that the model's ``bind_tools`` method + properly handles both JSON Schema and Pydantic V2 models. + + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. + + See `example implementation `__ + of ``with_structured_output``. 
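+
+ If your integration requires provider-specific keyword arguments for
+ structured output, supply them via the ``structured_output_kwargs`` property
+ on the test class. A minimal sketch (the ``"method"`` value shown is an
+ assumption; use whatever your provider supports):
+
+ .. code-block:: python
+
+ class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests):
+ @property
+ def structured_output_kwargs(self) -> dict:
+ return {"method": "function_calling"}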
+ + """ # noqa: E501 + if not self.has_structured_output: + pytest.skip("Test requires structured output.") + + schema, validation_function = _get_joke_class(schema_type) # type: ignore[arg-type] + + chat = model.with_structured_output(schema, **self.structured_output_kwargs) + ainvoke_callback = _TestCallbackHandler() + + result = await chat.ainvoke( + "Tell me a joke about cats.", config={"callbacks": [ainvoke_callback]} + ) + validation_function(result) + + assert len(ainvoke_callback.options) == 1, ( + "Expected on_chat_model_start to be called once" + ) + assert isinstance(ainvoke_callback.options[0], dict) + assert isinstance( + ainvoke_callback.options[0]["ls_structured_output_format"]["schema"], dict + ) + assert ainvoke_callback.options[0]["ls_structured_output_format"][ + "schema" + ] == convert_to_json_schema(schema) + + astream_callback = _TestCallbackHandler() + + async for chunk in chat.astream( + "Tell me a joke about cats.", config={"callbacks": [astream_callback]} + ): + validation_function(chunk) + assert chunk + + assert len(astream_callback.options) == 1, ( + "Expected on_chat_model_start to be called once" + ) + + assert isinstance(astream_callback.options[0], dict) + assert isinstance( + astream_callback.options[0]["ls_structured_output_format"]["schema"], dict + ) + assert astream_callback.options[0]["ls_structured_output_format"][ + "schema" + ] == convert_to_json_schema(schema) + + def test_structured_output_optional_param(self, model: BaseChatModel) -> None: + """Test to verify we can generate structured output that includes optional + parameters. + + This test is optional and should be skipped if the model does not support + structured output (see Configuration below). + + .. dropdown:: Configuration + + To disable structured output tests, set ``has_structured_output`` to False + in your test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_structured_output(self) -> bool: + return False + + By default, ``has_structured_output`` is True if a model overrides the + ``with_structured_output`` or ``bind_tools`` methods. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that the model's ``bind_tools`` method + properly handles Pydantic V2 models with optional parameters. + + ``langchain_core`` implements `a utility function `__ + that will accommodate most formats. + + See `example implementation `__ + of ``with_structured_output``. + + """ + if not self.has_structured_output: + pytest.skip("Test requires structured output.") + + # Pydantic + class Joke(BaseModel): + """Joke to tell user.""" + + setup: str = Field(description="question to set up a joke") + punchline: Optional[str] = Field( + default=None, description="answer to resolve the joke" + ) + + chat = model.with_structured_output(Joke, **self.structured_output_kwargs) + setup_result = chat.invoke( + "Give me the setup to a joke about cats, no punchline." 
+ ) + assert isinstance(setup_result, Joke) + + joke_result = chat.invoke("Give me a joke about cats, include the punchline.") + assert isinstance(joke_result, Joke) + + # Schema + chat = model.with_structured_output( + Joke.model_json_schema(), **self.structured_output_kwargs + ) + result = chat.invoke("Tell me a joke about cats.") + assert isinstance(result, dict) + + # TypedDict + class JokeDict(TypedDict): + """Joke to tell user.""" + + setup: Annotated[str, ..., "question to set up a joke"] + punchline: Annotated[Optional[str], None, "answer to resolve the joke"] + + chat = model.with_structured_output(JokeDict, **self.structured_output_kwargs) + result = chat.invoke("Tell me a joke about cats.") + assert isinstance(result, dict) + + def test_json_mode(self, model: BaseChatModel) -> None: + """Test structured output via `JSON mode. `_. + + This test is optional and should be skipped if the model does not support + the JSON mode feature (see Configuration below). + + .. dropdown:: Configuration + + To disable this test, set ``supports_json_mode`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def supports_json_mode(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + See `example implementation `__ + of ``with_structured_output``. + + """ + if not self.supports_json_mode: + pytest.skip("Test requires json mode support.") + + from pydantic import BaseModel as BaseModelProper + from pydantic import Field as FieldProper + + class Joke(BaseModelProper): + """Joke to tell user.""" + + setup: str = FieldProper(description="question to set up a joke") + punchline: str = FieldProper(description="answer to resolve the joke") + + # Pydantic class + # Type ignoring since the interface only officially supports pydantic 1 + # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2. + # We'll need to do a pass updating the type signatures. + chat = model.with_structured_output(Joke, method="json_mode") + msg = ( + "Tell me a joke about cats. Return the result as a JSON with 'setup' and " + "'punchline' keys. Return nothing other than JSON." + ) + result = chat.invoke(msg) + assert isinstance(result, Joke) + + for chunk in chat.stream(msg): + assert isinstance(chunk, Joke) + + # Schema + chat = model.with_structured_output( + Joke.model_json_schema(), method="json_mode" + ) + result = chat.invoke(msg) + assert isinstance(result, dict) + assert set(result.keys()) == {"setup", "punchline"} + + for chunk in chat.stream(msg): + assert isinstance(chunk, dict) + assert isinstance(chunk, dict) # for mypy + assert set(chunk.keys()) == {"setup", "punchline"} + + def test_pdf_inputs(self, model: BaseChatModel) -> None: + """Test that the model can process PDF inputs. + + This test should be skipped (see Configuration below) if the model does not + support PDF inputs. These will take the form: + + .. code-block:: python + + { + "type": "image", + "source_type": "base64", + "data": "", + "mime_type": "application/pdf", + } + + See https://python.langchain.com/docs/concepts/multimodality/ + + .. dropdown:: Configuration + + To disable this test, set ``supports_pdf_inputs`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + + @property + def supports_pdf_inputs(self) -> bool: + return False + + .. 
dropdown:: Troubleshooting + + If this test fails, check that the model can correctly handle messages + with pdf content blocks, including base64-encoded files. Otherwise, set + the ``supports_pdf_inputs`` property to False. + + """ + pytest.xfail("Test not implemented yet.") + + # TODO + # if not self.supports_pdf_inputs: + # pytest.skip("Model does not support PDF inputs.") + # url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf" + # pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8") + + # message = HumanMessage( + # [ + # { + # "type": "text", + # "text": "Summarize this document:", + # }, + # { + # "type": "file", + # "source_type": "base64", + # "mime_type": "application/pdf", + # "data": pdf_data, + # }, + # ] + # ) + # _ = model.invoke([message]) + + # # Test OpenAI Chat Completions format + # message = HumanMessage( + # [ + # { + # "type": "text", + # "text": "Summarize this document:", + # }, + # { + # "type": "file", + # "file": { + # "filename": "test file.pdf", + # "file_data": f"data:application/pdf;base64,{pdf_data}", + # }, + # }, + # ] + # ) + # _ = model.invoke([message]) + + def test_audio_inputs(self, model: BaseChatModel) -> None: + """Test that the model can process audio inputs. + + This test should be skipped (see Configuration below) if the model does not + support audio inputs. These will take the form: + + .. code-block:: python + + # AudioContentBlock + { + "type": "audio", + "base64": "", + "mime_type": "audio/wav", # or appropriate mime-type + } + + See https://python.langchain.com/docs/concepts/multimodality/ + + .. dropdown:: Configuration + + To disable this test, set ``supports_audio_content_blocks`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + + @property + def supports_audio_content_blocks(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that the model can correctly handle messages + with audio content blocks. Otherwise, set the ``supports_audio_content_blocks`` + property to False. + + """ # noqa: E501 + if not self.supports_audio_content_blocks: + pytest.skip("Model does not support AudioContentBlock inputs.") + + url = "https://upload.wikimedia.org/wikipedia/commons/3/3d/Alcal%C3%A1_de_Henares_%28RPS_13-04-2024%29_canto_de_ruise%C3%B1or_%28Luscinia_megarhynchos%29_en_el_Soto_del_Henares.wav" + audio_data = base64.b64encode(httpx.get(url).content).decode("utf-8") + + message = HumanMessage( + [ + create_text_block("Describe this audio:"), + create_audio_block( + base64=audio_data, + mime_type="audio/wav", + ), + ] + ) + _ = model.invoke([message]) + + # TODO? + # Test OpenAI Chat Completions format + # message = HumanMessage( + # [ + # { + # "type": "text", + # "text": "Describe this audio:", + # }, + # { + # "type": "input_audio", + # "input_audio": {"data": audio_data, "format": "wav"}, + # }, + # ] + # ) + # _ = model.invoke([message]) + + def test_image_inputs(self, model: BaseChatModel) -> None: + """Test that the model can process image inputs. + + This test should be skipped (see Configuration below) if the model does not + support image inputs. These will take the form: + + .. code-block:: python + + # ImageContentBlock + { + "type": "image", + "base64": "", + "mime_type": "image/png", # or appropriate mime-type + } + + TODO: verify this + For backward-compatibility, we must also support OpenAI-style + image content blocks: + + .. 
code-block:: python + + [ + {"type": "text", "text": "describe the weather in this image"}, + { + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}, + }, + ] + + See https://python.langchain.com/docs/concepts/multimodality/ + + .. dropdown:: Configuration + + To disable this test, set ``supports_image_content_blocks`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def supports_image_content_blocks(self) -> bool: + return False + + # Can also explicitly disable testing image URLs: + @property + def supports_image_urls(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that the model can correctly handle messages + with image content blocks, including base64-encoded images. Otherwise, set + the ``supports_image_content_blocks`` property to False. + + """ + if not self.supports_image_content_blocks: + pytest.skip("Model does not support image message.") + + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") + + # TODO? + # OpenAI format, base64 data + # message = HumanMessage( + # content=[ + # {"type": "text", "text": "describe the weather in this image"}, + # { + # "type": "image_url", + # "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}, + # }, + # ], + # ) + # _ = model.invoke([message]) + + # Standard format, base64 data + message = HumanMessage( + [ + create_text_block("describe the weather in this image"), + create_image_block( + base64=image_data, + mime_type="image/jpeg", + ), + ], + ) + _ = model.invoke([message]) + + # TODO? + # Standard format, URL + # if self.supports_image_urls: + # message = HumanMessage( + # content=[ + # {"type": "text", "text": "describe the weather in this image"}, + # { + # "type": "image", + # "source_type": "url", + # "url": image_url, + # }, + # ], + # ) + # _ = model.invoke([message]) + + def test_image_tool_message(self, model: BaseChatModel) -> None: + """Test that the model can process ToolMessages with image inputs. + + TODO: is this needed? + + This test should be skipped if the model does not support messages of the + form: + + .. code-block:: python + + ToolMessage( + content=[ + { + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}, + }, + ], + tool_call_id="1", + ) + + containing image content blocks in OpenAI Chat Completions format, in addition + to messages of the form: + + .. code-block:: python + + ToolMessage( + content=[ + { + "type": "image", + "source_type": "base64", + "data": image_data, + "mime_type": "image/jpeg", + }, + ], + tool_call_id="1", + ) + + containing image content blocks in standard format. + + This test can be skipped by setting the ``supports_image_tool_message`` property + to False (see Configuration below). + + .. dropdown:: Configuration + + To disable this test, set ``supports_image_tool_message`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def supports_image_tool_message(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that the model can correctly handle messages + with image content blocks in ToolMessages, including base64-encoded + images. 
Otherwise, set the ``supports_image_tool_message`` property to + False. + + """ + pytest.xfail("Test not implemented yet.") + + # TODO + # if not self.supports_image_tool_message: + # pytest.skip("Model does not support image tool message.") + # image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + # image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") + + # # Support both OpenAI and standard formats + # oai_format_message = ToolMessage( + # content=[ + # { + # "type": "image_url", + # "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}, + # }, + # ], + # tool_call_id="1", + # name="random_image", + # ) + + # standard_format_message = ToolMessage( + # content=[ + # { + # "type": "image", + # "source_type": "base64", + # "data": image_data, + # "mime_type": "image/jpeg", + # }, + # ], + # tool_call_id="1", + # name="random_image", + # ) + + # for tool_message in [oai_format_message, standard_format_message]: + # messages = [ + # HumanMessage( + # "get a random image using the tool and describe the weather" + # ), + # AIMessage( + # [], + # tool_calls=[ + # { + # "type": "tool_call", + # "id": "1", + # "name": "random_image", + # "args": {}, + # } + # ], + # ), + # tool_message, + # ] + + # def random_image() -> str: + # """Return a random image.""" + # return "" + + # _ = model.bind_tools([random_image]).invoke(messages) + + def test_anthropic_inputs(self, model: BaseChatModel) -> None: + """Test that model can process Anthropic-style message histories. + + TODO? + + These message histories will include ``AIMessage`` objects with ``tool_use`` + content blocks, e.g., + + .. code-block:: python + + AIMessage( + [ + {"type": "text", "text": "Hmm let me think about that"}, + { + "type": "tool_use", + "input": {"fav_color": "green"}, + "id": "foo", + "name": "color_picker", + }, + ] + ) + + as well as ``HumanMessage`` objects containing ``tool_result`` content blocks: + + .. code-block:: python + + HumanMessage( + [ + { + "type": "tool_result", + "tool_use_id": "foo", + "content": [ + { + "type": "text", + "text": "green is a great pick! that's my sister's favorite color", # noqa: E501 + } + ], + "is_error": False, + }, + {"type": "text", "text": "what's my sister's favorite color"}, + ] + ) + + This test should be skipped if the model does not support messages of this + form (or doesn't support tool calling generally). See Configuration below. + + .. dropdown:: Configuration + + To disable this test, set ``supports_anthropic_inputs`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def supports_anthropic_inputs(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that: + + 1. The model can correctly handle message histories that include message objects with list content. + 2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format. + 3. HumanMessages with "tool_result" content blocks are correctly handled. + + Otherwise, if Anthropic tool call and result formats are not supported, + set the ``supports_anthropic_inputs`` property to False. 
+ + """ # noqa: E501 + pytest.xfail("Test not implemented yet.") + + # TODO + # if not self.supports_anthropic_inputs: + # pytest.skip("Model does not explicitly support Anthropic inputs.") + + # # Anthropic-format tool + # color_picker = { + # "name": "color_picker", + # "input_schema": { + # "type": "object", + # "properties": { + # "fav_color": {"type": "string"}, + # }, + # "required": ["fav_color"], + # }, + # "description": "Input your fav color and get a random fact about it.", + # "cache_control": {"type": "ephemeral"}, + # } + + # human_content: list[dict] = [ + # { + # "type": "text", + # "text": "what's your favorite color in this image", + # "cache_control": {"type": "ephemeral"}, + # }, + # ] + # if self.supports_image_inputs: + # image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + # image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") # noqa: E501 + # human_content.append( + # { + # "type": "image", + # "source": { + # "type": "base64", + # "media_type": "image/jpeg", + # "data": image_data, + # }, + # } + # ) + # messages = [ + # SystemMessage("you're a good assistant"), + # HumanMessage(human_content), # type: ignore[arg-type] + # AIMessage( + # [ + # {"type": "text", "text": "Hmm let me think about that"}, + # { + # "type": "tool_use", + # "input": {"fav_color": "green"}, + # "id": "foo", + # "name": "color_picker", + # }, + # ], + # tool_calls=[ + # { + # "name": "color_picker", + # "args": {"fav_color": "green"}, + # "id": "foo", + # "type": "tool_call", + # } + # ], + # ), + # ToolMessage("That's a great pick!", tool_call_id="foo"), + # ] + # response = model.bind_tools([color_picker]).invoke(messages) + # assert isinstance(response, AIMessage) + + # # Test thinking blocks + # messages = [ + # HumanMessage( + # [ + # { + # "type": "text", + # "text": "Hello", + # }, + # ] + # ), + # AIMessage( + # [ + # { + # "type": "thinking", + # "thinking": "I'm thinking...", + # "signature": "abc123", + # }, + # { + # "type": "text", + # "text": "Hello, how are you?", + # }, + # ] + # ), + # HumanMessage( + # [ + # { + # "type": "text", + # "text": "Well, thanks.", + # }, + # ] + # ), + # ] + # response = model.invoke(messages) + # assert isinstance(response, AIMessage) + + def test_message_with_name(self, model: BaseChatModel) -> None: + """Test that ``HumanMessage`` with values for the ``name`` field can be handled. + + This test expects that the model with a non-empty ``TextContentBlock``. + + These messages may take the form: + + .. code-block:: python + + HumanMessage("hello", name="example_user") + + If possible, the ``name`` field should be parsed and passed appropriately + to the model. Otherwise, it should be ignored. + + .. dropdown:: Troubleshooting + + If this test fails, check that the ``name`` field on ``HumanMessage`` + objects is either ignored or passed to the model appropriately. + + """ + result = model.invoke([HumanMessage("hello", name="example_user")]) + assert result is not None + assert isinstance(result, AIMessage) + assert len(result.content) > 0 + assert isinstance(result.text, str) + assert len(result.text) > 0 + + def test_agent_loop(self, model: BaseChatModel) -> None: + """Test that the model supports a simple ReAct agent loop. This test is skipped + if the ``has_tool_calling`` property on the test class is set to False. 
+ + This test is optional and should be skipped if the model does not support + tool calling (see Configuration below). + + .. dropdown:: Configuration + + To disable tool calling tests, set ``has_tool_calling`` to False in your + test class: + + .. code-block:: python + + class TestMyV1ChatModelIntegration(ChatModelV1IntegrationTests): + @property + def has_tool_calling(self) -> bool: + return False + + .. dropdown:: Troubleshooting + + If this test fails, check that ``bind_tools`` is implemented to correctly + translate LangChain tool objects into the appropriate schema for your + chat model. + + Check also that all required information (e.g., tool calling identifiers) + from ``AIMessage`` objects is propagated correctly to model payloads. + + This test may fail if the chat model does not consistently generate tool + calls in response to an appropriate query. In these cases you can ``xfail`` + the test: + + .. code-block:: python + + @pytest.mark.xfail(reason=("Does not support tool_choice.")) + def test_agent_loop(self, model: BaseChatModel) -> None: + super().test_agent_loop(model) + + """ + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + + @tool + def get_weather(location: str) -> str: + """Call to surf the web.""" + return "It's sunny." + + llm_with_tools = model.bind_tools([get_weather]) + input_message = HumanMessage("What is the weather in San Francisco, CA?") + tool_call_message = llm_with_tools.invoke([input_message]) + assert isinstance(tool_call_message, AIMessage) + tool_calls = tool_call_message.tool_calls + assert len(tool_calls) == 1 + tool_call = tool_calls[0] + tool_message = get_weather.invoke(tool_call) + assert isinstance(tool_message, ToolMessage) + response = llm_with_tools.invoke( + [ + input_message, + tool_call_message, + tool_message, + ] + ) + assert isinstance(response, AIMessage) + + @pytest.mark.benchmark + @pytest.mark.vcr + def test_stream_time( + self, model: BaseChatModel, benchmark: BenchmarkFixture, vcr: Cassette + ) -> None: + """Test that streaming does not introduce undue overhead. + + See ``enable_vcr_tests`` dropdown :class:`above ` + for more information. + + .. dropdown:: Configuration + + This test can be enabled or disabled using the ``enable_vcr_tests`` + property. For example, to disable the test, set this property to ``False``: + + .. code-block:: python + + @property + def enable_vcr_tests(self) -> bool: + return False + + .. important:: + + VCR will by default record authentication headers and other sensitive + information in cassettes. See ``enable_vcr_tests`` dropdown + :class:`above ` for how to configure what + information is recorded in cassettes. 
+ + """ + if not self.enable_vcr_tests: + pytest.skip("VCR not set up.") + + def _run() -> None: + for _ in model.stream("Write a story about a cat."): + pass + + if not vcr.responses: + _run() + else: + benchmark(_run) + + def invoke_with_audio_input(self, *, stream: bool = False) -> AIMessage: + """:private:""" + # To be implemented in test subclass + raise NotImplementedError + + def invoke_with_audio_output(self, *, stream: bool = False) -> AIMessage: + """:private:""" + # To be implemented in test subclass + raise NotImplementedError + + def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage: + """:private:""" + # To be implemented in test subclass + raise NotImplementedError + + def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage: + """:private:""" + # To be implemented in test subclass + raise NotImplementedError + + def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage: + """:private:""" + # To be implemented in test subclass + raise NotImplementedError + + def test_unicode_tool_call_integration( + self, + model: BaseChatModel, + *, + tool_choice: Optional[str] = None, + force_tool_call: bool = True, + ) -> None: + """Generic integration test for Unicode characters in tool calls. + + Args: + model: The chat model to test + tool_choice: Tool choice parameter to pass to ``bind_tools()`` (provider-specific) + force_tool_call: Whether to force a tool call (use ``tool_choice=True`` if None) + + Tests that Unicode characters in tool call arguments are preserved correctly, + not escaped as ``\\uXXXX`` sequences. + """ # noqa: E501 + if not self.has_tool_calling: + pytest.skip("Test requires tool calling support.") + + # Configure tool choice based on provider capabilities + if tool_choice is None and force_tool_call: + tool_choice = "any" + + if tool_choice is not None: + llm_with_tool = model.bind_tools( + [unicode_customer], tool_choice=tool_choice + ) + else: + llm_with_tool = model.bind_tools([unicode_customer]) + + # Test with Chinese characters + msgs = [ + HumanMessage( + "Create a customer named '你好啊集团' (Hello Group) - a Chinese " + "technology company" + ) + ] + ai_msg = llm_with_tool.invoke(msgs) + + assert isinstance(ai_msg, AIMessage) + assert isinstance(ai_msg.tool_calls, list) + + if force_tool_call: + assert len(ai_msg.tool_calls) >= 1, ( + f"Expected at least 1 tool call, got {len(ai_msg.tool_calls)}" + ) + + if ai_msg.tool_calls: + tool_call = ai_msg.tool_calls[0] + assert tool_call["name"] == "unicode_customer" + assert "args" in tool_call + + # Verify Unicode characters are properly handled + args = tool_call["args"] + assert "customer_name" in args + customer_name = args["customer_name"] + + # The model should include the Unicode characters, not escaped sequences + assert ( + "你好" in customer_name + or "你" in customer_name + or "好" in customer_name + ), f"Unicode characters not found in: {customer_name}" + + # Test with additional Unicode examples - Japanese + msgs_jp = [ + HumanMessage( + "Create a customer named 'こんにちは株式会社' (Hello Corporation) - a " + "Japanese company" + ) + ] + ai_msg_jp = llm_with_tool.invoke(msgs_jp) + + assert isinstance(ai_msg_jp, AIMessage) + + if force_tool_call: + assert len(ai_msg_jp.tool_calls) >= 1 + + if ai_msg_jp.tool_calls: + tool_call_jp = ai_msg_jp.tool_calls[0] + args_jp = tool_call_jp["args"] + customer_name_jp = args_jp["customer_name"] + + # Verify Japanese Unicode characters are preserved + assert ( + "こんにちは" in customer_name_jp + or "株式会社" in 
customer_name_jp + or "こ" in customer_name_jp + or "ん" in customer_name_jp + ), f"Japanese Unicode characters not found in: {customer_name_jp}" + + # TODO + # def test_multimodal_reasoning(self, model: BaseChatModel) -> None: + # """Test complex reasoning with multiple content types. + + # TODO: expand docstring + + # """ + # if not self.supports_multimodal_reasoning: + # pytest.skip("Model does not support multimodal reasoning.") + + # content_blocks: list[types.ContentBlock] = [ + # create_text_block( + # "Compare these media files and provide reasoning analysis:" + # ), + # create_image_block( + # base64=_get_test_image_base64(), + # mime_type="image/png", + # ), + # ] + + # if self.supports_audio_content_blocks: + # content_blocks.append( + # create_audio_block( + # base64=_get_test_audio_base64(), + # mime_type="audio/wav", + # ) + # ) + + # message = HumanMessage(content=cast("list[types.ContentBlock]", content_blocks)) # noqa: E501 + # result = model.invoke([message]) + + # assert isinstance(result, AIMessage) + + # if self.supports_reasoning_content_blocks: + # reasoning_blocks = [ + # block + # for block in result.content + # if isinstance(block, dict) and is_reasoning_block(block) + # ] + # assert len(reasoning_blocks) > 0 + + def test_citation_generation_with_sources(self, model: BaseChatModel) -> None: + """Test that the model can generate ``Citations`` with source links. + + TODO: expand docstring + + """ + if not self.supports_structured_citations: + pytest.skip("Model does not support structured citations.") + + message = HumanMessage( + "Provide factual information about the distance to the moon with proper " + "citations to scientific sources." + ) + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + + # Check for text blocks with citations + text_blocks_with_citations = [] + for block in result.content: + if ( + isinstance(block, dict) + and is_text_block(block) + and "annotations" in block + ): + annotations = cast("list[dict[str, Any]]", block.get("annotations", [])) + citations = [ + ann + for ann in annotations + if isinstance(ann, dict) and ann.get("type") == "citation" + ] + if citations: + text_blocks_with_citations.append(block) + assert len(text_blocks_with_citations) > 0 + + # Validate citation structure + for block in text_blocks_with_citations: + annotations = cast("list[dict[str, Any]]", block.get("annotations", [])) + for annotation in annotations: + if annotation.get("type") == "citation": + # TODO: evaluate these since none are *technically* required + # This may be a test that needs adjustment on per-integration basis + assert "cited_text" in annotation + assert "start_index" in annotation + assert "end_index" in annotation + + def test_web_search_integration(self, model: BaseChatModel) -> None: + """Test web search content blocks integration. + + TODO: expand docstring + + """ + if not self.supports_web_search_blocks: + pytest.skip("Model does not support web search blocks.") + + message = HumanMessage( + "Search for the latest developments in quantum computing." + ) + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + + # Check for web search blocks + search_call_blocks = [ + block + for block in result.content + if isinstance(block, dict) and block.get("type") == "web_search_call" + ] + search_result_blocks = [ + block + for block in result.content + if isinstance(block, dict) and block.get("type") == "web_search_result" + ] + # TODO: should this be one or the other or both? 
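+ # Providers differ in whether they surface the search call, the search
+ # results, or both, so accept either block type here.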
+ assert len(search_call_blocks) > 0 or len(search_result_blocks) > 0 + + def test_code_interpreter_blocks(self, model: BaseChatModel) -> None: + """Test code interpreter content blocks. + + TODO: expand docstring + + """ + if not self.supports_code_interpreter: + pytest.skip("Model does not support code interpreter blocks.") + + message = HumanMessage("Calculate the factorial of 10 using Python code.") + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + + # Check for code interpreter blocks + code_blocks = [ + block + for block in result.content + if isinstance(block, dict) + and block.get("type") + in [ + "code_interpreter_call", + "code_interpreter_output", + "code_interpreter_result", + ] + ] + # TODO: should we require all three types or just an output/result? + assert len(code_blocks) > 0 + + def test_tool_calling_with_content_blocks(self, model: BaseChatModel) -> None: + """Test tool calling with content blocks. + + TODO: expand docstring + + """ + if not self.has_tool_calling: + pytest.skip("Model does not support tool calls.") + + @tool + def calculate_area(length: float, width: float) -> str: + """Calculate the area of a rectangle.""" + area = length * width + return f"The area is {area} square units." + + model_with_tools = model.bind_tools([calculate_area]) + message = HumanMessage( + "Calculate the area of a rectangle with length 5 and width 3." + ) + + result = model_with_tools.invoke([message]) + _validate_tool_call_message(result) + + def test_plaintext_content_blocks_from_documents( + self, model: BaseChatModel + ) -> None: + """Test PlainTextContentBlock for document plaintext content. + + TODO: expand docstring + + """ + if not self.supports_plaintext_content_blocks: + pytest.skip("Model does not support PlainTextContentBlock.") + + # Test with PlainTextContentBlock (plaintext from document) + plaintext_block = create_plaintext_block( + text="This is plaintext content extracted from a document.", + file_id="doc_123", + ) + + message = HumanMessage( + content=cast("list[types.ContentBlock]", [plaintext_block]) + ) + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + # TODO expand + + def test_content_block_streaming_integration(self, model: BaseChatModel) -> None: + """Test streaming with content blocks. + + TODO: expand docstring + + """ + if not self.supports_content_blocks_v1: + pytest.skip("Model does not support content blocks v1.") + + message = HumanMessage( + content=[ + { + "type": "text", + "text": "Write a detailed explanation of machine learning.", + } + ] + ) + + chunks = [] + for chunk in model.stream([message]): + chunks.append(chunk) + assert isinstance(chunk, (AIMessage, AIMessageChunk)) + + assert len(chunks) > 1 # Should receive multiple chunks + + # Aggregate chunks + final_message = chunks[0] + for chunk in chunks[1:]: + final_message = final_message + chunk + + assert isinstance(final_message.content, list) + + def test_error_handling_with_invalid_content_blocks( + self, model: BaseChatModel + ) -> None: + """Test error handling with various invalid content block configurations. 
+ + TODO: expand docstring + + """ + if not self.supports_content_blocks_v1: + pytest.skip("Model does not support content blocks v1.") + + test_cases = [ + {"type": "text"}, # Missing text field + {"type": "image"}, # Missing url/mime_type + {"type": "tool_call", "name": "test"}, # Missing args/id + ] + + for invalid_block in test_cases: + message = HumanMessage([invalid_block]) # type: ignore[list-item] + + # Should either handle gracefully or raise appropriate error + try: + result = model.invoke([message]) + assert isinstance(result, AIMessage) + except (ValueError, TypeError, KeyError) as e: + # Acceptable to raise validation errors + assert len(str(e)) > 0 + + async def test_async_content_blocks_processing(self, model: BaseChatModel) -> None: + """Test asynchronous processing of content blocks. + + TODO: expand docstring + + """ + if not self.supports_content_blocks_v1: + pytest.skip("Model does not support content blocks v1.") + + message = HumanMessage("Generate a creative story about space exploration.") + + result = await model.ainvoke([message]) + assert isinstance(result, AIMessage) + + def test_input_conversion_string(self, model: BaseChatModel) -> None: + """Test that string input is properly converted to messages. + + TODO: expand docstring + + """ + result = model.invoke("Test string input") + assert isinstance(result, AIMessage) + assert result.content is not None + + def test_input_conversion_empty_string(self, model: BaseChatModel) -> None: + """Test that empty string input is handled gracefully. + + TODO: expand docstring + + """ + result = model.invoke("") + assert isinstance(result, AIMessage) + + def test_input_conversion_message_v1_list(self, model: BaseChatModel) -> None: + """Test that v1 message list input is handled correctly. + + TODO: expand docstring + + """ + messages = [HumanMessage("Test message")] + result = model.invoke(messages) + assert isinstance(result, AIMessage) + assert result.content is not None + + def test_text_content_blocks_basic(self, model: BaseChatModel) -> None: + """Test that the model can handle the ``TextContentBlock`` format.""" + if not self.supports_text_content_blocks: + pytest.skip("Model does not support TextContentBlock (rare!)") + + text_block = create_text_block("Hello, world!") + message = HumanMessage(content=[text_block]) + + result = model.invoke([message]) + assert isinstance(result, AIMessage) + assert result.content is not None + + def test_mixed_content_blocks_basic(self, model: BaseChatModel) -> None: + """Test that the model can handle messages with mixed content blocks.""" + if not ( + self.supports_text_content_blocks and self.supports_image_content_blocks + ): + pytest.skip( + "Model doesn't support mixed content blocks (concurrent text and image)" + ) + + content_blocks: list[types.ContentBlock] = [ + create_text_block("Describe this image:"), + create_image_block( + base64="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==", + mime_type="image/png", + ), + ] + + message = HumanMessage(content=content_blocks) + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + assert result.content is not None + + def test_reasoning_content_blocks_basic(self, model: BaseChatModel) -> None: + """Test that the model can generate ``ReasoningContentBlock``. + + If your integration requires a reasoning parameter to be explicitly set, you + will need to override this test to set it appropriately. 
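+
+ A minimal sketch of such an override (the ``reasoning`` parameter name is an
+ assumption; use whatever keyword your integration expects):
+
+ .. code-block:: python
+
+ def test_reasoning_content_blocks_basic(self, model: BaseChatModel) -> None:
+ model = self.chat_model_class(
+ **{**self.chat_model_params, "reasoning": True}
+ )
+ super().test_reasoning_content_blocks_basic(model)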
+ + """ + if not self.supports_reasoning_content_blocks: + pytest.skip("Model does not support ReasoningContentBlock.") + + message = HumanMessage("Think step by step: What is 2 + 2?") + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + if isinstance(result.content, list): + reasoning_blocks = [ + block + for block in result.content + if isinstance(block, dict) and is_reasoning_block(block) + ] + assert len(reasoning_blocks) > 0, ( + "Expected reasoning content blocks but found none. " + f"Content blocks: {[block.get('type') for block in result.content]}" + ) + + def test_non_standard_content_blocks_basic(self, model: BaseChatModel) -> None: + """Test that the model can handle ``NonStandardContentBlock``.""" + if not self.supports_non_standard_blocks: + pytest.skip("Model does not support NonStandardContentBlock.") + + non_standard_block = create_non_standard_block( + { + "custom_field": "custom_value", + "data": [1, 2, 3], + } + ) + + message = HumanMessage(content=[non_standard_block]) + + # Should not raise an error + result = model.invoke([message]) + assert isinstance(result, AIMessage) + + def test_invalid_tool_call_handling_basic(self, model: BaseChatModel) -> None: + """Test that the model can handle ``InvalidToolCall`` blocks gracefully.""" + if not self.supports_invalid_tool_calls: + pytest.skip("Model does not support InvalidToolCall handling.") + + invalid_tool_call: InvalidToolCall = { + "type": "invalid_tool_call", + "name": "nonexistent_tool", + "args": None, + "id": "invalid_123", + "error": "Tool not found", + } + + # Create a message with invalid tool call in history + ai_message = AIMessage(content=[invalid_tool_call]) + follow_up = HumanMessage("Please try again with a valid approach.") + + result = model.invoke([ai_message, follow_up]) + assert isinstance(result, AIMessage) + assert result.content is not None + + def test_file_content_blocks_basic(self, model: BaseChatModel) -> None: + """Test that the model can handle ``FileContentBlock``.""" + if not self.supports_file_content_blocks: + pytest.skip("Model does not support FileContentBlock.") + + file_block = create_file_block( + base64="SGVsbG8sIHdvcmxkIQ==", # "Hello, world!" 
+ mime_type="text/plain", + ) + + message = HumanMessage(content=[file_block]) + result = model.invoke([message]) + + assert isinstance(result, AIMessage) + assert result.content is not None diff --git a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py index 320d2b491f1..a42201f2bce 100644 --- a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py @@ -12,17 +12,11 @@ from langchain_core.load import dumpd, load from langchain_core.runnables import RunnableBinding from langchain_core.tools import BaseTool, tool from pydantic import BaseModel, Field, SecretStr -from pydantic.v1 import ( - BaseModel as BaseModelV1, -) -from pydantic.v1 import ( - Field as FieldV1, -) -from pydantic.v1 import ( - ValidationError as ValidationErrorV1, -) +from pydantic.v1 import BaseModel as BaseModelV1 +from pydantic.v1 import Field as FieldV1 +from pydantic.v1 import ValidationError as ValidationErrorV1 from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] -from syrupy import SnapshotAssertion +from syrupy.assertion import SnapshotAssertion from langchain_tests.base import BaseStandardTests from langchain_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION @@ -32,6 +26,7 @@ def generate_schema_pydantic_v1_from_2() -> Any: """Use to generate a schema from v1 namespace in pydantic 2. :private: + """ if PYDANTIC_MAJOR_VERSION != 2: msg = "This function is only compatible with Pydantic v2." @@ -50,6 +45,7 @@ def generate_schema_pydantic() -> Any: """Works with either pydantic 1 or 2. :private: + """ class PersonA(BaseModel): @@ -71,6 +67,7 @@ class ChatModelTests(BaseStandardTests): """Base class for chat model tests. :private: + """ @property @@ -154,16 +151,12 @@ class ChatModelTests(BaseStandardTests): @property def supports_image_inputs(self) -> bool: - """(bool) whether the chat model supports image inputs, defaults to - ``False``. - """ + """(bool) whether the chat model supports image inputs, defaults to ``False``.""" # noqa: E501 return False @property def supports_image_urls(self) -> bool: - """(bool) whether the chat model supports image inputs from URLs, defaults to - ``False``. - """ + """(bool) whether the chat model supports image inputs from URLs, defaults to ``False``.""" # noqa: E501 return False @property @@ -173,23 +166,21 @@ class ChatModelTests(BaseStandardTests): @property def supports_audio_inputs(self) -> bool: - """(bool) whether the chat model supports audio inputs, defaults to - ``False``. - """ + """(bool) whether the chat model supports audio inputs, defaults to ``False``.""" # noqa: E501 return False @property def supports_video_inputs(self) -> bool: """(bool) whether the chat model supports video inputs, defaults to ``False``. + No current tests are written for this feature. + """ return False @property def returns_usage_metadata(self) -> bool: - """(bool) whether the chat model returns usage metadata on invoke and streaming - responses. - """ + """(bool) whether the chat model returns usage metadata on invoke and streaming responses.""" # noqa: E501 return True @property @@ -199,9 +190,7 @@ class ChatModelTests(BaseStandardTests): @property def supports_image_tool_message(self) -> bool: - """(bool) whether the chat model supports ToolMessages that include image - content. 
- """ + """(bool) whether the chat model supports ``ToolMessage``s that include image content.""" # noqa: E501 return False @property @@ -211,6 +200,7 @@ class ChatModelTests(BaseStandardTests): .. important:: See ``enable_vcr_tests`` dropdown :class:`above ` for more information. + """ return False @@ -267,7 +257,7 @@ class ChatModelUnitTests(ChatModelTests): API references for individual test methods include troubleshooting tips. - Test subclasses must implement the following two properties: + Test subclasses **must** implement the following two properties: chat_model_class The chat model class to test, e.g., ``ChatParrotLink``. @@ -299,7 +289,7 @@ class ChatModelUnitTests(ChatModelTests): Boolean property indicating whether the chat model supports tool calling. - By default, this is determined by whether the chat model's `bind_tools` method + By default, this is determined by whether the chat model's ``bind_tools`` method is overridden. It typically does not need to be overridden on the test class. Example override: @@ -401,7 +391,7 @@ class ChatModelUnitTests(ChatModelTests): Defaults to ``False``. If set to ``True``, the chat model will be tested using content blocks of the - form + form. .. code-block:: python @@ -437,7 +427,7 @@ class ChatModelUnitTests(ChatModelTests): URLs. Defaults to ``False``. If set to ``True``, the chat model will be tested using content blocks of the - form + form. .. code-block:: python @@ -463,7 +453,7 @@ class ChatModelUnitTests(ChatModelTests): Defaults to ``False``. If set to ``True``, the chat model will be tested using content blocks of the - form + form. .. code-block:: python @@ -490,7 +480,7 @@ class ChatModelUnitTests(ChatModelTests): Defaults to ``False``. If set to ``True``, the chat model will be tested using content blocks of the - form + form. .. code-block:: python @@ -519,10 +509,10 @@ class ChatModelUnitTests(ChatModelTests): .. dropdown:: returns_usage_metadata Boolean property indicating whether the chat model returns usage metadata - on invoke and streaming responses. + on invoke and streaming responses. Defaults to ``True``. - ``usage_metadata`` is an optional dict attribute on AIMessages that track input - and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html + ``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input + and output tokens. `See more. `__ Example: @@ -533,7 +523,7 @@ class ChatModelUnitTests(ChatModelTests): return False Models supporting ``usage_metadata`` should also return the name of the - underlying model in the ``response_metadata`` of the AIMessage. + underlying model in the ``response_metadata`` of the ``AIMessage``. .. dropdown:: supports_anthropic_inputs @@ -567,7 +557,7 @@ class ChatModelUnitTests(ChatModelTests): .. dropdown:: supports_image_tool_message - Boolean property indicating whether the chat model supports ToolMessages + Boolean property indicating whether the chat model supports ``ToolMessage``s that include image content, e.g., .. code-block:: python @@ -615,11 +605,11 @@ class ChatModelUnitTests(ChatModelTests): .. dropdown:: supported_usage_metadata_details - Property controlling what usage metadata details are emitted in both invoke - and stream. + Property controlling what usage metadata details are emitted in both ``invoke`` + and ``stream``. 
- ``usage_metadata`` is an optional dict attribute on AIMessages that track input - and output tokens: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html + ``usage_metadata`` is an optional dict attribute on ``AIMessage``s that track input + and output tokens. `See more. `__ It includes optional keys ``input_token_details`` and ``output_token_details`` that can track usage details associated with special types of tokens, such as @@ -812,6 +802,7 @@ class ChatModelUnitTests(ChatModelTests): def init_from_env_params(self) -> tuple[dict, dict, dict]: """(tuple) environment variables, additional initialization args, and expected instance attributes for testing initialization from environment variables. + """ return {}, {}, {} @@ -823,7 +814,8 @@ class ChatModelUnitTests(ChatModelTests): If this test fails, ensure that: 1. ``chat_model_params`` is specified and the model can be initialized from those params; - 2. The model accommodates standard parameters: https://python.langchain.com/docs/concepts/chat_models/#standard-parameters + 2. The model accommodates `standard parameters `__ + """ # noqa: E501 model = self.chat_model_class( **{ @@ -843,6 +835,7 @@ class ChatModelUnitTests(ChatModelTests): If this test fails, ensure that ``init_from_env_params`` is specified correctly and that model parameters are properly set from environment variables during initialization. + """ env_params, model_params, expected_attrs = self.init_from_env_params if not env_params: @@ -867,6 +860,7 @@ class ChatModelUnitTests(ChatModelTests): If this test fails, ensure that the model can be initialized with a boolean ``streaming`` parameter. + """ model = self.chat_model_class( **{ @@ -893,6 +887,7 @@ class ChatModelUnitTests(ChatModelTests): a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html See example implementation of ``bind_tools`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.bind_tools + """ if not self.has_tool_calling: return @@ -933,6 +928,7 @@ class ChatModelUnitTests(ChatModelTests): a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ if not self.has_structured_output: return @@ -955,6 +951,7 @@ class ChatModelUnitTests(ChatModelTests): Check also that the model class is named according to convention (e.g., ``ChatProviderName``). + """ class ExpectedParams(BaseModelV1): @@ -992,6 +989,7 @@ class ChatModelUnitTests(ChatModelTests): If this test fails, check that the ``init_from_env_params`` property is correctly set on the test class. + """ if not self.chat_model_class.is_lc_serializable(): pytest.skip("Model is not serializable.") @@ -1011,6 +1009,7 @@ class ChatModelUnitTests(ChatModelTests): def test_init_time(self, benchmark: BenchmarkFixture) -> None: """Test initialization time of the chat model. If this test fails, check that we are not introducing undue overhead in the model's initialization. 
+ """ def _init_in_loop() -> None: diff --git a/libs/standard-tests/langchain_tests/unit_tests/chat_models_v1.py b/libs/standard-tests/langchain_tests/unit_tests/chat_models_v1.py new file mode 100644 index 00000000000..26e3afcf640 --- /dev/null +++ b/libs/standard-tests/langchain_tests/unit_tests/chat_models_v1.py @@ -0,0 +1,934 @@ +""":autodoc-options: autoproperty. + +Standard unit tests for chat models supporting v1 messages. + +This module provides updated test patterns for the new messages introduced in +``langchain_core.messages.content_blocks``. Notably, this includes the standardized +content blocks system. +""" + +import inspect +import os +from abc import abstractmethod +from typing import Any, Literal, Optional +from unittest import mock + +import pytest +from langchain_core.load import dumpd, load +from langchain_core.messages.content_blocks import ( + create_text_block, +) +from langchain_core.runnables import RunnableBinding +from langchain_core.tools import BaseTool, tool +from langchain_core.v1.chat_models import BaseChatModel +from langchain_core.v1.messages import HumanMessage +from pydantic import BaseModel, Field, SecretStr +from pydantic.v1 import BaseModel as BaseModelV1 +from pydantic.v1 import Field as FieldV1 +from pydantic.v1 import ValidationError as ValidationErrorV1 +from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] +from syrupy.assertion import SnapshotAssertion + +from langchain_tests.base import BaseStandardTests +from langchain_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION + + +def generate_schema_pydantic_v1_from_2() -> Any: + """Use to generate a schema from v1 namespace in pydantic 2. + + :private: + """ + if PYDANTIC_MAJOR_VERSION != 2: + msg = "This function is only compatible with Pydantic v2." + raise AssertionError(msg) + + class PersonB(BaseModelV1): + """Record attributes of a person.""" + + name: str = FieldV1(..., description="The name of the person.") + age: int = FieldV1(..., description="The age of the person.") + + return PersonB + + +def generate_schema_pydantic() -> Any: + """Works with either pydantic 1 or 2. + + :private: + """ + + class PersonA(BaseModel): + """Record attributes of a person.""" + + name: str = Field(..., description="The name of the person.") + age: int = Field(..., description="The age of the person.") + + return PersonA + + +TEST_PYDANTIC_MODELS = [generate_schema_pydantic()] + +if PYDANTIC_MAJOR_VERSION == 2: + TEST_PYDANTIC_MODELS.append(generate_schema_pydantic_v1_from_2()) + + +class ChatModelV1Tests(BaseStandardTests): + """Test suite for v1 chat models. + + This class provides comprehensive testing for the new message system introduced in + LangChain v1, including the standardized content block format. + + :private: + """ + + @property + @abstractmethod + def chat_model_class(self) -> type[BaseChatModel]: + """The chat model class to test, e.g., ``ChatParrotLink``. + + .. important:: + Test subclasses **must** implement this property. + + """ + ... + + @property + def chat_model_params(self) -> dict: + """Initialization parameters for the chat model to test. + + .. important:: + Test subclasses **must** implement this property. 
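+
+        Example (parameter values are illustrative):
+
+        .. code-block:: python
+
+            @property
+            def chat_model_params(self) -> dict:
+                return {"model": "bird-brain-001", "temperature": 0}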
+ + """ + return {} + + @property + def standard_chat_model_params(self) -> dict: + """:private:""" + return { + "temperature": 0, + "max_tokens": 100, + "timeout": 60, + "stop": [], + "max_retries": 2, + } + + @pytest.fixture + def model(self) -> BaseChatModel: + """:private:""" + return self.chat_model_class( + **{ + **self.standard_chat_model_params, + **self.chat_model_params, + } + ) + + @pytest.fixture + def my_adder_tool(self) -> BaseTool: + """:private:""" + + @tool + def my_adder_tool(a: int, b: int) -> int: + """Takes two integers, a and b, and returns their sum.""" + return a + b + + return my_adder_tool + + @property + def has_tool_calling(self) -> bool: + """Whether the model supports tool calling.""" + return self.chat_model_class.bind_tools is not BaseChatModel.bind_tools + + @property + def tool_choice_value(self) -> Optional[str]: + """(None or str) To use for tool choice when used in tests. + + Not required. + + """ + return None + + @property + def has_tool_choice(self) -> bool: + """Whether the model supports forcing tool calling via ``tool_choice``.""" + bind_tools_params = inspect.signature( + self.chat_model_class.bind_tools + ).parameters + return "tool_choice" in bind_tools_params + + @property + def has_structured_output(self) -> bool: + """Whether the model supports structured output.""" + return ( + self.chat_model_class.with_structured_output + is not BaseChatModel.with_structured_output + ) or self.has_tool_calling + + @property + def structured_output_kwargs(self) -> dict: + """Additional kwargs for ``with_structured_output``. + + Example: ``{"method": "json_schema", "strict": True}`` + + """ + return {} + + @property + def supports_json_mode(self) -> bool: + """Whether the model supports JSON mode. + + TODO: clarify what this means exactly. + + Defaults to False. + + """ + return False + + # Content Block Support Properties + @property + def supports_content_blocks_v1(self) -> bool: + """Whether the model supports content blocks v1 format. + + **Defaults to True.** + + .. important:: + This should not be overridden by a ChatV1 subclass. + + You may override the following properties to enable specific content block + support. Each defaults to False: + + - ``supports_reasoning_content_blocks`` + - ``supports_plaintext_content_blocks`` + - ``supports_file_content_blocks`` + - ``supports_image_content_blocks`` + - ``supports_audio_content_blocks`` + - ``supports_video_content_blocks`` + - ``supports_citations`` + - ``supports_web_search_blocks`` + - ``supports_invalid_tool_calls`` + + """ + return True + + @property + def supports_non_standard_blocks(self) -> bool: + """Whether the model supports ``NonStandardContentBlock``. + + Defaults to True. + + """ + return True + + @property + def supports_text_content_blocks(self) -> bool: + """Whether the model supports ``TextContentBlock``. + + .. important:: + This is a minimum requirement for v1 chat models. + + """ + return self.supports_content_blocks_v1 + + @property + def supports_reasoning_content_blocks(self) -> bool: + """Whether the model supports ``ReasoningContentBlock``. + + Defaults to False. + + """ + return False + + @property + def supports_plaintext_content_blocks(self) -> bool: + """Whether the model supports ``PlainTextContentBlock``. + + Defaults to False. + + """ + return False + + @property + def supports_file_content_blocks(self) -> bool: + """Whether the model supports ``FileContentBlock``. + + Replaces ``supports_pdf_inputs`` from v0. + + Defaults to False. 
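+
+        Example override:
+
+        .. code-block:: python
+
+            @property
+            def supports_file_content_blocks(self) -> bool:
+                return True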
+ + """ + return False + + @property + def supports_image_content_blocks(self) -> bool: + """Whether the model supports ``ImageContentBlock``. + + Replaces ``supports_image_inputs`` from v0. + + Defaults to False. + + """ + return False + + @property + def supports_audio_content_blocks(self) -> bool: + """Whether the model supports ``AudioContentBlock``. + + Replaces ``supports_audio_inputs`` from v0. + + Defaults to False. + + """ + return False + + @property + def supports_video_content_blocks(self) -> bool: + """Whether the model supports ``VideoContentBlock``. + + Replaces ``supports_video_inputs`` from v0. + + Defaults to False. + + """ + return False + + @property + def supports_multimodal_reasoning(self) -> bool: + """Whether the model can reason about multimodal content.""" + return ( + self.supports_image_content_blocks + and self.supports_reasoning_content_blocks + ) + + @property + def supports_citations(self) -> bool: + """Whether the model supports ``Citation`` annotations. + + Defaults to False. + + """ + return False + + @property + def supports_structured_citations(self) -> bool: + """Whether the model supports structured citation generation.""" + return self.supports_citations + + @property + def supports_web_search_blocks(self) -> bool: + """Whether the model supports ``WebSearchCall``/``WebSearchResult`` blocks. + + Defaults to False. + + """ + return False + + @property + def supports_code_interpreter(self) -> bool: + """Whether the model supports code interpreter blocks. + + Defaults to False. + + """ + return False + + @property + def supports_invalid_tool_calls(self) -> bool: + """Whether the model can handle ``InvalidToolCall`` blocks. + + Defaults to False. + + """ + return False + + @property + def returns_usage_metadata(self) -> bool: + """Whether the model returns usage metadata on invoke and streaming. + + Defaults to True. + + """ + return True + + @property + def supports_anthropic_inputs(self) -> bool: + """Whether the model supports Anthropic-style inputs. + + Defaults to False. + + """ + return False + + @property + def enable_vcr_tests(self) -> bool: + """Whether to enable VCR tests for the chat model. + + .. important:: + See ``enable_vcr_tests`` dropdown :class:`above ` for more + information. + + Defaults to False. + + """ + return False + + # TODO: check this, since there is `reasoning_output` in usage metadata details ? + @property + def supported_usage_metadata_details( + self, + ) -> dict[ + Literal["invoke", "stream"], + list[ + Literal[ + "audio_input", + "audio_output", + "reasoning_output", + "cache_read_input", + "cache_creation_input", + ] + ], + ]: + """What usage metadata details are emitted in ``invoke()`` and ``stream()``.""" + return {"invoke": [], "stream": []} + + +class ChatModelV1UnitTests(ChatModelV1Tests): + """Base class for chat model v1 unit tests. + + These tests run in isolation without external dependencies. + + Test subclasses must implement the ``chat_model_class`` and + ``chat_model_params`` properties to specify what model to test and its + initialization parameters. + + Example: + + .. 
code-block:: python + + from typing import Type + + from langchain_tests.unit_tests import ChatModelV1UnitTests + from my_package.chat_models import MyChatModel + + + class TestMyChatModelUnit(ChatModelV1UnitTests): + @property + def chat_model_class(self) -> Type[MyChatModel]: + # Return the chat model class to test here + return MyChatModel + + @property + def chat_model_params(self) -> dict: + # Return initialization parameters for the v1 model. + return {"model": "model-001", "temperature": 0} + + .. note:: + API references for individual test methods include troubleshooting tips. + + .. important:: + Test subclasses **must** implement the following two properties: + + chat_model_class + The chat model class to test, e.g., ``ChatParrotLinkV1``. + + Example: + + .. code-block:: python + + @property + def chat_model_class(self) -> Type[ChatParrotLinkV1]: + return ChatParrotLinkV1 + + chat_model_params + Initialization parameters for the chat model. + + Example: + + .. code-block:: python + + @property + def chat_model_params(self) -> dict: + return {"model": "bird-brain-001", "temperature": 0} + + In addition, test subclasses can control what features are tested (such as tool + calling or multi-modality) by selectively overriding the following properties. + Expand to see details: + + .. dropdown:: has_tool_calling + + TODO + + .. dropdown:: tool_choice_value + + TODO + + .. dropdown:: has_tool_choice + + TODO + + .. dropdown:: has_structured_output + + TODO + + .. dropdown:: structured_output_kwargs + + TODO + + .. dropdown:: supports_json_mode + + TODO + + .. dropdown:: returns_usage_metadata + + TODO + + .. dropdown:: supports_anthropic_inputs + + TODO + + .. dropdown:: supported_usage_metadata_details + + TODO + + .. dropdown:: enable_vcr_tests + + Property controlling whether to enable select tests that rely on + `VCR `_ caching of HTTP calls, such + as benchmarking tests. + + To enable these tests, follow these steps: + + 1. Override the ``enable_vcr_tests`` property to return ``True``: + + .. code-block:: python + + @property + def enable_vcr_tests(self) -> bool: + return True + + 2. Configure VCR to exclude sensitive headers and other information from cassettes. + + .. important:: + VCR will by default record authentication headers and other sensitive + information in cassettes. Read below for how to configure what + information is recorded in cassettes. + + To add configuration to VCR, add a ``conftest.py`` file to the ``tests/`` + directory and implement the ``vcr_config`` fixture there. + + ``langchain-tests`` excludes the headers ``'authorization'``, + ``'x-api-key'``, and ``'api-key'`` from VCR cassettes. To pick up this + configuration, you will need to add ``conftest.py`` as shown below. You can + also exclude additional headers, override the default exclusions, or apply + other customizations to the VCR configuration. See example below: + + .. code-block:: python + :caption: tests/conftest.py + + import pytest + from langchain_tests.conftest import _base_vcr_config as _base_vcr_config + + _EXTRA_HEADERS = [ + # Specify additional headers to redact + ("user-agent", "PLACEHOLDER"), + ] + + + def remove_response_headers(response: dict) -> dict: + # If desired, remove or modify headers in the response. 
+ response["headers"] = {} + return response + + + @pytest.fixture(scope="session") + def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811 + \"\"\"Extend the default configuration from langchain_tests.\"\"\" + config = _base_vcr_config.copy() + config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS) + config["before_record_response"] = remove_response_headers + + return config + + .. dropdown:: Compressing cassettes + + ``langchain-tests`` includes a custom VCR serializer that compresses + cassettes using gzip. To use it, register the ``yaml.gz`` serializer + to your VCR fixture and enable this serializer in the config. See + example below: + + .. code-block:: python + :caption: tests/conftest.py + + import pytest + from langchain_tests.conftest import CustomPersister, CustomSerializer + from langchain_tests.conftest import _base_vcr_config as _base_vcr_config + from vcr import VCR + + _EXTRA_HEADERS = [ + # Specify additional headers to redact + ("user-agent", "PLACEHOLDER"), + ] + + + def remove_response_headers(response: dict) -> dict: + # If desired, remove or modify headers in the response. + response["headers"] = {} + return response + + + @pytest.fixture(scope="session") + def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811 + \"\"\"Extend the default configuration from langchain_tests.\"\"\" + config = _base_vcr_config.copy() + config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS) + config["before_record_response"] = remove_response_headers + # New: enable serializer and set file extension + config["serializer"] = "yaml.gz" + config["path_transformer"] = VCR.ensure_suffix(".yaml.gz") + + return config + + + def pytest_recording_configure(config: dict, vcr: VCR) -> None: + vcr.register_persister(CustomPersister()) + vcr.register_serializer("yaml.gz", CustomSerializer()) + + + You can inspect the contents of the compressed cassettes (e.g., to + ensure no sensitive information is recorded) using + + .. code-block:: bash + + gunzip -k /path/to/tests/cassettes/TestClass_test.yaml.gz + + or by using the serializer: + + .. code-block:: python + + from langchain_tests.conftest import CustomPersister, CustomSerializer + + cassette_path = "/path/to/tests/cassettes/TestClass_test.yaml.gz" + requests, responses = CustomPersister().load_cassette(path, CustomSerializer()) + + 3. Run tests to generate VCR cassettes. + + Example: + + .. code-block:: bash + + uv run python -m pytest tests/integration_tests/test_chat_models.py::TestMyModel::test_stream_time + + This will generate a VCR cassette for the test in + ``tests/integration_tests/cassettes/``. + + .. important:: + You should inspect the generated cassette to ensure that it does not + contain sensitive information. If it does, you can modify the + ``vcr_config`` fixture to exclude headers or modify the response + before it is recorded. + + You can then commit the cassette to your repository. Subsequent test runs + will use the cassette instead of making HTTP calls. + + Testing initialization from environment variables + Some unit tests may require testing initialization from environment variables. + These tests can be enabled by overriding the ``init_from_env_params`` + property (see below): + + .. dropdown:: init_from_env_params + + This property is used in unit tests to test initialization from + environment variables. It should return a tuple of three dictionaries + that specify the environment variables, additional initialization args, + and expected instance attributes to check. 
+ + Defaults to empty dicts. If not overridden, the test is skipped. + + Example: + + .. code-block:: python + + @property + def init_from_env_params(self) -> Tuple[dict, dict, dict]: + return ( + { + "MY_API_KEY": "api_key", + }, + { + "model": "bird-brain-001", + }, + { + "my_api_key": "api_key", + }, + ) + + """ # noqa: E501 + + @property + def standard_chat_model_params(self) -> dict: + """:private:""" + params = super().standard_chat_model_params + params["api_key"] = "test" + return params + + @property + def init_from_env_params(self) -> tuple[dict, dict, dict]: + """Environment variables, additional initialization args, and expected + instance attributes for testing initialization from environment variables. + + Not required. + + """ + return {}, {}, {} + + # Initialization Tests + def test_init(self) -> None: + """Test model initialization. This should pass for all integrations. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that: + + 1. ``chat_model_params`` is specified and the model can be initialized from those params; + 2. The model accommodates `standard parameters `__ + + """ # noqa: E501 + model = self.chat_model_class( + **{ + **self.standard_chat_model_params, + **self.chat_model_params, + } + ) + assert model is not None + + def test_init_from_env(self) -> None: + """Test initialization from environment variables. Relies on the + ``init_from_env_params`` property. Test is skipped if that property is not + set. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that ``init_from_env_params`` is specified + correctly and that model parameters are properly set from environment + variables during initialization. + + """ + env_params, model_params, expected_attrs = self.init_from_env_params + if not env_params: + pytest.skip("init_from_env_params not specified.") + else: + with mock.patch.dict(os.environ, env_params): + model = self.chat_model_class(**model_params) + assert model is not None + for k, expected in expected_attrs.items(): + actual = getattr(model, k) + if isinstance(actual, SecretStr): + actual = actual.get_secret_value() + assert actual == expected + + def test_init_streaming( + self, + ) -> None: + """Test that model can be initialized with ``streaming=True``. This is for + backward-compatibility purposes. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that the model can be initialized with a + boolean ``streaming`` parameter. + + """ + model = self.chat_model_class( + **{ + **self.standard_chat_model_params, + **self.chat_model_params, + "streaming": True, + } + ) + assert model is not None + + def test_bind_tool_pydantic( + self, + model: BaseChatModel, + my_adder_tool: BaseTool, + ) -> None: + """Test that chat model correctly handles Pydantic models that are passed + into ``bind_tools``. Test is skipped if the ``has_tool_calling`` property + on the test class is False. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that the model's ``bind_tools`` method + properly handles Pydantic V2 models. 
``langchain_core`` implements + a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html + + See example implementation of ``bind_tools`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.bind_tools + + """ + if not self.has_tool_calling: + return + + def my_adder(a: int, b: int) -> int: + """Takes two integers, a and b, and returns their sum.""" + return a + b + + tools = [my_adder_tool, my_adder] + + for pydantic_model in TEST_PYDANTIC_MODELS: + model_schema = ( + pydantic_model.model_json_schema() + if hasattr(pydantic_model, "model_json_schema") + else pydantic_model.schema() + ) + tools.extend([pydantic_model, model_schema]) + + # Doing a mypy ignore here since some of the tools are from pydantic + # BaseModel 2 which isn't typed properly yet. This will need to be fixed + # so type checking does not become annoying to users. + tool_model = model.bind_tools(tools, tool_choice="any") # type: ignore[arg-type] + assert isinstance(tool_model, RunnableBinding) + + @pytest.mark.parametrize("schema", TEST_PYDANTIC_MODELS) + def test_with_structured_output( + self, + model: BaseChatModel, + schema: Any, + ) -> None: + """Test ``with_structured_output`` method. Test is skipped if the + ``has_structured_output`` property on the test class is False. + + .. dropdown:: Troubleshooting + + If this test fails, ensure that the model's ``bind_tools`` method + properly handles Pydantic V2 models. ``langchain_core`` implements + a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html + + See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + + """ + if not self.has_structured_output: + return + + assert model.with_structured_output(schema) is not None + for method in ["json_schema", "function_calling", "json_mode"]: + strict_values = [None, False, True] if method != "json_mode" else [None] + for strict in strict_values: + assert model.with_structured_output( + schema, method=method, strict=strict + ) + + def test_standard_params(self, model: BaseChatModel) -> None: + """Test that model properly generates standard parameters. These are used + for tracing purposes. + + .. dropdown:: Troubleshooting + + If this test fails, check that the model accommodates `standard parameters `__. + + Check also that the model class is named according to convention + (e.g., ``ChatProviderName``). 
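+
+            A conforming ``_get_ls_params()`` typically returns a mapping along
+            these lines (provider name and values are illustrative):
+
+            .. code-block:: python
+
+                {
+                    "ls_provider": "parrotlink",
+                    "ls_model_name": "bird-brain-001",
+                    "ls_model_type": "chat",
+                    "ls_temperature": 0.0,
+                    "ls_max_tokens": 100,
+                    "ls_stop": None,
+                }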
+ """ + + class ExpectedParams(BaseModelV1): + ls_provider: str + ls_model_name: str + ls_model_type: Literal["chat"] + ls_temperature: Optional[float] + ls_max_tokens: Optional[int] + ls_stop: Optional[list[str]] + + ls_params = model._get_ls_params() + try: + ExpectedParams(**ls_params) # type: ignore[arg-type] + except ValidationErrorV1 as e: + pytest.fail(f"Validation error: {e}") + + # Test optional params + model = self.chat_model_class( + max_tokens=10, # type: ignore[call-arg] + stop=["test"], # type: ignore[call-arg] + **self.chat_model_params, + ) + ls_params = model._get_ls_params() + try: + ExpectedParams(**ls_params) # type: ignore[arg-type] + except ValidationErrorV1 as e: + pytest.fail(f"Validation error: {e}") + + def test_serdes(self, model: BaseChatModel, snapshot: SnapshotAssertion) -> None: + """Test serialization and deserialization of the model. Test is skipped if the + ``is_lc_serializable`` property on the chat model class is not overwritten + to return ``True``. + + .. dropdown:: Troubleshooting + + If this test fails, check that the ``init_from_env_params`` property is + correctly set on the test class. + """ + if not self.chat_model_class.is_lc_serializable(): + pytest.skip("Model is not serializable.") + else: + env_params, _model_params, _expected_attrs = self.init_from_env_params + with mock.patch.dict(os.environ, env_params): + ser = dumpd(model) + assert ser == snapshot(name="serialized") + assert ( + model.model_dump() + == load( + dumpd(model), valid_namespaces=model.get_lc_namespace()[:1] + ).model_dump() + ) + + @pytest.mark.benchmark + def test_init_time(self, benchmark: BenchmarkFixture) -> None: + """Test initialization time of the chat model. If this test fails, check that + we are not introducing undue overhead in the model's initialization. + """ + + def _init_in_loop() -> None: + for _ in range(10): + self.chat_model_class(**self.chat_model_params) + + benchmark(_init_in_loop) + + # Property Tests + def test_llm_type_property(self, model: BaseChatModel) -> None: + """Test that ``_llm_type`` property is implemented and returns a string.""" + llm_type = model._llm_type + assert isinstance(llm_type, str) + assert len(llm_type) > 0 + + def test_identifying_params_property(self, model: BaseChatModel) -> None: + """Test that ``_identifying_params`` property returns a mapping.""" + params = model._identifying_params + assert isinstance(params, dict) # Should be dict-like mapping + + # Serialization Tests + def test_dump_serialization(self, model: BaseChatModel) -> None: + """Test that ``dump()`` returns proper serialization.""" + dumped = model.dump() + assert isinstance(dumped, dict) + assert "_type" in dumped + assert dumped["_type"] == model._llm_type + + # Should contain identifying parameters + for key, value in model._identifying_params.items(): + assert key in dumped + assert dumped[key] == value + + def test_content_block_serialization(self, model: BaseChatModel) -> None: + """Test that messages with content blocks can be serialized/deserialized.""" + if not self.supports_content_blocks_v1: + pytest.skip("Model does not support v1 content blocks.") + + text_block = create_text_block("Test serialization") + message = HumanMessage(content=[text_block]) + + # Test serialization + serialized = dumpd(message) + assert isinstance(serialized, dict) + + # Test deserialization + deserialized = load(serialized) + assert isinstance(deserialized, HumanMessage) + assert deserialized.content == message.content + # TODO: make more robust? 
include more fields diff --git a/libs/standard-tests/tests/unit_tests/custom_chat_model.py b/libs/standard-tests/tests/unit_tests/custom_chat_model.py index cc9be763989..737745ec27e 100644 --- a/libs/standard-tests/tests/unit_tests/custom_chat_model.py +++ b/libs/standard-tests/tests/unit_tests/custom_chat_model.py @@ -1,15 +1,9 @@ from collections.abc import Iterator from typing import Any, Optional -from langchain_core.callbacks import ( - CallbackManagerForLLMRun, -) +from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models import BaseChatModel -from langchain_core.messages import ( - AIMessage, - AIMessageChunk, - BaseMessage, -) +from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage from langchain_core.messages.ai import UsageMetadata from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from pydantic import Field diff --git a/libs/standard-tests/tests/unit_tests/custom_chat_model_v1.py b/libs/standard-tests/tests/unit_tests/custom_chat_model_v1.py new file mode 100644 index 00000000000..1739b393a10 --- /dev/null +++ b/libs/standard-tests/tests/unit_tests/custom_chat_model_v1.py @@ -0,0 +1,259 @@ +"""``ChatParrotLinkV1`` implementation for standard-tests with v1 messages. + +This module provides a test implementation of ``BaseChatModel`` that supports the new +v1 message format with content blocks. +""" + +from collections.abc import AsyncIterator, Iterator +from typing import Any, Optional, cast + +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun +from langchain_core.messages.ai import UsageMetadata +from langchain_core.v1.chat_models import BaseChatModel +from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1 +from pydantic import Field + + +class ChatParrotLinkV1(BaseChatModel): + """A custom v1 chat model that echoes input with content blocks support. + + This model is designed for testing the v1 message format and content blocks. Echoes + the first ``parrot_buffer_length`` characters of the input and returns them as + proper v1 content blocks. + + Example: + .. code-block:: python + + model = ChatParrotLinkV1(parrot_buffer_length=10, model="parrot-v1") + result = model.invoke([HumanMessage(content="hello world")]) + # Returns AIMessage with content blocks format + """ + + model_name: str = Field(alias="model") + """The name of the model.""" + temperature: Optional[float] = None + max_tokens: Optional[int] = None + timeout: Optional[int] = None + stop: Optional[list[str]] = None + max_retries: int = 2 + + parrot_buffer_length: int = Field(default=50) + """The number of characters from the last message to echo.""" + + def _invoke( + self, + messages: list[MessageV1], + **kwargs: Any, + ) -> AIMessage: + """Generate a response by echoing the input as content blocks. + + Args: + messages: List of v1 messages to process. + **kwargs: Additional generation parameters. + + Returns: + AIMessage with content blocks format. 
+ """ + _ = kwargs # Mark as used + + if not messages: + return AIMessage("No input provided") + + last_message = messages[-1] + + # Extract text content from the message + text_content = "" + for block in last_message.content: + if isinstance(block, dict) and block.get("type") == "text": + text_content += str(block.get("text", "")) + + # Echo the first parrot_buffer_length characters + echoed_text = text_content[: self.parrot_buffer_length] + + # Calculate usage metadata + total_input_chars = sum( + len(str(msg.content)) + if isinstance(msg.content, str) + else ( + sum(len(str(block)) for block in msg.content) + if isinstance(msg.content, list) + else 0 + ) + for msg in messages + ) + + usage_metadata = UsageMetadata( + input_tokens=total_input_chars, + output_tokens=len(echoed_text), + total_tokens=total_input_chars + len(echoed_text), + ) + + return AIMessage( + content=echoed_text, + response_metadata=cast( + Any, + { + "model_name": self.model_name, + "time_in_seconds": 0.1, + }, + ), + usage_metadata=usage_metadata, + ) + + def _stream( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunk]: + """Stream the response by yielding character chunks. + + Args: + messages: List of v1 messages to process. + stop: Stop sequences (unused in this implementation). + run_manager: Callback manager for the LLM run. + **kwargs: Additional generation parameters. + + Yields: + AIMessageChunk objects with individual characters. + """ + _ = stop # Mark as used + _ = kwargs # Mark as used + + if not messages: + yield AIMessageChunk("No input provided") + return + + last_message = messages[-1] + + # Extract text content from the message + text_content = "" + # Extract text from content blocks + for block in last_message.content: + if isinstance(block, dict) and block.get("type") == "text": + text_content += str(block.get("text", "")) + + # Echo the first parrot_buffer_length characters + echoed_text = text_content[: self.parrot_buffer_length] + + # Calculate total input for usage metadata + total_input_chars = sum( + len(str(msg.content)) + if isinstance(msg.content, str) + else ( + sum(len(str(block)) for block in msg.content) + if isinstance(msg.content, list) + else 0 + ) + for msg in messages + ) + + # Stream each character as a chunk + for i, char in enumerate(echoed_text): + usage_metadata = UsageMetadata( + input_tokens=total_input_chars if i == 0 else 0, + output_tokens=1, + total_tokens=total_input_chars + 1 if i == 0 else 1, + ) + + chunk = AIMessageChunk( + content=char, + usage_metadata=usage_metadata, + ) + + if run_manager: + run_manager.on_llm_new_token(char, chunk=chunk) + + yield chunk + + # Final chunk with response metadata + final_chunk = AIMessageChunk( + content=[], + response_metadata=cast( + Any, + { + "model_name": self.model_name, + "time_in_seconds": 0.1, + }, + ), + ) + yield final_chunk + + async def _ainvoke( + self, + messages: list[MessageV1], + **kwargs: Any, + ) -> AIMessage: + """Async generate a response (delegates to sync implementation). + + Args: + messages: List of v1 messages to process. + **kwargs: Additional generation parameters. + + Returns: + AIMessage with content blocks format. 
+ """ + # For simplicity, delegate to sync implementation + return self._invoke(messages, **kwargs) + + async def _astream( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunk]: + """Async stream the response (delegates to sync implementation). + + Args: + messages: List of v1 messages to process. + stop: Stop sequences (unused in this implementation). + run_manager: Async callback manager for the LLM run. + **kwargs: Additional generation parameters. + + Yields: + AIMessageChunk objects with individual characters. + """ + # For simplicity, delegate to sync implementation + for chunk in self._stream(messages, stop, None, **kwargs): + yield chunk + + @property + def _llm_type(self) -> str: + """Get the type of language model used by this chat model.""" + return "parrot-chat-model-v1" + + @property + def _identifying_params(self) -> dict[str, Any]: + """Return a dictionary of identifying parameters.""" + return { + "model_name": self.model_name, + "parrot_buffer_length": self.parrot_buffer_length, + } + + def get_token_ids(self, text: str) -> list[int]: + """Convert text to token IDs using simple character-based tokenization. + + For testing purposes, we use a simple approach where each character + maps to its ASCII/Unicode code point. + + Args: + text: The text to tokenize. + + Returns: + List of token IDs (character code points). + """ + return [ord(char) for char in text] + + def get_num_tokens(self, text: str) -> int: + """Get the number of tokens in the text. + + Args: + text: The text to count tokens for. + + Returns: + Number of tokens (characters in this simple implementation). + """ + return len(text) diff --git a/libs/standard-tests/tests/unit_tests/test_custom_chat_model_v1.py b/libs/standard-tests/tests/unit_tests/test_custom_chat_model_v1.py new file mode 100644 index 00000000000..7b6dc556a06 --- /dev/null +++ b/libs/standard-tests/tests/unit_tests/test_custom_chat_model_v1.py @@ -0,0 +1,117 @@ +"""Test the standard v1 tests on the ``ChatParrotLinkV1`` custom chat model.""" + +import pytest + +from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests + +from .custom_chat_model_v1 import ChatParrotLinkV1 + + +class TestChatParrotLinkV1Unit(ChatModelV1UnitTests): + """Unit tests for ``ChatParrotLinkV1`` using the standard v1 test suite.""" + + @property + def chat_model_class(self) -> type[ChatParrotLinkV1]: + """Return the chat model class to test.""" + return ChatParrotLinkV1 + + @property + def chat_model_params(self) -> dict: + """Return the parameters for initializing the chat model.""" + return { + "model": "parrot-v1-test", + "parrot_buffer_length": 20, + "temperature": 0.0, + } + + @pytest.fixture + def model(self) -> ChatParrotLinkV1: + """Create a model instance for testing.""" + return self.chat_model_class(**self.chat_model_params) + + # Override property methods to match ChatParrotLinkV1 capabilities + @property + def has_tool_calling(self) -> bool: + """``ChatParrotLinkV1`` does not support tool calling.""" + return False + + @property + def has_structured_output(self) -> bool: + """``ChatParrotLinkV1`` does not support structured output.""" + return False + + @property + def supports_json_mode(self) -> bool: + """``ChatParrotLinkV1`` does not support JSON mode.""" + return False + + @property + def supports_content_blocks_v1(self) -> bool: + """``ChatParrotLinkV1`` supports content blocks v1 format.""" + return 
True + + @property + def supports_text_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` supports ``TextContentBlock``.""" + return True + + @property + def supports_non_standard_blocks(self) -> bool: + """``ChatParrotLinkV1`` can handle ``NonStandardContentBlock`` gracefully.""" + return True + + # All other content block types are not supported by ChatParrotLinkV1 + @property + def supports_reasoning_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not generate ``ReasoningContentBlock``.""" + return False + + @property + def supports_plaintext_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not support ``PlainTextContentBlock``.""" + return False + + @property + def supports_file_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not support ``FileContentBlock``.""" + return False + + @property + def supports_image_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not support ``ImageContentBlock``.""" + return False + + @property + def supports_audio_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not support ``AudioContentBlock``.""" + return False + + @property + def supports_video_content_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not support ``VideoContentBlock``.""" + return False + + @property + def supports_citations(self) -> bool: + """``ChatParrotLinkV1`` does not support citations.""" + return False + + @property + def supports_web_search_blocks(self) -> bool: + """``ChatParrotLinkV1`` does not support web search blocks.""" + return False + + @property + def supports_tool_calls(self) -> bool: + """``ChatParrotLinkV1`` does not support tool calls.""" + return False + + @property + def supports_invalid_tool_calls(self) -> bool: + """``ChatParrotLinkV1`` does not support ``InvalidToolCall`` handling.""" + return False + + @property + def supports_tool_call_chunks(self) -> bool: + """``ChatParrotLinkV1`` does not support ``ToolCallChunk`` blocks.""" + return False diff --git a/libs/standard-tests/uv.lock b/libs/standard-tests/uv.lock index 9a0d8a21d3c..63f0b9cffc7 100644 --- a/libs/standard-tests/uv.lock +++ b/libs/standard-tests/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.9" resolution-markers = [ "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", @@ -305,7 +305,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.72" +version = "0.4.0.dev0" source = { editable = "../core" } dependencies = [ { name = "jsonpatch" },