Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-14 07:07:34 +00:00)

namespace refactor

parent 4651457c7e
commit 551663d0b7
@@ -21,7 +21,7 @@ def default(obj: Any) -> Any:
         return obj.to_json()
 
     # Handle v1 message classes
-    from langchain_core.messages.v1 import MessageV1Types
+    from langchain_core.v1.messages import MessageV1Types
 
     if type(obj) in MessageV1Types:
         import dataclasses
@@ -38,7 +38,7 @@ def default(obj: Any) -> Any:
         return {
             "lc": 1,
             "type": "constructor",
-            "id": ["langchain_core", "messages", "v1", type(obj).__name__],
+            "id": ["langchain_core", "v1", "messages", type(obj).__name__],
             "kwargs": kwargs,
         }
 
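For orientation, a minimal sketch of the serialized shape this hunk produces for a v1 message. It assumes `dumpd` routes v1 messages through the `default()` hook patched above; the exact `kwargs` content is elided.

    # Sketch only: assumes dumpd() dispatches v1 messages through default() as patched above.
    from langchain_core.load import dumpd
    from langchain_core.v1.messages import AIMessage

    serialized = dumpd(AIMessage("hi"))
    # After this commit the id mirrors the new import path:
    # ["langchain_core", "v1", "messages", "AIMessage"]
    print(serialized["lc"], serialized["type"], serialized["id"])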
@@ -157,7 +157,7 @@ class Reviver:
         cls = getattr(mod, name)
 
         # The class must be a subclass of Serializable or a v1 message class.
-        from langchain_core.messages.v1 import MessageV1Types
+        from langchain_core.v1.messages import MessageV1Types
 
         if not (issubclass(cls, Serializable) or cls in MessageV1Types):
             msg = f"Invalid namespace: {value}"
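A corresponding round-trip sketch for the Reviver change. This is hedged: it assumes `load()` accepts the default ``langchain_core`` namespace and that v1 messages reconstruct from their dataclass kwargs, as the check above implies.

    # Sketch only: round trip through the patched serializer and Reviver.
    from langchain_core.load import dumpd, load
    from langchain_core.v1.messages import AIMessage

    restored = load(dumpd(AIMessage("hi")))
    # The id ["langchain_core", "v1", "messages", "AIMessage"] is now accepted.
    assert isinstance(restored, AIMessage)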
@@ -2381,7 +2381,7 @@ class Runnable(ABC, Generic[Input, Output]):
                :class:`~langchain_core.messages.content_blocks.ToolCall` input.
 
                If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
-                If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+                If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
 
        Returns:
            A BaseTool instance.
@@ -504,7 +504,7 @@ class ChildTool(BaseTool):
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
 
            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
-            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
    """
 
    def __init__(self, **kwargs: Any) -> None:
@@ -127,7 +127,7 @@ def tool(
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
 
            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
-            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
 
    Returns:
        The tool.
@@ -409,7 +409,7 @@ def convert_runnable_to_tool(
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
 
            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
-            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
 
    Returns:
        The tool.
@@ -93,7 +93,7 @@ def create_retriever_tool(
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
 
            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
-            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
 
    Returns:
        Tool class to pass to an agent.
@@ -162,7 +162,7 @@ class StructuredTool(BaseTool):
            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
 
            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
-            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
 
            kwargs: Additional arguments to pass to the tool
 
@ -1,361 +0,0 @@
|
||||
"""Unit tests for ResponseMetadata TypedDict."""
|
||||
|
||||
from langchain_core.messages.v1 import AIMessage, AIMessageChunk, ResponseMetadata
|
||||
|
||||
|
||||
class TestResponseMetadata:
|
||||
"""Test the ResponseMetadata TypedDict functionality."""
|
||||
|
||||
def test_response_metadata_basic_fields(self) -> None:
|
||||
"""Test ResponseMetadata with basic required fields."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4",
|
||||
}
|
||||
|
||||
assert metadata.get("model_provider") == "openai"
|
||||
assert metadata.get("model_name") == "gpt-4"
|
||||
|
||||
def test_response_metadata_is_optional(self) -> None:
|
||||
"""Test that ResponseMetadata fields are optional due to total=False."""
|
||||
# Should be able to create empty ResponseMetadata
|
||||
metadata: ResponseMetadata = {}
|
||||
assert metadata == {}
|
||||
|
||||
# Should be able to create with just one field
|
||||
metadata_partial: ResponseMetadata = {"model_provider": "anthropic"}
|
||||
assert metadata_partial.get("model_provider") == "anthropic"
|
||||
assert "model_name" not in metadata_partial
|
||||
|
||||
def test_response_metadata_supports_extra_fields(self) -> None:
|
||||
"""Test that ResponseMetadata supports provider-specific extra fields."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4-turbo",
|
||||
# Extra fields should be allowed
|
||||
"usage": {"input_tokens": 100, "output_tokens": 50},
|
||||
"system_fingerprint": "fp_12345",
|
||||
"logprobs": None,
|
||||
"finish_reason": "stop",
|
||||
}
|
||||
|
||||
assert metadata.get("model_provider") == "openai"
|
||||
assert metadata.get("model_name") == "gpt-4-turbo"
|
||||
assert metadata.get("usage") == {"input_tokens": 100, "output_tokens": 50}
|
||||
assert metadata.get("system_fingerprint") == "fp_12345"
|
||||
assert metadata.get("logprobs") is None
|
||||
assert metadata.get("finish_reason") == "stop"
|
||||
|
||||
def test_response_metadata_various_data_types(self) -> None:
|
||||
"""Test that ResponseMetadata can store various data types in extra fields."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "anthropic",
|
||||
"model_name": "claude-3-sonnet",
|
||||
"string_field": "test_value", # type: ignore[typeddict-unknown-key]
|
||||
"int_field": 42, # type: ignore[typeddict-unknown-key]
|
||||
"float_field": 3.14, # type: ignore[typeddict-unknown-key]
|
||||
"bool_field": True, # type: ignore[typeddict-unknown-key]
|
||||
"none_field": None, # type: ignore[typeddict-unknown-key]
|
||||
"list_field": [1, 2, 3, "test"], # type: ignore[typeddict-unknown-key]
|
||||
"dict_field": { # type: ignore[typeddict-unknown-key]
|
||||
"nested": {"deeply": "nested_value"}
|
||||
},
|
||||
}
|
||||
|
||||
assert metadata.get("string_field") == "test_value" # type: ignore[typeddict-item]
|
||||
assert metadata.get("int_field") == 42 # type: ignore[typeddict-item]
|
||||
assert metadata.get("float_field") == 3.14 # type: ignore[typeddict-item]
|
||||
assert metadata.get("bool_field") is True # type: ignore[typeddict-item]
|
||||
assert metadata.get("none_field") is None # type: ignore[typeddict-item]
|
||||
|
||||
list_field = metadata.get("list_field") # type: ignore[typeddict-item]
|
||||
assert isinstance(list_field, list)
|
||||
assert list_field == [1, 2, 3, "test"]
|
||||
|
||||
dict_field = metadata.get("dict_field") # type: ignore[typeddict-item]
|
||||
assert isinstance(dict_field, dict)
|
||||
nested = dict_field.get("nested") # type: ignore[union-attr]
|
||||
assert isinstance(nested, dict)
|
||||
assert nested.get("deeply") == "nested_value" # type: ignore[union-attr]
|
||||
|
||||
def test_response_metadata_can_be_modified(self) -> None:
|
||||
"""Test that ResponseMetadata can be modified after creation."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-3.5-turbo",
|
||||
}
|
||||
|
||||
# Modify existing fields
|
||||
metadata["model_name"] = "gpt-4"
|
||||
assert metadata.get("model_name") == "gpt-4"
|
||||
|
||||
# Add new fields
|
||||
metadata["request_id"] = "req_12345" # type: ignore[typeddict-unknown-key]
|
||||
assert metadata.get("request_id") == "req_12345" # type: ignore[typeddict-item]
|
||||
|
||||
# Modify nested structures
|
||||
metadata["usage"] = {"input_tokens": 10} # type: ignore[typeddict-unknown-key]
|
||||
metadata["usage"]["output_tokens"] = 20 # type: ignore[typeddict-item]
|
||||
|
||||
usage = metadata.get("usage") # type: ignore[typeddict-item]
|
||||
assert isinstance(usage, dict)
|
||||
assert usage.get("input_tokens") == 10 # type: ignore[union-attr]
|
||||
assert usage.get("output_tokens") == 20 # type: ignore[union-attr]
|
||||
|
||||
def test_response_metadata_provider_specific_examples(self) -> None:
|
||||
"""Test ResponseMetadata with realistic provider-specific examples."""
|
||||
# OpenAI-style metadata
|
||||
openai_metadata: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4-turbo-2024-04-09",
|
||||
"usage": { # type: ignore[typeddict-unknown-key]
|
||||
"prompt_tokens": 50,
|
||||
"completion_tokens": 25,
|
||||
"total_tokens": 75,
|
||||
},
|
||||
"system_fingerprint": "fp_abc123", # type: ignore[typeddict-unknown-key]
|
||||
"created": 1234567890, # type: ignore[typeddict-unknown-key]
|
||||
"logprobs": None, # type: ignore[typeddict-unknown-key]
|
||||
"finish_reason": "stop", # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
assert openai_metadata.get("model_provider") == "openai"
|
||||
assert openai_metadata.get("system_fingerprint") == "fp_abc123" # type: ignore[typeddict-item]
|
||||
|
||||
# Anthropic-style metadata
|
||||
anthropic_metadata: ResponseMetadata = {
|
||||
"model_provider": "anthropic",
|
||||
"model_name": "claude-3-sonnet-20240229",
|
||||
"usage": { # type: ignore[typeddict-unknown-key]
|
||||
"input_tokens": 75,
|
||||
"output_tokens": 30,
|
||||
},
|
||||
"stop_reason": "end_turn", # type: ignore[typeddict-unknown-key]
|
||||
"stop_sequence": None, # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
assert anthropic_metadata.get("model_provider") == "anthropic"
|
||||
assert anthropic_metadata.get("stop_reason") == "end_turn" # type: ignore[typeddict-item]
|
||||
|
||||
# Custom provider metadata
|
||||
custom_metadata: ResponseMetadata = {
|
||||
"model_provider": "custom_llm_service",
|
||||
"model_name": "custom-model-v1",
|
||||
"service_tier": "premium", # type: ignore[typeddict-unknown-key]
|
||||
"rate_limit_info": { # type: ignore[typeddict-unknown-key]
|
||||
"requests_remaining": 100,
|
||||
"reset_time": "2024-01-01T00:00:00Z",
|
||||
},
|
||||
"response_time_ms": 1250, # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
assert custom_metadata.get("service_tier") == "premium" # type: ignore[typeddict-item]
|
||||
rate_limit = custom_metadata.get("rate_limit_info") # type: ignore[typeddict-item]
|
||||
assert isinstance(rate_limit, dict)
|
||||
assert rate_limit.get("requests_remaining") == 100 # type: ignore[union-attr]
|
||||
|
||||
|
||||
class TestResponseMetadataWithAIMessages:
|
||||
"""Test ResponseMetadata integration with AI message classes."""
|
||||
|
||||
def test_ai_message_with_response_metadata(self) -> None:
|
||||
"""Test AIMessage with ResponseMetadata."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4",
|
||||
"usage": {"input_tokens": 10, "output_tokens": 5}, # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
message = AIMessage(content="Hello, world!", response_metadata=metadata)
|
||||
|
||||
assert message.response_metadata == metadata
|
||||
assert message.response_metadata.get("model_provider") == "openai"
|
||||
assert message.response_metadata.get("model_name") == "gpt-4"
|
||||
|
||||
usage = message.response_metadata.get("usage") # type: ignore[typeddict-item]
|
||||
assert isinstance(usage, dict)
|
||||
assert usage.get("input_tokens") == 10 # type: ignore[union-attr]
|
||||
|
||||
def test_ai_message_chunk_with_response_metadata(self) -> None:
|
||||
"""Test AIMessageChunk with ResponseMetadata."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "anthropic",
|
||||
"model_name": "claude-3-sonnet",
|
||||
"stream_id": "stream_12345", # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
chunk = AIMessageChunk(content="Hello", response_metadata=metadata)
|
||||
|
||||
assert chunk.response_metadata == metadata
|
||||
assert chunk.response_metadata.get("stream_id") == "stream_12345" # type: ignore[typeddict-item]
|
||||
|
||||
def test_ai_message_default_empty_response_metadata(self) -> None:
|
||||
"""Test that AIMessage creates empty ResponseMetadata by default."""
|
||||
message = AIMessage(content="Test message")
|
||||
|
||||
# Should have empty dict as default
|
||||
assert message.response_metadata == {}
|
||||
assert isinstance(message.response_metadata, dict)
|
||||
|
||||
def test_ai_message_chunk_default_empty_response_metadata(self) -> None:
|
||||
"""Test that AIMessageChunk creates empty ResponseMetadata by default."""
|
||||
chunk = AIMessageChunk(content="Test chunk")
|
||||
|
||||
# Should have empty dict as default
|
||||
assert chunk.response_metadata == {}
|
||||
assert isinstance(chunk.response_metadata, dict)
|
||||
|
||||
def test_response_metadata_merging_in_chunks(self) -> None:
|
||||
"""Test that ResponseMetadata is properly merged when adding AIMessageChunks."""
|
||||
metadata1: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4",
|
||||
"request_id": "req_123", # type: ignore[typeddict-unknown-key]
|
||||
"usage": {"input_tokens": 10}, # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
metadata2: ResponseMetadata = {
|
||||
"stream_chunk": 1, # type: ignore[typeddict-unknown-key]
|
||||
"usage": {"output_tokens": 5}, # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
chunk1 = AIMessageChunk(content="Hello ", response_metadata=metadata1)
|
||||
chunk2 = AIMessageChunk(content="world!", response_metadata=metadata2)
|
||||
|
||||
merged = chunk1 + chunk2
|
||||
|
||||
# Should have merged response_metadata
|
||||
assert merged.response_metadata.get("model_provider") == "openai"
|
||||
assert merged.response_metadata.get("model_name") == "gpt-4"
|
||||
assert merged.response_metadata.get("request_id") == "req_123" # type: ignore[typeddict-item]
|
||||
assert merged.response_metadata.get("stream_chunk") == 1 # type: ignore[typeddict-item]
|
||||
|
||||
# Usage should be merged (from merge_dicts behavior)
|
||||
merged_usage = merged.response_metadata.get("usage") # type: ignore[typeddict-item]
|
||||
assert isinstance(merged_usage, dict)
|
||||
assert merged_usage.get("input_tokens") == 10 # type: ignore[union-attr]
|
||||
assert merged_usage.get("output_tokens") == 5 # type: ignore[union-attr]
|
||||
|
||||
def test_response_metadata_modification_after_message_creation(self) -> None:
|
||||
"""Test that ResponseMetadata can be modified after message creation."""
|
||||
message = AIMessage(
|
||||
content="Initial message",
|
||||
response_metadata={"model_provider": "openai", "model_name": "gpt-3.5"},
|
||||
)
|
||||
|
||||
# Modify existing field
|
||||
message.response_metadata["model_name"] = "gpt-4"
|
||||
assert message.response_metadata.get("model_name") == "gpt-4"
|
||||
|
||||
# Add new field
|
||||
message.response_metadata["finish_reason"] = "stop" # type: ignore[typeddict-unknown-key]
|
||||
assert message.response_metadata.get("finish_reason") == "stop" # type: ignore[typeddict-item]
|
||||
|
||||
def test_response_metadata_with_none_values(self) -> None:
|
||||
"""Test ResponseMetadata handling of None values."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4",
|
||||
"system_fingerprint": None, # type: ignore[typeddict-unknown-key]
|
||||
"logprobs": None, # type: ignore[typeddict-unknown-key]
|
||||
}
|
||||
|
||||
message = AIMessage(content="Test", response_metadata=metadata)
|
||||
|
||||
assert message.response_metadata.get("system_fingerprint") is None # type: ignore[typeddict-item]
|
||||
assert message.response_metadata.get("logprobs") is None # type: ignore[typeddict-item]
|
||||
assert "system_fingerprint" in message.response_metadata
|
||||
assert "logprobs" in message.response_metadata
|
||||
|
||||
|
||||
class TestResponseMetadataEdgeCases:
|
||||
"""Test edge cases and error conditions for ResponseMetadata."""
|
||||
|
||||
def test_response_metadata_with_complex_nested_structures(self) -> None:
|
||||
"""Test ResponseMetadata with deeply nested and complex structures."""
|
||||
metadata: ResponseMetadata = {
|
||||
"model_provider": "custom",
|
||||
"model_name": "complex-model",
|
||||
"complex_data": { # type: ignore[typeddict-unknown-key]
|
||||
"level1": {
|
||||
"level2": {
|
||||
"level3": {
|
||||
"deeply_nested": "value",
|
||||
"array": [
|
||||
{"item": 1, "metadata": {"nested": True}},
|
||||
{"item": 2, "metadata": {"nested": False}},
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
complex_data = metadata.get("complex_data") # type: ignore[typeddict-item]
|
||||
assert isinstance(complex_data, dict)
|
||||
level1 = complex_data.get("level1") # type: ignore[union-attr]
|
||||
assert isinstance(level1, dict)
|
||||
level2 = level1.get("level2") # type: ignore[union-attr]
|
||||
assert isinstance(level2, dict)
|
||||
level3 = level2.get("level3") # type: ignore[union-attr]
|
||||
assert isinstance(level3, dict)
|
||||
|
||||
assert level3.get("deeply_nested") == "value" # type: ignore[union-attr]
|
||||
array = level3.get("array") # type: ignore[union-attr]
|
||||
assert isinstance(array, list)
|
||||
assert len(array) == 2 # type: ignore[arg-type]
|
||||
assert array[0]["item"] == 1 # type: ignore[index, typeddict-item]
|
||||
assert array[0]["metadata"]["nested"] is True # type: ignore[index, typeddict-item]
|
||||
|
||||
def test_response_metadata_large_data(self) -> None:
|
||||
"""Test ResponseMetadata with large amounts of data."""
|
||||
# Create metadata with many fields
|
||||
large_metadata: ResponseMetadata = {
|
||||
"model_provider": "test_provider",
|
||||
"model_name": "test_model",
|
||||
}
|
||||
|
||||
# Add 100 extra fields
|
||||
for i in range(100):
|
||||
large_metadata[f"field_{i}"] = f"value_{i}" # type: ignore[literal-required]
|
||||
|
||||
message = AIMessage(content="Test", response_metadata=large_metadata)
|
||||
|
||||
# Verify all fields are accessible
|
||||
assert message.response_metadata.get("model_provider") == "test_provider"
|
||||
for i in range(100):
|
||||
assert message.response_metadata.get(f"field_{i}") == f"value_{i}" # type: ignore[typeddict-item]
|
||||
|
||||
def test_response_metadata_empty_vs_none(self) -> None:
|
||||
"""Test the difference between empty ResponseMetadata and None."""
|
||||
# Message with empty metadata
|
||||
message_empty = AIMessage(content="Test", response_metadata={})
|
||||
assert message_empty.response_metadata == {}
|
||||
assert isinstance(message_empty.response_metadata, dict)
|
||||
|
||||
# Message with None metadata (should become empty dict)
|
||||
message_none = AIMessage(content="Test", response_metadata=None)
|
||||
assert message_none.response_metadata == {}
|
||||
assert isinstance(message_none.response_metadata, dict)
|
||||
|
||||
# Default message (no metadata specified)
|
||||
message_default = AIMessage(content="Test")
|
||||
assert message_default.response_metadata == {}
|
||||
assert isinstance(message_default.response_metadata, dict)
|
||||
|
||||
def test_response_metadata_preserves_original_dict_type(self) -> None:
|
||||
"""Test that ResponseMetadata preserves the original dict when passed."""
|
||||
original_dict = {
|
||||
"model_provider": "openai",
|
||||
"model_name": "gpt-4",
|
||||
"custom_field": "custom_value",
|
||||
}
|
||||
|
||||
message = AIMessage(content="Test", response_metadata=original_dict)
|
||||
|
||||
# Should be the same dict object
|
||||
assert message.response_metadata is original_dict
|
||||
|
||||
# Modifications to the message's response_metadata should affect original
|
||||
message.response_metadata["new_field"] = "new_value" # type: ignore[typeddict-unknown-key]
|
||||
assert original_dict.get("new_field") == "new_value" # type: ignore[typeddict-item]
|
@@ -16,7 +16,7 @@ service.
 from importlib import metadata
 
 from langchain_ollama.chat_models import ChatOllama
-from langchain_ollama.chat_models_v1 import ChatOllamaV1
+from langchain_ollama.chat_models_v1 import ChatOllama as ChatOllamaV1
 from langchain_ollama.embeddings import OllamaEmbeddings
 from langchain_ollama.llms import OllamaLLM
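The aliased import above keeps the old public name working. A small sketch of what downstream code can rely on after this commit, assuming ``ChatOllamaV1`` stays re-exported from ``langchain_ollama`` as before:

    # Sketch: the compatibility alias and the renamed class are the same object.
    from langchain_ollama import ChatOllamaV1  # assumed still exported for compatibility
    from langchain_ollama.chat_models_v1 import ChatOllama

    assert ChatOllamaV1 is ChatOllama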
@@ -13,12 +13,15 @@ from langchain_core.messages.content_blocks import (
     TextContentBlock,
     ToolCall,
 )
-from langchain_core.messages.v1 import AIMessage as AIMessageV1
-from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
-from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
-from langchain_core.messages.v1 import MessageV1, ResponseMetadata
-from langchain_core.messages.v1 import SystemMessage as SystemMessageV1
-from langchain_core.messages.v1 import ToolMessage as ToolMessageV1
+from langchain_core.v1.messages import (
+    AIMessage,
+    AIMessageChunk,
+    HumanMessage,
+    MessageV1,
+    ResponseMetadata,
+    SystemMessage,
+    ToolMessage,
+)
 
 
 def _get_usage_metadata_from_response(
@@ -38,13 +41,13 @@ def _get_usage_metadata_from_response(
 
 def _convert_from_v1_to_ollama_format(message: MessageV1) -> dict[str, Any]:
     """Convert v1 message to Ollama API format."""
-    if isinstance(message, HumanMessageV1):
+    if isinstance(message, HumanMessage):
         return _convert_human_message_v1(message)
-    if isinstance(message, AIMessageV1):
+    if isinstance(message, AIMessage):
         return _convert_ai_message_v1(message)
-    if isinstance(message, SystemMessageV1):
+    if isinstance(message, SystemMessage):
         return _convert_system_message_v1(message)
-    if isinstance(message, ToolMessageV1):
+    if isinstance(message, ToolMessage):
         return _convert_tool_message_v1(message)
     msg = f"Unsupported message type: {type(message)}"
     raise ValueError(msg)
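As a quick reference, a sketch of what the dispatcher above returns for a plain human message, mirroring the unit tests later in this diff (the ``role`` key is an assumption; the tests only assert ``content`` and ``images``):

    # Sketch based on the tests further down in this diff.
    from langchain_core.v1.messages import HumanMessage
    from langchain_ollama._compat import _convert_from_v1_to_ollama_format

    result = _convert_from_v1_to_ollama_format(HumanMessage("Hello world"))
    assert result["content"] == "Hello world"
    assert result["images"] == []
    assert result.get("role") == "user"  # assumed; not asserted in the tests shown here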
@@ -109,8 +112,8 @@ def _convert_content_blocks_to_ollama_format(
     return text_content, images, tool_calls
 
 
-def _convert_human_message_v1(message: HumanMessageV1) -> dict[str, Any]:
-    """Convert HumanMessageV1 to Ollama format."""
+def _convert_human_message_v1(message: HumanMessage) -> dict[str, Any]:
+    """Convert HumanMessage to Ollama format."""
     text_content, images, _ = _convert_content_blocks_to_ollama_format(message.content)
 
     msg: dict[str, Any] = {
@@ -125,8 +128,8 @@ def _convert_human_message_v1(message: HumanMessageV1) -> dict[str, Any]:
     return msg
 
 
-def _convert_ai_message_v1(message: AIMessageV1) -> dict[str, Any]:
-    """Convert AIMessageV1 to Ollama format."""
+def _convert_ai_message_v1(message: AIMessage) -> dict[str, Any]:
+    """Convert AIMessage to Ollama format."""
     text_content, _, tool_calls = _convert_content_blocks_to_ollama_format(
         message.content
     )
@@ -146,8 +149,8 @@ def _convert_ai_message_v1(message: AIMessageV1) -> dict[str, Any]:
     return msg
 
 
-def _convert_system_message_v1(message: SystemMessageV1) -> dict[str, Any]:
-    """Convert SystemMessageV1 to Ollama format."""
+def _convert_system_message_v1(message: SystemMessage) -> dict[str, Any]:
+    """Convert SystemMessage to Ollama format."""
     text_content, _, _ = _convert_content_blocks_to_ollama_format(message.content)
 
     return {
@@ -156,8 +159,8 @@ def _convert_system_message_v1(message: SystemMessageV1) -> dict[str, Any]:
     }
 
 
-def _convert_tool_message_v1(message: ToolMessageV1) -> dict[str, Any]:
-    """Convert ToolMessageV1 to Ollama format."""
+def _convert_tool_message_v1(message: ToolMessage) -> dict[str, Any]:
+    """Convert ToolMessage to Ollama format."""
     text_content, _, _ = _convert_content_blocks_to_ollama_format(message.content)
 
     return {
@@ -167,8 +170,8 @@ def _convert_tool_message_v1(message: ToolMessageV1) -> dict[str, Any]:
     }
 
 
-def _convert_to_v1_from_ollama_format(response: dict[str, Any]) -> AIMessageV1:
-    """Convert Ollama API response to AIMessageV1."""
+def _convert_to_v1_from_ollama_format(response: dict[str, Any]) -> AIMessage:
+    """Convert Ollama API response to AIMessage."""
     content: list[types.ContentBlock] = []
 
     # Handle text content
@@ -233,15 +236,15 @@ def _convert_to_v1_from_ollama_format(response: dict[str, Any]) -> AIMessageV1:
     if "context" in response:
         metadata_as_dict["context"] = response["context"]
 
-    return AIMessageV1(
+    return AIMessage(
         content=content,
         response_metadata=response_metadata,
         usage_metadata=_get_usage_metadata_from_response(response),
     )
 
 
-def _convert_chunk_to_v1(chunk: dict[str, Any]) -> AIMessageChunkV1:
-    """Convert Ollama streaming chunk to AIMessageChunkV1."""
+def _convert_chunk_to_v1(chunk: dict[str, Any]) -> AIMessageChunk:
+    """Convert Ollama streaming chunk to AIMessageChunk."""
     content: list[types.ContentBlock] = []
 
     # Handle reasoning content first in chunks
@@ -305,7 +308,7 @@ def _convert_chunk_to_v1(chunk: dict[str, Any]) -> AIMessageChunkV1:
     if chunk.get("done") is True:
         usage_metadata = _get_usage_metadata_from_response(chunk)
 
-    return AIMessageChunkV1(
+    return AIMessageChunk(
         content=content,
         response_metadata=response_metadata or ResponseMetadata(),
         usage_metadata=usage_metadata,
@@ -1,7 +1,7 @@
 """Ollama chat model v1 implementation.
 
 This implementation provides native support for v1 messages with structured
-content blocks and always returns AIMessageV1 format responses.
+content blocks.
 
 .. versionadded:: 1.0.0
 """
@@ -20,14 +20,6 @@ from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun
 from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import LanguageModelInput
 from langchain_core.language_models.chat_models import LangSmithParams
-from langchain_core.language_models.v1.chat_models import (
-    BaseChatModelV1,
-    agenerate_from_stream,
-    generate_from_stream,
-)
-from langchain_core.messages.v1 import AIMessage as AIMessageV1
-from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
-from langchain_core.messages.v1 import MessageV1
 from langchain_core.output_parsers import (
     JsonOutputKeyToolsParser,
     JsonOutputParser,
@@ -41,6 +33,12 @@ from langchain_core.utils.function_calling import (
     convert_to_openai_tool,
 )
 from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
+from langchain_core.v1.chat_models import (
+    BaseChatModel,
+    agenerate_from_stream,
+    generate_from_stream,
+)
+from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1
 from ollama import AsyncClient, Client, Options
 from pydantic import BaseModel, PrivateAttr, model_validator
 from pydantic.json_schema import JsonSchemaValue
@@ -154,11 +152,10 @@ def _is_pydantic_class(obj: Any) -> bool:
     return isinstance(obj, type) and is_basemodel_subclass(obj)
 
 
-class ChatOllamaV1(BaseChatModelV1):
+class ChatOllama(BaseChatModel):
     r"""Ollama chat model with native v1 message/content block support.
 
-    This implementation provides native support for structured content blocks
-    and always returns AIMessageV1 format responses with structured content.
+    This implementation provides native support for structured content blocks.
 
     .. dropdown:: Setup
         :open:
@@ -196,9 +193,9 @@ class ChatOllamaV1(BaseChatModelV1):
     Instantiate:
         .. code-block:: python
 
-            from langchain_ollama import ChatOllamaV1
+            from langchain_ollama import ChatOllama
 
-            llm = ChatOllamaV1(
+            llm = ChatOllama(
                 model = "llama3",
                 temperature = 0.8,
                 num_predict = 256,
@@ -208,7 +205,7 @@ class ChatOllamaV1(BaseChatModelV1):
     Invoke:
         .. code-block:: python
 
-            from langchain_core.messages.v1 import HumanMessage
+            from langchain_core.v1.messages import HumanMessage
             from langchain_core.messages.content_blocks import TextContentBlock
 
             messages = [
@@ -220,12 +217,12 @@ class ChatOllamaV1(BaseChatModelV1):
 
         .. code-block:: python
 
-            AIMessageV1(content=[{'type': 'text', 'text': 'Hello! How can I help you today?'}], response_metadata={'model': 'llama3', 'created_at': '2024-07-04T03:37:50.182604Z', 'done_reason': 'stop', 'done': True, 'total_duration': 3576619666, 'load_duration': 788524916, 'prompt_eval_count': 32, 'prompt_eval_duration': 128125000, 'eval_count': 71, 'eval_duration': 2656556000}, id='run-ba48f958-6402-41a5-b461-5e250a4ebd36-0')
+            AIMessage(content=[{'type': 'text', 'text': 'Hello! How can I help you today?'}], response_metadata={'model': 'llama3', 'created_at': '2024-07-04T03:37:50.182604Z', 'done_reason': 'stop', 'done': True, 'total_duration': 3576619666, 'load_duration': 788524916, 'prompt_eval_count': 32, 'prompt_eval_duration': 128125000, 'eval_count': 71, 'eval_duration': 2656556000}, id='run-ba48f958-6402-41a5-b461-5e250a4ebd36-0')
 
     Stream:
         .. code-block:: python
 
-            from langchain_core.messages.v1 import HumanMessage
+            from langchain_core.v1.messages import HumanMessage
             from langchain_core.messages.content_blocks import TextContentBlock
 
             messages = [
@@ -261,7 +258,7 @@ class ChatOllamaV1(BaseChatModelV1):
     Tool Calling:
         .. code-block:: python
 
-            from langchain_ollama import ChatOllamaV1
+            from langchain_ollama import ChatOllama
             from pydantic import BaseModel, Field
 
             class Multiply(BaseModel):
@ -541,7 +538,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> Iterator[AIMessageChunkV1]:
|
||||
) -> Iterator[AIMessageChunk]:
|
||||
"""Generate streaming response with native v1 chunks."""
|
||||
chat_params = self._chat_params(messages, stop, **kwargs)
|
||||
|
||||
@ -578,7 +575,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
response = self._client.chat(**chat_params)
|
||||
ai_message = _convert_to_v1_from_ollama_format(response)
|
||||
# Convert to chunk for yielding
|
||||
chunk = AIMessageChunkV1(
|
||||
chunk = AIMessageChunk(
|
||||
content=ai_message.content,
|
||||
response_metadata=ai_message.response_metadata,
|
||||
usage_metadata=ai_message.usage_metadata,
|
||||
@ -591,7 +588,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncIterator[AIMessageChunkV1]:
|
||||
) -> AsyncIterator[AIMessageChunk]:
|
||||
"""Generate async streaming response with native v1 chunks."""
|
||||
chat_params = self._chat_params(messages, stop, **kwargs)
|
||||
|
||||
@ -628,7 +625,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
response = await self._async_client.chat(**chat_params)
|
||||
ai_message = _convert_to_v1_from_ollama_format(response)
|
||||
# Convert to chunk for yielding
|
||||
chunk = AIMessageChunkV1(
|
||||
chunk = AIMessageChunk(
|
||||
content=ai_message.content,
|
||||
response_metadata=ai_message.response_metadata,
|
||||
usage_metadata=ai_message.usage_metadata,
|
||||
@ -641,7 +638,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> AIMessageV1:
|
||||
) -> AIMessage:
|
||||
"""Invoke the model with v1 messages and return a complete response.
|
||||
|
||||
Args:
|
||||
@ -664,7 +661,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> AIMessageV1:
|
||||
) -> AIMessage:
|
||||
"""Async invoke the model with v1 messages and return a complete response.
|
||||
|
||||
Args:
|
||||
@ -687,7 +684,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> Iterator[AIMessageChunkV1]:
|
||||
) -> Iterator[AIMessageChunk]:
|
||||
"""Stream response chunks using the v1 format.
|
||||
|
||||
Args:
|
||||
@ -709,7 +706,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncIterator[AIMessageChunkV1]:
|
||||
) -> AsyncIterator[AIMessageChunk]:
|
||||
"""Async stream response chunks using the v1 format.
|
||||
|
||||
Args:
|
||||
@ -732,7 +729,7 @@ class ChatOllamaV1(BaseChatModelV1):
|
||||
*,
|
||||
tool_choice: Optional[Union[dict, str, bool]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Runnable[LanguageModelInput, AIMessageV1]:
|
||||
) -> Runnable[LanguageModelInput, AIMessage]:
|
||||
"""Bind tool-like objects to this chat model.
|
||||
|
||||
Args:
|
||||
|
@ -1,4 +1,4 @@
|
||||
"""Unit tests for ChatOllamaV1."""
|
||||
"""Unit tests for ChatOllama."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
@ -12,10 +12,7 @@ from langchain_core.messages.content_blocks import (
|
||||
create_image_block,
|
||||
create_text_block,
|
||||
)
|
||||
from langchain_core.messages.v1 import AIMessage as AIMessageV1
|
||||
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
|
||||
from langchain_core.messages.v1 import MessageV1
|
||||
from langchain_core.messages.v1 import SystemMessage as SystemMessageV1
|
||||
from langchain_core.v1.messages import AIMessage, HumanMessage, MessageV1, SystemMessage
|
||||
from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1UnitTests
|
||||
|
||||
from langchain_ollama._compat import (
|
||||
@ -24,7 +21,7 @@ from langchain_ollama._compat import (
|
||||
_convert_to_v1_from_ollama_format,
|
||||
)
|
||||
from langchain_ollama.chat_models_v1 import (
|
||||
ChatOllamaV1,
|
||||
ChatOllama,
|
||||
_parse_arguments_from_tool_call,
|
||||
_parse_json_string,
|
||||
)
|
||||
@ -36,8 +33,8 @@ class TestMessageConversion:
|
||||
"""Test v1 message conversion utilities."""
|
||||
|
||||
def test_convert_human_message_v1_text_only(self) -> None:
|
||||
"""Test converting HumanMessageV1 with text content."""
|
||||
message = HumanMessageV1("Hello world")
|
||||
"""Test converting HumanMessage with text content."""
|
||||
message = HumanMessage("Hello world")
|
||||
|
||||
result = _convert_from_v1_to_ollama_format(message)
|
||||
|
||||
@ -46,8 +43,8 @@ class TestMessageConversion:
|
||||
assert result["images"] == []
|
||||
|
||||
def test_convert_ai_message_v1(self) -> None:
|
||||
"""Test converting AIMessageV1 with text content."""
|
||||
message = AIMessageV1("Hello! How can I help?")
|
||||
"""Test converting AIMessage with text content."""
|
||||
message = AIMessage("Hello! How can I help?")
|
||||
|
||||
result = _convert_from_v1_to_ollama_format(message)
|
||||
|
||||
@ -55,8 +52,8 @@ class TestMessageConversion:
|
||||
assert result["content"] == "Hello! How can I help?"
|
||||
|
||||
def test_convert_system_message_v1(self) -> None:
|
||||
"""Test converting SystemMessageV1."""
|
||||
message = SystemMessageV1("You are a helpful assistant.")
|
||||
"""Test converting SystemMessage."""
|
||||
message = SystemMessage("You are a helpful assistant.")
|
||||
|
||||
result = _convert_from_v1_to_ollama_format(message)
|
||||
|
||||
@ -64,14 +61,14 @@ class TestMessageConversion:
|
||||
assert result["content"] == "You are a helpful assistant."
|
||||
|
||||
def test_convert_human_message_v1_with_image(self) -> None:
|
||||
"""Test converting HumanMessageV1 with text and image content.
|
||||
"""Test converting HumanMessage with text and image content.
|
||||
|
||||
Each uses `_convert_from_v1_to_ollama_format` to ensure
|
||||
that the conversion handles both text and image blocks correctly. Thus, we don't
|
||||
need additional tests for other message types that also use this function.
|
||||
|
||||
"""
|
||||
message_a = HumanMessageV1(
|
||||
message_a = HumanMessage(
|
||||
content=[
|
||||
create_text_block("Describe this image:"),
|
||||
create_image_block(base64="base64imagedata"),
|
||||
@ -85,7 +82,7 @@ class TestMessageConversion:
|
||||
assert result_a["images"] == ["base64imagedata"]
|
||||
|
||||
# Make sure multiple images are handled correctly
|
||||
message_b = HumanMessageV1(
|
||||
message_b = HumanMessage(
|
||||
content=[
|
||||
create_text_block("Describe this image:"),
|
||||
create_image_block(base64="base64imagedata"),
|
||||
@ -100,7 +97,7 @@ class TestMessageConversion:
|
||||
assert result_b["images"] == ["base64imagedata", "base64dataimage"]
|
||||
|
||||
def test_convert_from_ollama_format(self) -> None:
|
||||
"""Test converting Ollama response to `AIMessageV1`."""
|
||||
"""Test converting Ollama response to `AIMessage`."""
|
||||
ollama_response = {
|
||||
"model": MODEL_NAME,
|
||||
"created_at": "2024-01-01T00:00:00Z",
|
||||
@ -117,7 +114,7 @@ class TestMessageConversion:
|
||||
|
||||
result = _convert_to_v1_from_ollama_format(ollama_response)
|
||||
|
||||
assert isinstance(result, AIMessageV1)
|
||||
assert isinstance(result, AIMessage)
|
||||
assert len(result.content) == 1
|
||||
assert result.content[0].get("type") == "text"
|
||||
assert result.content[0].get("text") == "Hello! How can I help you today?"
|
||||
@ -125,7 +122,7 @@ class TestMessageConversion:
|
||||
assert result.response_metadata.get("done") is True
|
||||
|
||||
def test_convert_from_ollama_format_with_context(self) -> None:
|
||||
"""Test converting Ollama response with context field to `AIMessageV1`."""
|
||||
"""Test converting Ollama response with context field to `AIMessage`."""
|
||||
test_context = [1, 2, 3, 4, 5] # Example tokenized context
|
||||
ollama_response = {
|
||||
"model": MODEL_NAME,
|
||||
@ -144,7 +141,7 @@ class TestMessageConversion:
|
||||
|
||||
result = _convert_to_v1_from_ollama_format(ollama_response)
|
||||
|
||||
assert isinstance(result, AIMessageV1)
|
||||
assert isinstance(result, AIMessage)
|
||||
assert len(result.content) == 1
|
||||
assert result.content[0].get("type") == "text"
|
||||
assert result.content[0].get("text") == "Hello! How can I help you today?"
|
||||
@ -190,7 +187,7 @@ class TestMessageConversion:
|
||||
|
||||
def test_convert_empty_content(self) -> None:
|
||||
"""Test converting empty content blocks."""
|
||||
message = HumanMessageV1(content=[])
|
||||
message = HumanMessage(content=[])
|
||||
|
||||
result = _convert_from_v1_to_ollama_format(message)
|
||||
|
||||
@ -199,12 +196,12 @@ class TestMessageConversion:
|
||||
assert result["images"] == []
|
||||
|
||||
|
||||
class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
"""Test `ChatOllamaV1`."""
|
||||
class TestChatOllama(ChatModelV1UnitTests):
|
||||
"""Test `ChatOllama`."""
|
||||
|
||||
@property
|
||||
def chat_model_class(self) -> type[ChatOllamaV1]:
|
||||
return ChatOllamaV1
|
||||
def chat_model_class(self) -> type[ChatOllama]:
|
||||
return ChatOllama
|
||||
|
||||
@property
|
||||
def chat_model_params(self) -> dict:
|
||||
@ -212,32 +209,32 @@ class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
|
||||
@property
|
||||
def has_tool_calling(self) -> bool:
|
||||
"""`ChatOllamaV1` supports tool calling (e.g., `qwen3` models)."""
|
||||
"""`ChatOllama` supports tool calling (e.g., `qwen3` models)."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def has_tool_choice(self) -> bool:
|
||||
"""`ChatOllamaV1` supports tool choice parameter."""
|
||||
"""`ChatOllama` supports tool choice parameter."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def has_structured_output(self) -> bool:
|
||||
"""`ChatOllamaV1` supports structured output via `with_structured_output`."""
|
||||
"""`ChatOllama` supports structured output via `with_structured_output`."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def supports_image_content_blocks(self) -> bool:
|
||||
"""`ChatOllamaV1` supports image content blocks (e.g., `gemma3`)."""
|
||||
"""`ChatOllama` supports image content blocks (e.g., `gemma3`)."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def supports_reasoning_content_blocks(self) -> bool:
|
||||
"""`ChatOllamaV1` supports reasoning/thinking content blocks (e.g., `qwen3`)."""
|
||||
"""`ChatOllama` supports reasoning/thinking content blocks (e.g., `qwen3`)."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def returns_usage_metadata(self) -> bool:
|
||||
"""`ChatOllamaV1` returns usage metadata with token counts."""
|
||||
"""`ChatOllama` returns usage metadata with token counts."""
|
||||
return True
|
||||
|
||||
@property
|
||||
@ -251,8 +248,8 @@ class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
return False
|
||||
|
||||
@pytest.fixture
|
||||
def model(self) -> Iterator[ChatOllamaV1]:
|
||||
"""Create a ChatOllamaV1 instance for testing."""
|
||||
def model(self) -> Iterator[ChatOllama]:
|
||||
"""Create a ChatOllama instance for testing."""
|
||||
sync_patcher = patch("langchain_ollama.chat_models_v1.Client")
|
||||
async_patcher = patch("langchain_ollama.chat_models_v1.AsyncClient")
|
||||
|
||||
@ -333,12 +330,12 @@ class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
async_patcher.stop()
|
||||
|
||||
def test_initialization(self) -> None:
|
||||
"""Test `ChatOllamaV1` initialization."""
|
||||
"""Test `ChatOllama` initialization."""
|
||||
with (
|
||||
patch("langchain_ollama.chat_models_v1.Client"),
|
||||
patch("langchain_ollama.chat_models_v1.AsyncClient"),
|
||||
):
|
||||
llm = ChatOllamaV1(model=MODEL_NAME)
|
||||
llm = ChatOllama(model=MODEL_NAME)
|
||||
|
||||
assert llm.model == MODEL_NAME
|
||||
assert llm._llm_type == "chat-ollama-v1"
|
||||
@ -349,9 +346,9 @@ class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
patch("langchain_ollama.chat_models_v1.Client"),
|
||||
patch("langchain_ollama.chat_models_v1.AsyncClient"),
|
||||
):
|
||||
llm = ChatOllamaV1(model=MODEL_NAME, temperature=0.7)
|
||||
llm = ChatOllama(model=MODEL_NAME, temperature=0.7)
|
||||
|
||||
messages: list[MessageV1] = [HumanMessageV1("Hello")]
|
||||
messages: list[MessageV1] = [HumanMessage("Hello")]
|
||||
|
||||
params = llm._chat_params(messages)
|
||||
|
||||
@ -369,7 +366,7 @@ class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
patch("langchain_ollama.chat_models_v1.Client"),
|
||||
patch("langchain_ollama.chat_models_v1.AsyncClient"),
|
||||
):
|
||||
llm = ChatOllamaV1(model=MODEL_NAME, temperature=0.5)
|
||||
llm = ChatOllama(model=MODEL_NAME, temperature=0.5)
|
||||
|
||||
ls_params = llm._get_ls_params()
|
||||
|
||||
@ -384,7 +381,7 @@ class TestChatOllamaV1(ChatModelV1UnitTests):
|
||||
patch("langchain_ollama.chat_models_v1.Client"),
|
||||
patch("langchain_ollama.chat_models_v1.AsyncClient"),
|
||||
):
|
||||
llm = ChatOllamaV1(model=MODEL_NAME)
|
||||
llm = ChatOllama(model=MODEL_NAME)
|
||||
|
||||
def test_tool(query: str) -> str:
|
||||
"""A test tool."""
|
||||
@ -411,16 +408,16 @@ def test_validate_model_on_init(
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
# Test that validate_model is called when validate_model_on_init=True
|
||||
ChatOllamaV1(model=MODEL_NAME, validate_model_on_init=True)
|
||||
ChatOllama(model=MODEL_NAME, validate_model_on_init=True)
|
||||
mock_validate_model.assert_called_once()
|
||||
mock_validate_model.reset_mock()
|
||||
|
||||
# Test that validate_model is NOT called when validate_model_on_init=False
|
||||
ChatOllamaV1(model=MODEL_NAME, validate_model_on_init=False)
|
||||
ChatOllama(model=MODEL_NAME, validate_model_on_init=False)
|
||||
mock_validate_model.assert_not_called()
|
||||
|
||||
# Test that validate_model is NOT called by default
|
||||
ChatOllamaV1(model=MODEL_NAME)
|
||||
ChatOllama(model=MODEL_NAME)
|
||||
mock_validate_model.assert_not_called()
|
||||
|
||||
|
||||
@ -513,13 +510,13 @@ def test_load_response_with_empty_content_is_skipped(
|
||||
mock_client_class.return_value = mock_client
|
||||
mock_client.chat.return_value = iter(load_only_response)
|
||||
|
||||
llm = ChatOllamaV1(model="test-model")
|
||||
llm = ChatOllama(model="test-model")
|
||||
|
||||
with (
|
||||
caplog.at_level(logging.WARNING),
|
||||
pytest.raises(ValueError, match="No generations found in stream"),
|
||||
):
|
||||
llm.invoke([HumanMessageV1("Hello")])
|
||||
llm.invoke([HumanMessage("Hello")])
|
||||
|
||||
assert "Ollama returned empty response with done_reason='load'" in caplog.text
|
||||
|
||||
@ -543,13 +540,13 @@ def test_load_response_with_whitespace_content_is_skipped(
|
||||
mock_client_class.return_value = mock_client
|
||||
mock_client.chat.return_value = iter(load_whitespace_response)
|
||||
|
||||
llm = ChatOllamaV1(model="test-model")
|
||||
llm = ChatOllama(model="test-model")
|
||||
|
||||
with (
|
||||
caplog.at_level(logging.WARNING),
|
||||
pytest.raises(ValueError, match="No generations found in stream"),
|
||||
):
|
||||
llm.invoke([HumanMessageV1("Hello")])
|
||||
llm.invoke([HumanMessage("Hello")])
|
||||
assert "Ollama returned empty response with done_reason='load'" in caplog.text
|
||||
|
||||
|
||||
@ -582,10 +579,10 @@ def test_load_followed_by_content_response(
|
||||
mock_client_class.return_value = mock_client
|
||||
mock_client.chat.return_value = iter(load_then_content_response)
|
||||
|
||||
llm = ChatOllamaV1(model="test-model")
|
||||
llm = ChatOllama(model="test-model")
|
||||
|
||||
with caplog.at_level(logging.WARNING):
|
||||
result = llm.invoke([HumanMessageV1("Hello")])
|
||||
result = llm.invoke([HumanMessage("Hello")])
|
||||
|
||||
assert "Ollama returned empty response with done_reason='load'" in caplog.text
|
||||
assert len(result.content) == 1
|
||||
@ -612,10 +609,10 @@ def test_load_response_with_actual_content_is_not_skipped(
|
||||
mock_client_class.return_value = mock_client
|
||||
mock_client.chat.return_value = iter(load_with_content_response)
|
||||
|
||||
llm = ChatOllamaV1(model="test-model")
|
||||
llm = ChatOllama(model="test-model")
|
||||
|
||||
with caplog.at_level(logging.WARNING):
|
||||
result = llm.invoke([HumanMessageV1("Hello")])
|
||||
result = llm.invoke([HumanMessage("Hello")])
|
||||
|
||||
assert len(result.content) == 1
|
||||
assert result.text == "This is actual content"
|
||||
|
@ -9,7 +9,6 @@ from typing import Any, Union, cast
|
||||
import langchain_core.messages.content_blocks as types
|
||||
import pytest
|
||||
from langchain_core.callbacks import BaseCallbackHandler
|
||||
from langchain_core.language_models.v1.chat_models import BaseChatModelV1
|
||||
from langchain_core.messages.base import BaseMessage
|
||||
from langchain_core.messages.content_blocks import (
|
||||
AudioContentBlock,
|
||||
@ -38,8 +37,9 @@ from langchain_core.messages.content_blocks import (
|
||||
is_text_block,
|
||||
is_tool_call_block,
|
||||
)
|
||||
from langchain_core.messages.v1 import AIMessage, AIMessageChunk, HumanMessage
|
||||
from langchain_core.tools import tool
|
||||
from langchain_core.v1.chat_models import BaseChatModel
|
||||
from langchain_core.v1.messages import AIMessage, AIMessageChunk, HumanMessage
|
||||
|
||||
from langchain_tests.unit_tests.chat_models_v1 import ChatModelV1Tests
|
||||
|
||||
@ -156,7 +156,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
return True
|
||||
|
||||
# Multimodal testing
|
||||
def test_image_content_blocks_with_analysis(self, model: BaseChatModelV1) -> None:
|
||||
def test_image_content_blocks_with_analysis(self, model: BaseChatModel) -> None:
|
||||
"""Test image analysis using ``ImageContentBlock``s."""
|
||||
if not self.supports_image_content_blocks:
|
||||
pytest.skip("Model does not support image inputs.")
|
||||
@ -179,7 +179,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
if result.text:
|
||||
assert len(result.text) > 10 # Substantial response
|
||||
|
||||
def test_video_content_blocks(self, model: BaseChatModelV1) -> None:
|
||||
def test_video_content_blocks(self, model: BaseChatModel) -> None:
|
||||
"""Test video content block processing."""
|
||||
if not self.supports_video_content_blocks:
|
||||
pytest.skip("Model does not support video inputs.")
|
||||
@ -196,7 +196,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
if result.text:
|
||||
assert len(result.text) > 10 # Substantial response
|
||||
|
||||
def test_audio_content_blocks_processing(self, model: BaseChatModelV1) -> None:
|
||||
def test_audio_content_blocks_processing(self, model: BaseChatModel) -> None:
|
||||
"""Test audio content block processing with transcription."""
|
||||
if not self.supports_audio_content_blocks:
|
||||
pytest.skip("Model does not support audio inputs.")
|
||||
@ -213,7 +213,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
if result.text:
|
||||
assert len(result.text) > 10 # Substantial response
|
||||
|
||||
def test_complex_multimodal_reasoning(self, model: BaseChatModelV1) -> None:
|
||||
def test_complex_multimodal_reasoning(self, model: BaseChatModel) -> None:
|
||||
"""Test complex reasoning with multiple content types."""
|
||||
# TODO: come back to this, seems like a unique scenario
|
||||
if not self.supports_multimodal_reasoning:
|
||||
@ -251,7 +251,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
]
|
||||
assert len(reasoning_blocks) > 0
|
||||
|
||||
def test_citation_generation_with_sources(self, model: BaseChatModelV1) -> None:
|
||||
def test_citation_generation_with_sources(self, model: BaseChatModel) -> None:
|
||||
"""Test that the model can generate ``Citations`` with source links."""
|
||||
if not self.supports_structured_citations:
|
||||
pytest.skip("Model does not support structured citations.")
|
||||
@ -293,7 +293,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
assert "start_index" in annotation
|
||||
assert "end_index" in annotation
|
||||
|
||||
def test_web_search_integration(self, model: BaseChatModelV1) -> None:
|
||||
def test_web_search_integration(self, model: BaseChatModel) -> None:
|
||||
"""Test web search content blocks integration."""
|
||||
if not self.supports_web_search_blocks:
|
||||
pytest.skip("Model does not support web search blocks.")
|
||||
@ -319,7 +319,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
# TODO: should this be one or the other or both?
|
||||
assert len(search_call_blocks) > 0 or len(search_result_blocks) > 0
|
||||
|
||||
def test_code_interpreter_blocks(self, model: BaseChatModelV1) -> None:
|
||||
def test_code_interpreter_blocks(self, model: BaseChatModel) -> None:
|
||||
"""Test code interpreter content blocks."""
|
||||
if not self.supports_code_interpreter:
|
||||
pytest.skip("Model does not support code interpreter blocks.")
|
||||
@ -344,7 +344,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
# TODO: should we require all three types or just an output/result?
|
||||
assert len(code_blocks) > 0
|
||||
|
||||
def test_tool_calling_with_content_blocks(self, model: BaseChatModelV1) -> None:
|
||||
def test_tool_calling_with_content_blocks(self, model: BaseChatModel) -> None:
|
||||
"""Test tool calling with content blocks."""
|
||||
if not self.has_tool_calling:
|
||||
pytest.skip("Model does not support tool calls.")
|
||||
@ -364,7 +364,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
_validate_tool_call_message(result)
|
||||
|
||||
def test_plaintext_content_blocks_from_documents(
|
||||
self, model: BaseChatModelV1
|
||||
self, model: BaseChatModel
|
||||
) -> None:
|
||||
"""Test PlainTextContentBlock for document plaintext content."""
|
||||
if not self.supports_plaintext_content_blocks:
|
||||
@ -384,7 +384,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
assert isinstance(result, AIMessage)
|
||||
# TODO expand
|
||||
|
||||
def test_content_block_streaming_integration(self, model: BaseChatModelV1) -> None:
|
||||
def test_content_block_streaming_integration(self, model: BaseChatModel) -> None:
|
||||
"""Test streaming with content blocks."""
|
||||
if not self.supports_content_blocks_v1:
|
||||
pytest.skip("Model does not support content blocks v1.")
|
||||
@ -413,7 +413,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
assert isinstance(final_message.content, list)
|
||||
|
||||
def test_error_handling_with_invalid_content_blocks(
|
||||
self, model: BaseChatModelV1
|
||||
self, model: BaseChatModel
|
||||
) -> None:
|
||||
"""Test error handling with various invalid content block configurations."""
|
||||
if not self.supports_content_blocks_v1:
|
||||
@ -436,9 +436,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
# Acceptable to raise validation errors
|
||||
assert len(str(e)) > 0
|
||||
|
||||
async def test_async_content_blocks_processing(
|
||||
self, model: BaseChatModelV1
|
||||
) -> None:
|
||||
async def test_async_content_blocks_processing(self, model: BaseChatModel) -> None:
|
||||
"""Test asynchronous processing of content blocks."""
|
||||
if not self.supports_content_blocks_v1:
|
||||
pytest.skip("Model does not support content blocks v1.")
|
||||
@ -448,7 +446,7 @@ class ChatModelV1IntegrationTests(ChatModelV1Tests):
|
||||
result = await model.ainvoke([message])
|
||||
assert isinstance(result, AIMessage)
|
||||
|
||||
def test_content_blocks_with_callbacks(self, model: BaseChatModelV1) -> None:
|
||||
def test_content_blocks_with_callbacks(self, model: BaseChatModel) -> None:
|
||||
"""Test that content blocks work correctly with callback handlers."""
|
||||
if not self.supports_content_blocks_v1:
|
||||
pytest.skip("Model does not support content blocks v1.")
|
||||
|
@ -10,7 +10,6 @@ content blocks system.
from typing import Literal, cast

import pytest
from langchain_core.language_models.v1.chat_models import BaseChatModelV1
from langchain_core.load import dumpd, load
from langchain_core.messages.content_blocks import (
ContentBlock,
@ -24,8 +23,9 @@ from langchain_core.messages.content_blocks import (
is_text_block,
is_tool_call_block,
)
from langchain_core.messages.v1 import AIMessage, HumanMessage
from langchain_core.tools import tool
from langchain_core.v1.chat_models import BaseChatModel
from langchain_core.v1.messages import AIMessage, HumanMessage

from langchain_tests.base import BaseStandardTests

@ -205,26 +205,26 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
"""

# Core Method Tests
def test_invoke_basic(self, model: BaseChatModelV1) -> None:
def test_invoke_basic(self, model: BaseChatModel) -> None:
"""Test basic invoke functionality with simple string input."""
result = model.invoke("Hello, world!")
assert isinstance(result, AIMessage)
assert result.content is not None

def test_invoke_with_message_list(self, model: BaseChatModelV1) -> None:
def test_invoke_with_message_list(self, model: BaseChatModel) -> None:
"""Test invoke with list of messages."""
messages = [HumanMessage("Hello, world!")]
result = model.invoke(messages)
assert isinstance(result, AIMessage)
assert result.content is not None

async def test_ainvoke_basic(self, model: BaseChatModelV1) -> None:
async def test_ainvoke_basic(self, model: BaseChatModel) -> None:
"""Test basic async invoke functionality."""
result = await model.ainvoke("Hello, world!")
assert isinstance(result, AIMessage)
assert result.content is not None

def test_stream_basic(self, model: BaseChatModelV1) -> None:
def test_stream_basic(self, model: BaseChatModel) -> None:
"""Test basic streaming functionality."""
chunks = []
for chunk in model.stream("Hello, world!"):
@ -239,7 +239,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
final_message = final_message + chunk
assert isinstance(final_message.content, (str, list))

async def test_astream_basic(self, model: BaseChatModelV1) -> None:
async def test_astream_basic(self, model: BaseChatModel) -> None:
"""Test basic async streaming functionality."""
chunks = []
async for chunk in model.astream("Hello, world!"):
@ -255,19 +255,19 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert isinstance(final_message.content, (str, list))

# Property Tests
def test_llm_type_property(self, model: BaseChatModelV1) -> None:
def test_llm_type_property(self, model: BaseChatModel) -> None:
"""Test that ``_llm_type`` property is implemented and returns a string."""
llm_type = model._llm_type
assert isinstance(llm_type, str)
assert len(llm_type) > 0

def test_identifying_params_property(self, model: BaseChatModelV1) -> None:
def test_identifying_params_property(self, model: BaseChatModel) -> None:
"""Test that ``_identifying_params`` property returns a mapping."""
params = model._identifying_params
assert isinstance(params, dict)  # Should be dict-like mapping

# Serialization Tests
def test_dump_serialization(self, model: BaseChatModelV1) -> None:
def test_dump_serialization(self, model: BaseChatModel) -> None:
"""Test that ``dump()`` returns proper serialization."""
dumped = model.dump()
assert isinstance(dumped, dict)
@ -280,20 +280,20 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert dumped[key] == value

# Input Conversion Tests
def test_input_conversion_string(self, model: BaseChatModelV1) -> None:
def test_input_conversion_string(self, model: BaseChatModel) -> None:
"""Test that string input is properly converted to messages."""
# This test verifies the _convert_input method works correctly
result = model.invoke("Test string input")
assert isinstance(result, AIMessage)
assert result.content is not None

def test_input_conversion_empty_string(self, model: BaseChatModelV1) -> None:
def test_input_conversion_empty_string(self, model: BaseChatModel) -> None:
"""Test that empty string input is handled gracefully."""
result = model.invoke("")
assert isinstance(result, AIMessage)
# Content might be empty or some default response

def test_input_conversion_message_v1_list(self, model: BaseChatModelV1) -> None:
def test_input_conversion_message_v1_list(self, model: BaseChatModel) -> None:
"""Test that v1 message list input is handled correctly."""
messages = [HumanMessage("Test message")]
result = model.invoke(messages)
@ -301,7 +301,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert result.content is not None

# Batch Processing Tests
def test_batch_basic(self, model: BaseChatModelV1) -> None:
def test_batch_basic(self, model: BaseChatModel) -> None:
"""Test basic batch processing functionality."""
inputs = ["Hello", "How are you?", "Goodbye"]
results = model.batch(inputs)  # type: ignore[arg-type]
@ -312,7 +312,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert isinstance(result, AIMessage)
assert result.content is not None

async def test_abatch_basic(self, model: BaseChatModelV1) -> None:
async def test_abatch_basic(self, model: BaseChatModel) -> None:
"""Test basic async batch processing functionality."""
inputs = ["Hello", "How are you?", "Goodbye"]
results = await model.abatch(inputs)  # type: ignore[arg-type]
@ -324,7 +324,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert result.content is not None

# Content Block Tests
def test_text_content_blocks(self, model: BaseChatModelV1) -> None:
def test_text_content_blocks(self, model: BaseChatModel) -> None:
"""Test that the model can handle the ``TextContentBlock`` format.

This test verifies that the model correctly processes messages containing
@ -340,7 +340,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert isinstance(result, AIMessage)
assert result.content is not None

def test_mixed_content_blocks(self, model: BaseChatModelV1) -> None:
def test_mixed_content_blocks(self, model: BaseChatModel) -> None:
"""Test that the model can handle messages with mixed content blocks."""
if not (
self.supports_text_content_blocks and self.supports_image_content_blocks
@ -363,7 +363,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert isinstance(result, AIMessage)
assert result.content is not None

def test_reasoning_content_blocks(self, model: BaseChatModelV1) -> None:
def test_reasoning_content_blocks(self, model: BaseChatModel) -> None:
"""Test that the model can generate ``ReasoningContentBlock``."""
if not self.supports_reasoning_content_blocks:
pytest.skip("Model does not support ReasoningContentBlock.")
@ -380,7 +380,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
]
assert len(reasoning_blocks) > 0

def test_citations_in_response(self, model: BaseChatModelV1) -> None:
def test_citations_in_response(self, model: BaseChatModel) -> None:
"""Test that the model can generate ``Citations`` in text blocks."""
if not self.supports_citations:
pytest.skip("Model does not support citations.")
@ -416,7 +416,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
)
assert has_citation, "No citations found in text blocks."

def test_non_standard_content_blocks(self, model: BaseChatModelV1) -> None:
def test_non_standard_content_blocks(self, model: BaseChatModel) -> None:
"""Test that the model can handle ``NonStandardContentBlock``."""
if not self.supports_non_standard_blocks:
pytest.skip("Model does not support NonStandardContentBlock.")
@ -435,7 +435,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert isinstance(result, AIMessage)

def test_enhanced_tool_calls_with_content_blocks(
self, model: BaseChatModelV1
self, model: BaseChatModel
) -> None:
"""Test enhanced tool calling with content blocks format."""
if not self.has_tool_calling:
@ -465,7 +465,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
# # Fallback to legacy tool_calls attribute
# assert hasattr(result, "tool_calls") and result.tool_calls

def test_invalid_tool_call_handling(self, model: BaseChatModelV1) -> None:
def test_invalid_tool_call_handling(self, model: BaseChatModel) -> None:
"""Test that the model can handle ``InvalidToolCall`` blocks gracefully."""
if not self.supports_invalid_tool_calls:
pytest.skip("Model does not support InvalidToolCall handling.")
@ -487,7 +487,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert result.content is not None
# TODO: enhance/double check this

def test_web_search_content_blocks(self, model: BaseChatModelV1) -> None:
def test_web_search_content_blocks(self, model: BaseChatModel) -> None:
"""Test generating ``WebSearchCall``/``WebSearchResult`` blocks."""
if not self.supports_web_search_blocks:
pytest.skip("Model does not support web search blocks.")
@ -505,7 +505,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
]
assert len(search_blocks) > 0

def test_file_content_blocks(self, model: BaseChatModelV1) -> None:
def test_file_content_blocks(self, model: BaseChatModel) -> None:
"""Test that the model can handle ``FileContentBlock``."""
if not self.supports_file_content_blocks:
pytest.skip("Model does not support FileContentBlock.")
@ -522,7 +522,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert result.content is not None
# TODO: make more robust?

def test_content_block_streaming(self, model: BaseChatModelV1) -> None:
def test_content_block_streaming(self, model: BaseChatModel) -> None:
"""Test that content blocks work correctly with streaming."""
if not self.supports_content_blocks_v1:
pytest.skip("Model does not support content blocks v1.")
@ -544,7 +544,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):

assert isinstance(final_message.content, (str, list))

def test_content_block_serialization(self, model: BaseChatModelV1) -> None:
def test_content_block_serialization(self, model: BaseChatModel) -> None:
"""Test that messages with content blocks can be serialized/deserialized."""
if not self.supports_content_blocks_v1:
pytest.skip("Model does not support content blocks v1.")
@ -562,7 +562,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert deserialized.content == message.content
# TODO: make more robust

def test_backwards_compatibility(self, model: BaseChatModelV1) -> None:
def test_backwards_compatibility(self, model: BaseChatModel) -> None:
"""Test that models still work with legacy string content."""
# This should work regardless of content blocks support
legacy_message = HumanMessage("Hello, world!")
@ -577,7 +577,7 @@ class ChatModelV1UnitTests(ChatModelV1Tests):
assert isinstance(result_named_param, AIMessage)
assert result_named_param.content is not None

def test_content_block_validation(self, model: BaseChatModelV1) -> None:
def test_content_block_validation(self, model: BaseChatModel) -> None:
"""Test that invalid content blocks are handled gracefully."""
if not self.supports_content_blocks_v1:
pytest.skip("Model does not support content blocks v1.")

@ -1,6 +1,6 @@
"""``ChatParrotLinkV1`` implementation for standard-tests with v1 messages.

This module provides a test implementation of ``BaseChatModelV1`` that supports the new
This module provides a test implementation of ``BaseChatModel`` that supports the new
v1 message format with content blocks.
"""

@ -9,13 +9,13 @@ from typing import Any, Optional, cast

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun
from langchain_core.language_models.v1.chat_models import BaseChatModelV1
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.v1 import AIMessage, AIMessageChunk, MessageV1
from langchain_core.v1.chat_models import BaseChatModel
from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1
from pydantic import Field


class ChatParrotLinkV1(BaseChatModelV1):
class ChatParrotLinkV1(BaseChatModel):
"""A custom v1 chat model that echoes input with content blocks support.

This model is designed for testing the v1 message format and content blocks. Echoes