mirror of https://github.com/hwchase17/langchain.git
synced 2025-08-14 15:16:21 +00:00

commit 733da01bd4
Merge branch 'wip-v0.4' into mdrxy/ollama_v1

@@ -108,7 +108,6 @@ from uuid import uuid4

from typing_extensions import NotRequired, TypedDict, TypeGuard, get_args, get_origin


def _ensure_id(id_val: Optional[str]) -> str:
    """Ensure the ID is a valid string, generating a new UUID if not provided.

@@ -314,7 +313,6 @@ class ToolCallChunk(TypedDict):

            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
        ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
    """

    # TODO: Consider making fields NotRequired[str] in the future.

    type: NotRequired[Literal["tool_call_chunk"]]

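The docstring example above is cut off by the hunk boundary; as a sketch, the full merge it illustrates plausibly looks like the following (the left-hand chunk contents are assumptions reconstructed from the visible result):

    # Merging two streamed chunks concatenates partial tool-call JSON
    # that shares the same index.
    left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
    right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

    merged = (
        AIMessageChunk(content="", tool_call_chunks=left_chunks)
        + AIMessageChunk(content="", tool_call_chunks=right_chunks)
    )
    assert merged.tool_call_chunks == [
        ToolCallChunk(name="foo", args='{"a":1}', index=0)
    ]
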
@@ -586,6 +584,9 @@ class ImageContentBlock(TypedDict):

    """Provider-specific metadata."""


class VideoContentBlock(TypedDict):
    """Video data.

@@ -636,7 +637,6 @@ class AudioContentBlock(TypedDict):

    .. note::
        ``create_audio_block`` may also be used as a factory to create an
        ``AudioContentBlock``. Benefits include:

        * Automatic ID generation (when not provided)
        * Required arguments strictly validated at creation time

@@ -659,6 +659,7 @@ class AudioContentBlock(TypedDict):

    """MIME type of the audio. Required for base64.

    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#audio>`__
    """

    index: NotRequired[int]

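As a sketch of why the MIME type is required for base64 data (the "base64" key name is an assumption inferred from the "Required for base64" wording; it does not appear in this hunk):

    # A base64 audio block must carry a MIME type so consumers know how to
    # decode the payload; URL-based blocks can rely on the server's
    # Content-Type header instead.
    audio_block = {
        "type": "audio",
        "base64": "UklGRiQAAABXQVZF...",  # truncated illustrative payload
        "mime_type": "audio/wav",
    }
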
@@ -762,8 +763,9 @@ class FileContentBlock(TypedDict):

    mime_type: NotRequired[str]
    """MIME type of the file. Required for base64.

    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__
    """

    index: NotRequired[int]

@@ -822,6 +824,48 @@ class NonStandardContentBlock(TypedDict):

# Future modalities to consider:
# - 3D models
# - Tabular data


class NonStandardContentBlock(TypedDict):
    """Provider-specific data.

    This block contains data for which there is not yet a standard type.

    The purpose of this block should be to simply hold a provider-specific payload.
    If a provider's non-standard output includes reasoning and tool calls, it should
    be the adapter's job to parse that payload and emit the corresponding standard
    ReasoningContentBlock and ToolCallContentBlocks.

    .. note::
        ``create_non_standard_block`` may also be used as a factory to create a
        ``NonStandardContentBlock``. Benefits include:

        * Automatic ID generation (when not provided)
        * Required arguments strictly validated at creation time

    """

    type: Literal["non_standard"]
    """Type of the content block. Used for discrimination."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)
    """

    value: dict[str, Any]
    """Provider-specific data."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""


# --- Aliases ---
DataContentBlock = Union[
    ImageContentBlock,
    VideoContentBlock,

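As a usage sketch, an adapter might wrap an unrecognized provider payload like this (the factory call is commented out because its exact signature is not shown in this diff):

    # Hold the provider payload untouched; parsing standard content blocks
    # out of it is the adapter's job, per the docstring above.
    payload = {"provider_feature": "thinking_trace", "data": {"steps": 3}}

    block: NonStandardContentBlock = {
        "type": "non_standard",
        "value": payload,
    }

    # Assumed-equivalent factory form, with automatic ID generation:
    # block = create_non_standard_block(value=payload)
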
@@ -955,7 +999,6 @@ def is_invalid_tool_call_block(

    """Type guard to check if a content block is an invalid tool call."""
    return block.get("type") == "invalid_tool_call"


def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
    """Convert image content block to format expected by OpenAI Chat Completions API."""
    if "url" in block:

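Only the start of the url branch is visible here; a hedged sketch of the expected mapping, assuming the standard Chat Completions image_url shape (the exact return value is not shown in this diff):

    block = {"type": "image", "url": "https://example.com/cat.png"}
    result = convert_to_openai_image_block(block)
    # Plausibly: {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}
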
@@ -505,6 +505,7 @@ class ChildTool(BaseTool):

    If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
    If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
    """

    def __init__(self, **kwargs: Any) -> None:

@@ -368,7 +368,7 @@ class AIMessageChunk(AIMessage):

        raise NotImplementedError(error_msg)

    def to_message(self) -> "AIMessage":
-        """Convert this ``AIMessageChunk`` to an AIMessage."""
+        """Convert this ``AIMessageChunk`` to an ``AIMessage``."""
        return AIMessage(
            content=_init_tool_calls(self.content),
            id=self.id,

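A short usage sketch of the streaming pattern this method closes out, using only the APIs visible in this diff (chunk contents are illustrative):

    # Aggregate streamed chunks with +, then finalize into a complete message.
    chunk = AIMessageChunk(content="Hello ") + AIMessageChunk(content="world!")
    message = chunk.to_message()  # AIMessage; tool calls initialized from content
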
@@ -1,3 +1,3 @@

"""langchain-core version information and utilities."""

-VERSION = "0.3.72"
+VERSION = "0.4.0.dev0"

@@ -16,7 +16,7 @@ dependencies = [

    "pydantic>=2.7.4",
]
name = "langchain-core"
-version = "0.3.72"
+version = "0.4.0.dev0"
description = "Building applications with LLMs through composability"
readme = "README.md"

@@ -0,0 +1,361 @@

"""Unit tests for ResponseMetadata TypedDict."""

from langchain_core.messages.v1 import AIMessage, AIMessageChunk, ResponseMetadata


class TestResponseMetadata:
    """Test the ResponseMetadata TypedDict functionality."""

    def test_response_metadata_basic_fields(self) -> None:
        """Test ResponseMetadata with basic required fields."""
        metadata: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-4",
        }

        assert metadata.get("model_provider") == "openai"
        assert metadata.get("model_name") == "gpt-4"

    def test_response_metadata_is_optional(self) -> None:
        """Test that ResponseMetadata fields are optional due to total=False."""
        # Should be able to create empty ResponseMetadata
        metadata: ResponseMetadata = {}
        assert metadata == {}

        # Should be able to create with just one field
        metadata_partial: ResponseMetadata = {"model_provider": "anthropic"}
        assert metadata_partial.get("model_provider") == "anthropic"
        assert "model_name" not in metadata_partial

    def test_response_metadata_supports_extra_fields(self) -> None:
        """Test that ResponseMetadata supports provider-specific extra fields."""
        metadata: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-4-turbo",
            # Extra fields should be allowed
            "usage": {"input_tokens": 100, "output_tokens": 50},
            "system_fingerprint": "fp_12345",
            "logprobs": None,
            "finish_reason": "stop",
        }

        assert metadata.get("model_provider") == "openai"
        assert metadata.get("model_name") == "gpt-4-turbo"
        assert metadata.get("usage") == {"input_tokens": 100, "output_tokens": 50}
        assert metadata.get("system_fingerprint") == "fp_12345"
        assert metadata.get("logprobs") is None
        assert metadata.get("finish_reason") == "stop"

    def test_response_metadata_various_data_types(self) -> None:
        """Test that ResponseMetadata can store various data types in extra fields."""
        metadata: ResponseMetadata = {
            "model_provider": "anthropic",
            "model_name": "claude-3-sonnet",
            "string_field": "test_value",  # type: ignore[typeddict-unknown-key]
            "int_field": 42,  # type: ignore[typeddict-unknown-key]
            "float_field": 3.14,  # type: ignore[typeddict-unknown-key]
            "bool_field": True,  # type: ignore[typeddict-unknown-key]
            "none_field": None,  # type: ignore[typeddict-unknown-key]
            "list_field": [1, 2, 3, "test"],  # type: ignore[typeddict-unknown-key]
            "dict_field": {  # type: ignore[typeddict-unknown-key]
                "nested": {"deeply": "nested_value"}
            },
        }

        assert metadata.get("string_field") == "test_value"  # type: ignore[typeddict-item]
        assert metadata.get("int_field") == 42  # type: ignore[typeddict-item]
        assert metadata.get("float_field") == 3.14  # type: ignore[typeddict-item]
        assert metadata.get("bool_field") is True  # type: ignore[typeddict-item]
        assert metadata.get("none_field") is None  # type: ignore[typeddict-item]

        list_field = metadata.get("list_field")  # type: ignore[typeddict-item]
        assert isinstance(list_field, list)
        assert list_field == [1, 2, 3, "test"]

        dict_field = metadata.get("dict_field")  # type: ignore[typeddict-item]
        assert isinstance(dict_field, dict)
        nested = dict_field.get("nested")  # type: ignore[union-attr]
        assert isinstance(nested, dict)
        assert nested.get("deeply") == "nested_value"  # type: ignore[union-attr]

    def test_response_metadata_can_be_modified(self) -> None:
        """Test that ResponseMetadata can be modified after creation."""
        metadata: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-3.5-turbo",
        }

        # Modify existing fields
        metadata["model_name"] = "gpt-4"
        assert metadata.get("model_name") == "gpt-4"

        # Add new fields
        metadata["request_id"] = "req_12345"  # type: ignore[typeddict-unknown-key]
        assert metadata.get("request_id") == "req_12345"  # type: ignore[typeddict-item]

        # Modify nested structures
        metadata["usage"] = {"input_tokens": 10}  # type: ignore[typeddict-unknown-key]
        metadata["usage"]["output_tokens"] = 20  # type: ignore[typeddict-item]

        usage = metadata.get("usage")  # type: ignore[typeddict-item]
        assert isinstance(usage, dict)
        assert usage.get("input_tokens") == 10  # type: ignore[union-attr]
        assert usage.get("output_tokens") == 20  # type: ignore[union-attr]

    def test_response_metadata_provider_specific_examples(self) -> None:
        """Test ResponseMetadata with realistic provider-specific examples."""
        # OpenAI-style metadata
        openai_metadata: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-4-turbo-2024-04-09",
            "usage": {  # type: ignore[typeddict-unknown-key]
                "prompt_tokens": 50,
                "completion_tokens": 25,
                "total_tokens": 75,
            },
            "system_fingerprint": "fp_abc123",  # type: ignore[typeddict-unknown-key]
            "created": 1234567890,  # type: ignore[typeddict-unknown-key]
            "logprobs": None,  # type: ignore[typeddict-unknown-key]
            "finish_reason": "stop",  # type: ignore[typeddict-unknown-key]
        }

        assert openai_metadata.get("model_provider") == "openai"
        assert openai_metadata.get("system_fingerprint") == "fp_abc123"  # type: ignore[typeddict-item]

        # Anthropic-style metadata
        anthropic_metadata: ResponseMetadata = {
            "model_provider": "anthropic",
            "model_name": "claude-3-sonnet-20240229",
            "usage": {  # type: ignore[typeddict-unknown-key]
                "input_tokens": 75,
                "output_tokens": 30,
            },
            "stop_reason": "end_turn",  # type: ignore[typeddict-unknown-key]
            "stop_sequence": None,  # type: ignore[typeddict-unknown-key]
        }

        assert anthropic_metadata.get("model_provider") == "anthropic"
        assert anthropic_metadata.get("stop_reason") == "end_turn"  # type: ignore[typeddict-item]

        # Custom provider metadata
        custom_metadata: ResponseMetadata = {
            "model_provider": "custom_llm_service",
            "model_name": "custom-model-v1",
            "service_tier": "premium",  # type: ignore[typeddict-unknown-key]
            "rate_limit_info": {  # type: ignore[typeddict-unknown-key]
                "requests_remaining": 100,
                "reset_time": "2024-01-01T00:00:00Z",
            },
            "response_time_ms": 1250,  # type: ignore[typeddict-unknown-key]
        }

        assert custom_metadata.get("service_tier") == "premium"  # type: ignore[typeddict-item]
        rate_limit = custom_metadata.get("rate_limit_info")  # type: ignore[typeddict-item]
        assert isinstance(rate_limit, dict)
        assert rate_limit.get("requests_remaining") == 100  # type: ignore[union-attr]


class TestResponseMetadataWithAIMessages:
    """Test ResponseMetadata integration with AI message classes."""

    def test_ai_message_with_response_metadata(self) -> None:
        """Test AIMessage with ResponseMetadata."""
        metadata: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-4",
            "usage": {"input_tokens": 10, "output_tokens": 5},  # type: ignore[typeddict-unknown-key]
        }

        message = AIMessage(content="Hello, world!", response_metadata=metadata)

        assert message.response_metadata == metadata
        assert message.response_metadata.get("model_provider") == "openai"
        assert message.response_metadata.get("model_name") == "gpt-4"

        usage = message.response_metadata.get("usage")  # type: ignore[typeddict-item]
        assert isinstance(usage, dict)
        assert usage.get("input_tokens") == 10  # type: ignore[union-attr]

    def test_ai_message_chunk_with_response_metadata(self) -> None:
        """Test AIMessageChunk with ResponseMetadata."""
        metadata: ResponseMetadata = {
            "model_provider": "anthropic",
            "model_name": "claude-3-sonnet",
            "stream_id": "stream_12345",  # type: ignore[typeddict-unknown-key]
        }

        chunk = AIMessageChunk(content="Hello", response_metadata=metadata)

        assert chunk.response_metadata == metadata
        assert chunk.response_metadata.get("stream_id") == "stream_12345"  # type: ignore[typeddict-item]

    def test_ai_message_default_empty_response_metadata(self) -> None:
        """Test that AIMessage creates empty ResponseMetadata by default."""
        message = AIMessage(content="Test message")

        # Should have empty dict as default
        assert message.response_metadata == {}
        assert isinstance(message.response_metadata, dict)

    def test_ai_message_chunk_default_empty_response_metadata(self) -> None:
        """Test that AIMessageChunk creates empty ResponseMetadata by default."""
        chunk = AIMessageChunk(content="Test chunk")

        # Should have empty dict as default
        assert chunk.response_metadata == {}
        assert isinstance(chunk.response_metadata, dict)

    def test_response_metadata_merging_in_chunks(self) -> None:
        """Test that ResponseMetadata is properly merged when adding AIMessageChunks."""
        metadata1: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-4",
            "request_id": "req_123",  # type: ignore[typeddict-unknown-key]
            "usage": {"input_tokens": 10},  # type: ignore[typeddict-unknown-key]
        }

        metadata2: ResponseMetadata = {
            "stream_chunk": 1,  # type: ignore[typeddict-unknown-key]
            "usage": {"output_tokens": 5},  # type: ignore[typeddict-unknown-key]
        }

        chunk1 = AIMessageChunk(content="Hello ", response_metadata=metadata1)
        chunk2 = AIMessageChunk(content="world!", response_metadata=metadata2)

        merged = chunk1 + chunk2

        # Should have merged response_metadata
        assert merged.response_metadata.get("model_provider") == "openai"
        assert merged.response_metadata.get("model_name") == "gpt-4"
        assert merged.response_metadata.get("request_id") == "req_123"  # type: ignore[typeddict-item]
        assert merged.response_metadata.get("stream_chunk") == 1  # type: ignore[typeddict-item]

        # Usage should be merged (from merge_dicts behavior)
        merged_usage = merged.response_metadata.get("usage")  # type: ignore[typeddict-item]
        assert isinstance(merged_usage, dict)
        assert merged_usage.get("input_tokens") == 10  # type: ignore[union-attr]
        assert merged_usage.get("output_tokens") == 5  # type: ignore[union-attr]

    def test_response_metadata_modification_after_message_creation(self) -> None:
        """Test that ResponseMetadata can be modified after message creation."""
        message = AIMessage(
            content="Initial message",
            response_metadata={"model_provider": "openai", "model_name": "gpt-3.5"},
        )

        # Modify existing field
        message.response_metadata["model_name"] = "gpt-4"
        assert message.response_metadata.get("model_name") == "gpt-4"

        # Add new field
        message.response_metadata["finish_reason"] = "stop"  # type: ignore[typeddict-unknown-key]
        assert message.response_metadata.get("finish_reason") == "stop"  # type: ignore[typeddict-item]

    def test_response_metadata_with_none_values(self) -> None:
        """Test ResponseMetadata handling of None values."""
        metadata: ResponseMetadata = {
            "model_provider": "openai",
            "model_name": "gpt-4",
            "system_fingerprint": None,  # type: ignore[typeddict-unknown-key]
            "logprobs": None,  # type: ignore[typeddict-unknown-key]
        }

        message = AIMessage(content="Test", response_metadata=metadata)

        assert message.response_metadata.get("system_fingerprint") is None  # type: ignore[typeddict-item]
        assert message.response_metadata.get("logprobs") is None  # type: ignore[typeddict-item]
        assert "system_fingerprint" in message.response_metadata
        assert "logprobs" in message.response_metadata


class TestResponseMetadataEdgeCases:
    """Test edge cases and error conditions for ResponseMetadata."""

    def test_response_metadata_with_complex_nested_structures(self) -> None:
        """Test ResponseMetadata with deeply nested and complex structures."""
        metadata: ResponseMetadata = {
            "model_provider": "custom",
            "model_name": "complex-model",
            "complex_data": {  # type: ignore[typeddict-unknown-key]
                "level1": {
                    "level2": {
                        "level3": {
                            "deeply_nested": "value",
                            "array": [
                                {"item": 1, "metadata": {"nested": True}},
                                {"item": 2, "metadata": {"nested": False}},
                            ],
                        }
                    }
                }
            },
        }

        complex_data = metadata.get("complex_data")  # type: ignore[typeddict-item]
        assert isinstance(complex_data, dict)
        level1 = complex_data.get("level1")  # type: ignore[union-attr]
        assert isinstance(level1, dict)
        level2 = level1.get("level2")  # type: ignore[union-attr]
        assert isinstance(level2, dict)
        level3 = level2.get("level3")  # type: ignore[union-attr]
        assert isinstance(level3, dict)

        assert level3.get("deeply_nested") == "value"  # type: ignore[union-attr]
        array = level3.get("array")  # type: ignore[union-attr]
        assert isinstance(array, list)
        assert len(array) == 2  # type: ignore[arg-type]
        assert array[0]["item"] == 1  # type: ignore[index, typeddict-item]
        assert array[0]["metadata"]["nested"] is True  # type: ignore[index, typeddict-item]

    def test_response_metadata_large_data(self) -> None:
        """Test ResponseMetadata with large amounts of data."""
        # Create metadata with many fields
        large_metadata: ResponseMetadata = {
            "model_provider": "test_provider",
            "model_name": "test_model",
        }

        # Add 100 extra fields
        for i in range(100):
            large_metadata[f"field_{i}"] = f"value_{i}"  # type: ignore[literal-required]

        message = AIMessage(content="Test", response_metadata=large_metadata)

        # Verify all fields are accessible
        assert message.response_metadata.get("model_provider") == "test_provider"
        for i in range(100):
            assert message.response_metadata.get(f"field_{i}") == f"value_{i}"  # type: ignore[typeddict-item]

    def test_response_metadata_empty_vs_none(self) -> None:
        """Test the difference between empty ResponseMetadata and None."""
        # Message with empty metadata
        message_empty = AIMessage(content="Test", response_metadata={})
        assert message_empty.response_metadata == {}
        assert isinstance(message_empty.response_metadata, dict)

        # Message with None metadata (should become empty dict)
        message_none = AIMessage(content="Test", response_metadata=None)
        assert message_none.response_metadata == {}
        assert isinstance(message_none.response_metadata, dict)

        # Default message (no metadata specified)
        message_default = AIMessage(content="Test")
        assert message_default.response_metadata == {}
        assert isinstance(message_default.response_metadata, dict)

    def test_response_metadata_preserves_original_dict_type(self) -> None:
        """Test that ResponseMetadata preserves the original dict when passed."""
        original_dict = {
            "model_provider": "openai",
            "model_name": "gpt-4",
            "custom_field": "custom_value",
        }

        message = AIMessage(content="Test", response_metadata=original_dict)

        # Should be the same dict object
        assert message.response_metadata is original_dict

        # Modifications to the message's response_metadata should affect original
        message.response_metadata["new_field"] = "new_value"  # type: ignore[typeddict-unknown-key]
        assert original_dict.get("new_field") == "new_value"  # type: ignore[typeddict-item]

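The tests above imply a definition roughly like the following; this is an inference from the observed behavior (total=False, two known keys, extra keys tolerated at runtime), not the actual source in langchain_core.messages.v1:

    from typing_extensions import TypedDict

    class ResponseMetadata(TypedDict, total=False):
        model_provider: str  # e.g. "openai", "anthropic"
        model_name: str      # e.g. "gpt-4-turbo-2024-04-09"

    # Extra provider-specific keys are ordinary dict entries at runtime,
    # which is why the tests silence the type checker with
    # `# type: ignore[typeddict-unknown-key]` when writing them.
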
@@ -987,7 +987,7 @@ wheels = [

[[package]]
name = "langchain-core"
-version = "0.3.72"
+version = "0.4.0.dev0"
source = { editable = "." }
dependencies = [
    { name = "jsonpatch" },

@@ -7,7 +7,7 @@ authors = []

license = { text = "MIT" }
requires-python = ">=3.9, <4.0"
dependencies = [
-    "langchain-core<1.0.0,>=0.3.72",
+    "langchain-core<1.0.0,>=0.4.0.dev0",
    "langchain-text-splitters<1.0.0,>=0.3.9",
    "langsmith>=0.1.17",
    "pydantic<3.0.0,>=2.7.4",

@@ -17,7 +17,7 @@ dependencies = [

    "async-timeout<5.0.0,>=4.0.0; python_version < \"3.11\"",
]
name = "langchain"
-version = "0.3.27"
+version = "0.4.0.dev0"
description = "Building applications with LLMs through composability"
readme = "README.md"

@@ -25,9 +25,9 @@ readme = "README.md"

community = ["langchain-community"]
anthropic = ["langchain-anthropic"]
openai = ["langchain-openai"]
-azure-ai = ["langchain-azure-ai"]
-cohere = ["langchain-cohere"]
-google-vertexai = ["langchain-google-vertexai"]
+# azure-ai = ["langchain-azure-ai"]
+# cohere = ["langchain-cohere"]
+# google-vertexai = ["langchain-google-vertexai"]
google-genai = ["langchain-google-genai"]
fireworks = ["langchain-fireworks"]
ollama = ["langchain-ollama"]

@@ -35,9 +35,9 @@ together = ["langchain-together"]

mistralai = ["langchain-mistralai"]
huggingface = ["langchain-huggingface"]
groq = ["langchain-groq"]
-aws = ["langchain-aws"]
+# aws = ["langchain-aws"]
deepseek = ["langchain-deepseek"]
-xai = ["langchain-xai"]
+# xai = ["langchain-xai"]
perplexity = ["langchain-perplexity"]

[project.urls]

File diff suppressed because it is too large

@@ -7,12 +7,12 @@ authors = []

license = { text = "MIT" }
requires-python = ">=3.9"
dependencies = [
-    "langchain-core<1.0.0,>=0.3.68",
+    "langchain-core<1.0.0,>=0.4.0.dev0",
    "openai<2.0.0,>=1.86.0",
    "tiktoken<1,>=0.7",
]
name = "langchain-openai"
-version = "0.3.28"
+version = "0.4.0.dev0"
description = "An integration package connecting OpenAI and LangChain"
readme = "README.md"

||||
@ -92,4 +92,4 @@ filterwarnings = [
|
||||
"tests/**/*.py" = [
|
||||
"S101", # Tests need assertions
|
||||
"S311", # Standard pseudo-random generators are not suitable for cryptographic purposes
|
||||
]
|
||||
]
|
||||
|
@@ -480,7 +480,7 @@ wheels = [

[[package]]
name = "langchain-core"
-version = "0.3.72"
+version = "0.4.0.dev0"
source = { editable = "../../core" }
dependencies = [
    { name = "jsonpatch" },

@@ -538,7 +538,7 @@ typing = [

[[package]]
name = "langchain-openai"
-version = "0.3.28"
+version = "0.4.0.dev0"
source = { editable = "." }
dependencies = [
    { name = "langchain-core" },