Merge remote-tracking branch 'origin/standard_outputs_copy' into mdrxy/ollama_v1

Mason Daugherty 2025-08-05 09:56:17 -04:00
commit 4651457c7e
58 changed files with 493 additions and 195 deletions

View File

@ -7,7 +7,7 @@ from typing import TYPE_CHECKING, Any, Optional, Union
from typing_extensions import Self
from langchain_core.messages.v1 import AIMessage, AIMessageChunk, MessageV1
from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1
if TYPE_CHECKING:
from collections.abc import Sequence
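
The recurring change across the files below is a namespace move. As a minimal sketch of the before and after, using only the types named in this hunk:

# Before: v1 message types lived under the messages package.
# from langchain_core.messages.v1 import AIMessage, AIMessageChunk, MessageV1

# After: the same types live under a top-level v1 package.
from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1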

View File

@ -38,15 +38,15 @@ from langchain_core.callbacks.base import (
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import (
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, LLMResult
from langchain_core.tracers.schemas import Run
from langchain_core.utils.env import env_var_is_set
from langchain_core.v1.messages import (
AIMessage,
AIMessageChunk,
MessageV1,
MessageV1Types,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, LLMResult
from langchain_core.tracers.schemas import Run
from langchain_core.utils.env import env_var_is_set
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Coroutine, Generator, Sequence

View File

@ -12,8 +12,8 @@ from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import AIMessage, MessageV1
from langchain_core.outputs import LLMResult
from langchain_core.v1.messages import AIMessage, MessageV1
class StreamingStdOutCallbackHandler(BaseCallbackHandler):

View File

@ -12,8 +12,8 @@ from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.v1.messages import AIMessage as AIMessageV1
class UsageMetadataCallbackHandler(BaseCallbackHandler):

View File

@ -4,7 +4,7 @@ from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import MessageV1
from langchain_core.v1.messages import MessageV1
def _is_openai_data_block(block: dict) -> bool:

View File

@ -28,10 +28,10 @@ from langchain_core.messages import (
MessageLikeRepresentation,
get_buffer_string,
)
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.prompt_values import PromptValue
from langchain_core.runnables import Runnable, RunnableSerializable
from langchain_core.utils import get_pydantic_field_names
from langchain_core.v1.messages import AIMessage as AIMessageV1
if TYPE_CHECKING:
from langchain_core.outputs import LLMResult

View File

@ -13,13 +13,13 @@ from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_core.language_models.v1.chat_models import BaseChatModelV1
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import RunnableConfig
from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import MessageV1
class FakeMessagesListChatModel(BaseChatModel):

View File

@ -1 +0,0 @@
"""LangChain v1.0 chat models."""

View File

@ -40,12 +40,12 @@ from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.messages.v1 import MessageV1, MessageV1Types
from langchain_core.messages.v1 import SystemMessage as SystemMessageV1
from langchain_core.messages.v1 import ToolMessage as ToolMessageV1
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import MessageV1, MessageV1Types
from langchain_core.v1.messages import SystemMessage as SystemMessageV1
from langchain_core.v1.messages import ToolMessage as ToolMessageV1
if TYPE_CHECKING:
from langchain_text_splitters import TextSplitter

View File

@ -18,10 +18,10 @@ from typing_extensions import override
from langchain_core.language_models import LanguageModelOutput
from langchain_core.messages import AnyMessage, BaseMessage
from langchain_core.messages.v1 import AIMessage, MessageV1, MessageV1Types
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable
from langchain_core.runnables.config import run_in_executor
from langchain_core.v1.messages import AIMessage, MessageV1, MessageV1Types
if TYPE_CHECKING:
from langchain_core.prompt_values import PromptValue

View File

@ -13,7 +13,6 @@ from pydantic.v1 import BaseModel
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.messages.v1 import AIMessage
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
@ -22,6 +21,7 @@ from langchain_core.utils.json import (
parse_json_markdown,
parse_partial_json,
)
from langchain_core.v1.messages import AIMessage
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]

View File

@ -12,8 +12,8 @@ from typing import TYPE_CHECKING, TypeVar, Union
from typing_extensions import override
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import AIMessage
from langchain_core.output_parsers.transform import BaseTransformOutputParser
from langchain_core.v1.messages import AIMessage
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator

View File

@ -11,13 +11,13 @@ from pydantic.v1 import BaseModel as BaseModelV1
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.messages.v1 import AIMessage
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
)
from langchain_core.output_parsers.json import parse_partial_json
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.v1.messages import AIMessage
class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

View File

@ -12,11 +12,11 @@ from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, InvalidToolCall
from langchain_core.messages.tool import invalid_tool_call
from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.utils.json import parse_partial_json
from langchain_core.utils.pydantic import TypeBaseModel
from langchain_core.v1.messages import AIMessage as AIMessageV1
logger = logging.getLogger(__name__)

View File

@ -8,13 +8,13 @@ from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.messages.v1 import AIMessage
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PydanticBaseModel,
TBaseModel,
)
from langchain_core.v1.messages import AIMessage
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):

View File

@ -12,7 +12,6 @@ from typing import (
from typing_extensions import override
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.messages.v1 import AIMessage, AIMessageChunk
from langchain_core.output_parsers.base import BaseOutputParser, T
from langchain_core.outputs import (
ChatGeneration,
@ -21,6 +20,7 @@ from langchain_core.outputs import (
GenerationChunk,
)
from langchain_core.runnables.config import run_in_executor
from langchain_core.v1.messages import AIMessage, AIMessageChunk
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator

View File

@ -13,9 +13,9 @@ from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import BaseMessage
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import AIMessage
from langchain_core.output_parsers.transform import BaseTransformOutputParser
from langchain_core.runnables.utils import AddableDict
from langchain_core.v1.messages import AIMessage
XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file.
1. Output should conform to the tags below.

View File

@ -23,11 +23,11 @@ from langchain_core.messages import (
get_buffer_string,
)
from langchain_core.messages import content_blocks as types
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.messages.v1 import MessageV1, ResponseMetadata
from langchain_core.messages.v1 import SystemMessage as SystemMessageV1
from langchain_core.messages.v1 import ToolMessage as ToolMessageV1
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import MessageV1, ResponseMetadata
from langchain_core.v1.messages import SystemMessage as SystemMessageV1
from langchain_core.v1.messages import ToolMessage as ToolMessageV1
def _convert_to_v1(message: BaseMessage) -> MessageV1:

View File

@ -2361,6 +2361,7 @@ class Runnable(ABC, Generic[Input, Output]):
name: Optional[str] = None,
description: Optional[str] = None,
arg_types: Optional[dict[str, type]] = None,
output_version: Literal["v0", "v1"] = "v0",
) -> BaseTool:
"""Create a BaseTool from a Runnable.
@ -2376,6 +2377,11 @@ class Runnable(ABC, Generic[Input, Output]):
name: The name of the tool. Defaults to None.
description: The description of the tool. Defaults to None.
arg_types: A dictionary of argument names to types. Defaults to None.
output_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
Returns:
A BaseTool instance.
@ -2451,7 +2457,7 @@ class Runnable(ABC, Generic[Input, Output]):
.. versionadded:: 0.2.14
"""
""" # noqa: E501
# Avoid circular import
from langchain_core.tools import convert_runnable_to_tool
@ -2461,6 +2467,7 @@ class Runnable(ABC, Generic[Input, Output]):
name=name,
description=description,
arg_types=arg_types,
output_version=output_version,
)
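
A hedged sketch of the new parameter on ``Runnable.as_tool``, assuming this branch's API; the ``multiply`` runnable, its name, and its ``arg_types`` are illustrative, not from this diff:

from langchain_core.runnables import RunnableLambda

# Hypothetical runnable; name, description, and arg_types are illustrative.
multiply = RunnableLambda(lambda d: d["a"] * d["b"])

multiply_tool = multiply.as_tool(
    name="multiply",
    description="Multiply two numbers.",
    arg_types={"a": int, "b": int},
    output_version="v1",  # ToolCall inputs now yield a v1 ToolMessage
)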

View File

@ -68,6 +68,7 @@ from langchain_core.utils.pydantic import (
is_pydantic_v1_subclass,
is_pydantic_v2_subclass,
)
from langchain_core.v1.messages import ToolMessage as ToolMessageV1
if TYPE_CHECKING:
import uuid
@ -498,6 +499,14 @@ class ChildTool(BaseTool):
two-tuple corresponding to the (content, artifact) of a ToolMessage.
"""
output_version: Literal["v0", "v1"] = "v0"
"""Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the tool."""
if (
@ -835,7 +844,7 @@ class ChildTool(BaseTool):
content = None
artifact = None
status = "success"
status: Literal["success", "error"] = "success"
error_to_raise: Union[Exception, KeyboardInterrupt, None] = None
try:
child_config = patch_config(config, callbacks=run_manager.get_child())
@ -879,7 +888,14 @@ class ChildTool(BaseTool):
if error_to_raise:
run_manager.on_tool_error(error_to_raise)
raise error_to_raise
output = _format_output(content, artifact, tool_call_id, self.name, status)
output = _format_output(
content,
artifact,
tool_call_id,
self.name,
status,
output_version=self.output_version,
)
run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
@ -945,7 +961,7 @@ class ChildTool(BaseTool):
)
content = None
artifact = None
status = "success"
status: Literal["success", "error"] = "success"
error_to_raise: Optional[Union[Exception, KeyboardInterrupt]] = None
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)
@ -993,7 +1009,14 @@ class ChildTool(BaseTool):
await run_manager.on_tool_error(error_to_raise)
raise error_to_raise
output = _format_output(content, artifact, tool_call_id, self.name, status)
output = _format_output(
content,
artifact,
tool_call_id,
self.name,
status,
output_version=self.output_version,
)
await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
@ -1131,7 +1154,9 @@ def _format_output(
artifact: Any,
tool_call_id: Optional[str],
name: str,
status: str,
status: Literal["success", "error"],
*,
output_version: Literal["v0", "v1"] = "v0",
) -> Union[ToolOutputMixin, Any]:
"""Format tool output as a ToolMessage if appropriate.
@ -1141,6 +1166,7 @@ def _format_output(
tool_call_id: The ID of the tool call.
name: The name of the tool.
status: The execution status.
output_version: The version of the ToolMessage to return.
Returns:
The formatted output, either as a ToolMessage or the original content.
@ -1149,7 +1175,15 @@ def _format_output(
return content
if not _is_message_content_type(content):
content = _stringify(content)
return ToolMessage(
if output_version == "v0":
return ToolMessage(
content,
artifact=artifact,
tool_call_id=tool_call_id,
name=name,
status=status,
)
return ToolMessageV1(
content,
artifact=artifact,
tool_call_id=tool_call_id,
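
A hedged sketch of the new ``output_version`` field on ``BaseTool`` and the ``_format_output`` dispatch above; the ``EchoTool`` subclass is hypothetical, while the ToolCall-style input mirrors the tests later in this commit:

from langchain_core.tools import BaseTool
from langchain_core.v1.messages import ToolMessage as ToolMessageV1

class EchoTool(BaseTool):  # hypothetical subclass for illustration
    name: str = "echo"
    description: str = "Echo the input."

    def _run(self, x: int) -> str:
        return str(x)

tool_ = EchoTool(output_version="v1")  # field added in this diff
msg = tool_.invoke(
    {"type": "tool_call", "name": "echo", "args": {"x": 1}, "id": "call-1"}
)
assert isinstance(msg, ToolMessageV1)  # _format_output chose the v1 class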

View File

@ -22,6 +22,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
output_version: Literal["v0", "v1"] = "v0",
) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...
@ -37,6 +38,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
output_version: Literal["v0", "v1"] = "v0",
) -> BaseTool: ...
@ -51,6 +53,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
output_version: Literal["v0", "v1"] = "v0",
) -> BaseTool: ...
@ -65,6 +68,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
output_version: Literal["v0", "v1"] = "v0",
) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...
@ -79,6 +83,7 @@ def tool(
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = True,
output_version: Literal["v0", "v1"] = "v0",
) -> Union[
BaseTool,
Callable[[Union[Callable, Runnable]], BaseTool],
@ -118,6 +123,11 @@ def tool(
error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
whether to raise ValueError on invalid Google Style docstrings.
Defaults to True.
output_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
Returns:
The tool.
@ -216,7 +226,7 @@ def tool(
\"\"\"
return bar
""" # noqa: D214, D410, D411
""" # noqa: D214, D410, D411, E501
def _create_tool_factory(
tool_name: str,
@ -274,6 +284,7 @@ def tool(
response_format=response_format,
parse_docstring=parse_docstring,
error_on_invalid_docstring=error_on_invalid_docstring,
output_version=output_version,
)
# If someone doesn't want a schema applied, we must treat it as
# a simple string->string function
@ -290,6 +301,7 @@ def tool(
return_direct=return_direct,
coroutine=coroutine,
response_format=response_format,
output_version=output_version,
)
return _tool_factory
@ -383,6 +395,7 @@ def convert_runnable_to_tool(
name: Optional[str] = None,
description: Optional[str] = None,
arg_types: Optional[dict[str, type]] = None,
output_version: Literal["v0", "v1"] = "v0",
) -> BaseTool:
"""Convert a Runnable into a BaseTool.
@ -392,10 +405,15 @@ def convert_runnable_to_tool(
name: The name of the tool. Defaults to None.
description: The description of the tool. Defaults to None.
arg_types: The types of the arguments. Defaults to None.
output_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
Returns:
The tool.
"""
""" # noqa: E501
if args_schema:
runnable = runnable.with_types(input_type=args_schema)
description = description or _get_description_from_runnable(runnable)
@ -408,6 +426,7 @@ def convert_runnable_to_tool(
func=runnable.invoke,
coroutine=runnable.ainvoke,
description=description,
output_version=output_version,
)
async def ainvoke_wrapper(
@ -435,4 +454,5 @@ def convert_runnable_to_tool(
coroutine=ainvoke_wrapper,
description=description,
args_schema=args_schema,
output_version=output_version,
)
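
A short sketch of the decorator form, mirroring the usage that appears in this commit's tests; the ``greet`` tool itself is illustrative:

from langchain_core.tools import tool
from langchain_core.v1.messages import ToolMessage as ToolMessageV1

@tool(output_version="v1")
def greet(name: str) -> str:
    """Greet someone."""  # hypothetical tool for illustration
    return f"hello {name}"

msg = greet.invoke(
    {"type": "tool_call", "name": "greet", "args": {"name": "ada"}, "id": "1"}
)
assert isinstance(msg, ToolMessageV1)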

View File

@ -72,6 +72,7 @@ def create_retriever_tool(
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
output_version: Literal["v0", "v1"] = "v1",
) -> Tool:
r"""Create a tool to do retrieval of documents.
@ -88,10 +89,15 @@ def create_retriever_tool(
"content_and_artifact" then the output is expected to be a two-tuple
corresponding to the (content, artifact) of a ToolMessage (artifact
being a list of documents in this case). Defaults to "content".
output_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
Returns:
Tool class to pass to an agent.
"""
""" # noqa: E501
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
@ -114,4 +120,5 @@ def create_retriever_tool(
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
output_version=output_version,
)
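
A hedged usage sketch, adapted from this commit's tests; the retriever is a stub. Note that ``create_retriever_tool`` defaults ``output_version`` to ``"v1"``, unlike the other tool constructors in this commit:

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.tools import create_retriever_tool

class MyRetriever(BaseRetriever):  # minimal retriever, as in the tests below
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        return [Document(page_content=f"foo {query}")]

retriever_tool = create_retriever_tool(
    MyRetriever(),
    "retriever_tool_content",
    "Retriever Tool Content",
    output_version="v1",  # this function's default, shown above
)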

View File

@ -129,6 +129,7 @@ class StructuredTool(BaseTool):
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = False,
output_version: Literal["v0", "v1"] = "v0",
**kwargs: Any,
) -> StructuredTool:
"""Create tool from a given function.
@ -157,6 +158,12 @@ class StructuredTool(BaseTool):
error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
whether to raise ValueError on invalid Google Style docstrings.
Defaults to False.
output_version: Version of ToolMessage to return given
:class:`~langchain_core.messages.content_blocks.ToolCall` input.
If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
If ``"v1"``, output will be a v1 :class:`~langchain_core.v1.messages.ToolMessage`.
kwargs: Additional arguments to pass to the tool
Returns:
@ -175,7 +182,7 @@ class StructuredTool(BaseTool):
tool = StructuredTool.from_function(add)
tool.run(1, 2) # 3
"""
""" # noqa: E501
if func is not None:
source_function = func
elif coroutine is not None:
@ -232,6 +239,7 @@ class StructuredTool(BaseTool):
description=description_,
return_direct=return_direct,
response_format=response_format,
output_version=output_version,
**kwargs,
)
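
A brief sketch of the new keyword on ``StructuredTool.from_function``, assuming this branch's API; ``add`` is the docstring's own example function:

from langchain_core.tools import StructuredTool

def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

add_tool = StructuredTool.from_function(add, output_version="v1")
# Invoking with a ToolCall dict now returns a v1 ToolMessage:
# add_tool.invoke({"type": "tool_call", "name": "add",
#                  "args": {"a": 1, "b": 2}, "id": "1"})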

View File

@ -16,8 +16,8 @@ from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.exceptions import TracerException # noqa: F401
from langchain_core.messages.v1 import AIMessage, AIMessageChunk, MessageV1
from langchain_core.tracers.core import _TracerCore
from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1
if TYPE_CHECKING:
from collections.abc import Sequence

View File

@ -19,12 +19,6 @@ from typing import (
from langchain_core.exceptions import TracerException
from langchain_core.load import dumpd
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import (
AIMessage,
AIMessageChunk,
MessageV1,
MessageV1Types,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
@ -32,6 +26,12 @@ from langchain_core.outputs import (
LLMResult,
)
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import (
AIMessage,
AIMessageChunk,
MessageV1,
MessageV1Types,
)
if TYPE_CHECKING:
from collections.abc import Coroutine, Sequence

View File

@ -19,7 +19,6 @@ from typing_extensions import NotRequired, TypedDict, override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.messages import AIMessageChunk, BaseMessage, BaseMessageChunk
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import (
ChatGenerationChunk,
GenerationChunk,
@ -39,15 +38,16 @@ from langchain_core.runnables.utils import (
from langchain_core.tracers._streaming import _StreamingCallbackHandler
from langchain_core.tracers.memory_stream import _MemoryStream
from langchain_core.utils.aiter import aclosing, py_anext
from langchain_core.v1.messages import MessageV1
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator, Sequence
from langchain_core.documents import Document
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tracers.log_stream import LogEntry
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
logger = logging.getLogger(__name__)

View File

@ -22,14 +22,14 @@ from typing_extensions import override
from langchain_core.env import get_runtime_environment
from langchain_core.load import dumpd
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import MessageV1Types
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import MessageV1Types
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import AIMessageChunk, MessageV1
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
from langchain_core.v1.messages import AIMessageChunk, MessageV1
logger = logging.getLogger(__name__)
_LOGGED = set()

View File

@ -32,9 +32,9 @@ if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator, Sequence
from uuid import UUID
from langchain_core.messages.v1 import AIMessageChunk
from langchain_core.runnables.utils import Input, Output
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import AIMessageChunk
class LogEntry(TypedDict):

View File

@ -0,0 +1 @@
"""LangChain v1.0 types."""

View File

@ -52,10 +52,6 @@ from langchain_core.messages.utils import (
convert_from_v1_message,
convert_to_messages_v1,
)
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.messages.v1 import MessageV1, add_ai_message_chunks
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
@ -71,6 +67,10 @@ from langchain_core.utils.function_calling import (
convert_to_openai_tool,
)
from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import MessageV1, add_ai_message_chunks
if TYPE_CHECKING:
from langchain_core.output_parsers.base import OutputParserLike
@ -189,7 +189,7 @@ def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) ->
return ls_structured_output_format_dict
class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC):
class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC):
"""Base class for chat models.
Key imperative methods:
@ -383,8 +383,8 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
**kwargs: Any,
) -> bool:
"""Determine if a given model call should hit the streaming API."""
sync_not_implemented = type(self)._stream == BaseChatModelV1._stream # noqa: SLF001
async_not_implemented = type(self)._astream == BaseChatModelV1._astream # noqa: SLF001
sync_not_implemented = type(self)._stream == BaseChatModel._stream # noqa: SLF001
async_not_implemented = type(self)._astream == BaseChatModel._astream # noqa: SLF001
# Check if streaming is implemented.
if (not async_api) and sync_not_implemented:
@ -946,7 +946,7 @@ class BaseChatModelV1(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC
PydanticToolsParser,
)
if type(self).bind_tools is BaseChatModelV1.bind_tools:
if type(self).bind_tools is BaseChatModel.bind_tools:
msg = "with_structured_output is not implemented for this model."
raise NotImplementedError(msg)
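
The rename leaves downstream code importing the v1 base class under its new path; both import forms below appear verbatim elsewhere in this commit (the fake chat models and the OpenAI integration):

# New home of the v1 chat model base class and streaming helpers.
from langchain_core.v1.chat_models import (
    BaseChatModel,
    agenerate_from_stream,
    generate_from_stream,
)

# Downstream code that still wants the old name can alias it, as the fake
# chat models in this commit do.
from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1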

View File

@ -19,6 +19,7 @@ from langchain_core.messages.ai import (
add_usage,
)
from langchain_core.messages.base import merge_content
from langchain_core.messages.tool import ToolOutputMixin
from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call
from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.utils._merge import merge_dicts
@ -647,7 +648,7 @@ class SystemMessage:
@dataclass
class ToolMessage:
class ToolMessage(ToolOutputMixin):
"""A message containing the result of a tool execution.
Represents the output from executing a tool or function call,

View File

@ -10,9 +10,9 @@ from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import MessageV1
class MyCustomAsyncHandler(AsyncCallbackHandler):

View File

@ -9,7 +9,7 @@ from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import MessageV1
from langchain_core.v1.messages import MessageV1
class BaseFakeCallbackHandler(BaseModel):

View File

@ -15,9 +15,9 @@ from langchain_core.language_models import (
ParrotFakeChatModel,
)
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import MessageV1
from tests.unit_tests.stubs import (
_any_id_ai_message,
_any_id_ai_message_chunk,

View File

@ -25,7 +25,6 @@ from langchain_core.messages import (
HumanMessage,
SystemMessage,
)
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.tracers import LogStreamCallbackHandler
@ -33,6 +32,7 @@ from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.context import collect_runs
from langchain_core.tracers.event_stream import _AstreamEventsCallbackHandler
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from tests.unit_tests.fake.callbacks import (
BaseFakeCallbackHandler,
FakeAsyncCallbackHandler,

View File

@ -1,6 +1,6 @@
"""Unit tests for ResponseMetadata TypedDict."""
from langchain_core.messages.v1 import AIMessage, AIMessageChunk, ResponseMetadata
from langchain_core.v1.messages import AIMessage, AIMessageChunk, ResponseMetadata
class TestResponseMetadata:

View File

@ -8,12 +8,12 @@ from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.language_models.fake_chat_models import GenericFakeChatModelV1
from langchain_core.messages import AIMessage
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.v1.messages import AIMessage as AIMessageV1
def test_base_generation_parser() -> None:

View File

@ -10,14 +10,14 @@ from langchain_core.messages import (
BaseMessage,
ToolCallChunk,
)
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
STREAMED_MESSAGES: list = [
AIMessageChunk(content=""),

View File

@ -2529,7 +2529,7 @@
'title': 'ToolMessage',
'type': 'object',
}),
'langchain_core__messages__v1__AIMessage': dict({
'langchain_core__v1__messages__AIMessage': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -2655,7 +2655,7 @@
'title': 'AIMessage',
'type': 'object',
}),
'langchain_core__messages__v1__AIMessageChunk': dict({
'langchain_core__v1__messages__AIMessageChunk': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -2781,7 +2781,7 @@
'title': 'AIMessageChunk',
'type': 'object',
}),
'langchain_core__messages__v1__HumanMessage': dict({
'langchain_core__v1__messages__HumanMessage': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -2869,7 +2869,7 @@
'title': 'HumanMessage',
'type': 'object',
}),
'langchain_core__messages__v1__SystemMessage': dict({
'langchain_core__v1__messages__SystemMessage': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -2969,7 +2969,7 @@
'title': 'SystemMessage',
'type': 'object',
}),
'langchain_core__messages__v1__ToolMessage': dict({
'langchain_core__v1__messages__ToolMessage': dict({
'properties': dict({
'artifact': dict({
'anyOf': list([
@ -3123,19 +3123,19 @@
]),
}),
dict({
'$ref': '#/$defs/langchain_core__messages__v1__AIMessage',
'$ref': '#/$defs/langchain_core__v1__messages__AIMessage',
}),
dict({
'$ref': '#/$defs/langchain_core__messages__v1__AIMessageChunk',
'$ref': '#/$defs/langchain_core__v1__messages__AIMessageChunk',
}),
dict({
'$ref': '#/$defs/langchain_core__messages__v1__HumanMessage',
'$ref': '#/$defs/langchain_core__v1__messages__HumanMessage',
}),
dict({
'$ref': '#/$defs/langchain_core__messages__v1__SystemMessage',
'$ref': '#/$defs/langchain_core__v1__messages__SystemMessage',
}),
dict({
'$ref': '#/$defs/langchain_core__messages__v1__ToolMessage',
'$ref': '#/$defs/langchain_core__v1__messages__ToolMessage',
}),
]),
'title': 'RunnableParallel<as_list,as_str>Input',

View File

@ -9412,19 +9412,19 @@
]),
}),
dict({
'$ref': '#/definitions/langchain_core__messages__v1__AIMessage',
'$ref': '#/definitions/langchain_core__v1__messages__AIMessage',
}),
dict({
'$ref': '#/definitions/langchain_core__messages__v1__AIMessageChunk',
'$ref': '#/definitions/langchain_core__v1__messages__AIMessageChunk',
}),
dict({
'$ref': '#/definitions/langchain_core__messages__v1__HumanMessage',
'$ref': '#/definitions/langchain_core__v1__messages__HumanMessage',
}),
dict({
'$ref': '#/definitions/langchain_core__messages__v1__SystemMessage',
'$ref': '#/definitions/langchain_core__v1__messages__SystemMessage',
}),
dict({
'$ref': '#/definitions/langchain_core__messages__v1__ToolMessage',
'$ref': '#/definitions/langchain_core__v1__messages__ToolMessage',
}),
]),
'definitions': dict({
@ -11521,7 +11521,7 @@
'title': 'ToolMessage',
'type': 'object',
}),
'langchain_core__messages__v1__AIMessage': dict({
'langchain_core__v1__messages__AIMessage': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -11646,7 +11646,7 @@
'title': 'AIMessage',
'type': 'object',
}),
'langchain_core__messages__v1__AIMessageChunk': dict({
'langchain_core__v1__messages__AIMessageChunk': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -11771,7 +11771,7 @@
'title': 'AIMessageChunk',
'type': 'object',
}),
'langchain_core__messages__v1__HumanMessage': dict({
'langchain_core__v1__messages__HumanMessage': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -11858,7 +11858,7 @@
'title': 'HumanMessage',
'type': 'object',
}),
'langchain_core__messages__v1__SystemMessage': dict({
'langchain_core__v1__messages__SystemMessage': dict({
'properties': dict({
'content': dict({
'items': dict({
@ -11957,7 +11957,7 @@
'title': 'SystemMessage',
'type': 'object',
}),
'langchain_core__messages__v1__ToolMessage': dict({
'langchain_core__v1__messages__ToolMessage': dict({
'properties': dict({
'artifact': dict({
'anyOf': list([

View File

@ -35,9 +35,9 @@ from langchain_core.messages.content_blocks import KNOWN_BLOCK_TYPES, ContentBlo
from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call
from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.utils._merge import merge_lists
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
def test_message_init() -> None:

View File

@ -68,8 +68,10 @@ from langchain_core.utils.pydantic import (
_create_subset_model,
create_model_v2,
)
from langchain_core.v1.messages import ToolMessage as ToolMessageV1
from tests.unit_tests.fake.callbacks import FakeCallbackHandler
from tests.unit_tests.pydantic_utils import _schema
from tests.unit_tests.stubs import AnyStr
def _get_tool_call_json_schema(tool: BaseTool) -> dict:
@ -1379,17 +1381,28 @@ def test_tool_annotated_descriptions() -> None:
}
def test_tool_call_input_tool_message_output() -> None:
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_tool_call_input_tool_message(output_version: Literal["v0", "v1"]) -> None:
tool_call = {
"name": "structured_api",
"args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
"id": "123",
"type": "tool_call",
}
tool = _MockStructuredTool()
expected = ToolMessage(
"1 True {'img': 'base64string...'}", tool_call_id="123", name="structured_api"
)
tool = _MockStructuredTool(output_version=output_version)
if output_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"1 True {'img': 'base64string...'}",
tool_call_id="123",
name="structured_api",
)
else:
expected = ToolMessageV1(
"1 True {'img': 'base64string...'}",
tool_call_id="123",
name="structured_api",
id=AnyStr("lc_abc123"),
)
actual = tool.invoke(tool_call)
assert actual == expected
@ -1421,6 +1434,14 @@ def _mock_structured_tool_with_artifact(
return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}
@tool("structured_api", response_format="content_and_artifact", output_version="v1")
def _mock_structured_tool_with_artifact_v1(
*, arg1: int, arg2: bool, arg3: Optional[dict] = None
) -> tuple[str, dict]:
"""A Structured Tool."""
return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}
@pytest.mark.parametrize(
"tool", [_MockStructuredToolWithRawOutput(), _mock_structured_tool_with_artifact]
)
@ -1445,6 +1466,38 @@ def test_tool_call_input_tool_message_with_artifact(tool: BaseTool) -> None:
assert actual_content == expected.content
@pytest.mark.parametrize(
"tool",
[
_MockStructuredToolWithRawOutput(output_version="v1"),
_mock_structured_tool_with_artifact_v1,
],
)
def test_tool_call_input_tool_message_with_artifact_v1(tool: BaseTool) -> None:
tool_call: dict = {
"name": "structured_api",
"args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
"id": "123",
"type": "tool_call",
}
expected = ToolMessageV1(
"1 True",
artifact=tool_call["args"],
tool_call_id="123",
name="structured_api",
id=AnyStr("lc_abc123"),
)
actual = tool.invoke(tool_call)
assert actual == expected
tool_call.pop("type")
with pytest.raises(ValidationError):
tool.invoke(tool_call)
actual_content = tool.invoke(tool_call["args"])
assert actual_content == expected.text
def test_convert_from_runnable_dict() -> None:
# Test with typed dict input
class Args(TypedDict):
@ -1550,6 +1603,17 @@ def injected_tool(x: int, y: Annotated[str, InjectedToolArg]) -> str:
return y
@tool("foo", parse_docstring=True, output_version="v1")
def injected_tool_v1(x: int, y: Annotated[str, InjectedToolArg]) -> str:
"""Foo.
Args:
x: abc
y: 123
"""
return y
class InjectedTool(BaseTool):
name: str = "foo"
description: str = "foo."
@ -1587,7 +1651,12 @@ def injected_tool_with_schema(x: int, y: str) -> str:
return y
@pytest.mark.parametrize("tool_", [InjectedTool()])
@tool("foo", args_schema=fooSchema, output_version="v1")
def injected_tool_with_schema_v1(x: int, y: str) -> str:
return y
@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(output_version="v1")])
def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
assert _schema(tool_.get_input_schema()) == {
"title": "foo",
@ -1607,14 +1676,25 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
if tool_.output_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
else:
expected = ToolMessageV1(
"bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123")
)
assert (
tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
)
== expected
)
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
@ -1634,7 +1714,12 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
@pytest.mark.parametrize(
"tool_",
[injected_tool_with_schema, InjectedToolWithSchema()],
[
injected_tool_with_schema,
InjectedToolWithSchema(),
injected_tool_with_schema_v1,
InjectedToolWithSchema(output_version="v1"),
],
)
def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
assert _schema(tool_.get_input_schema()) == {
@ -1655,14 +1740,25 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
if tool_.output_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
else:
expected = ToolMessageV1(
"bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123")
)
assert (
tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
)
== expected
)
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
@ -1680,8 +1776,9 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
}
def test_tool_injected_arg() -> None:
tool_ = injected_tool
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_tool_injected_arg(output_version: Literal["v0", "v1"]) -> None:
tool_ = injected_tool if output_version == "v0" else injected_tool_v1
assert _schema(tool_.get_input_schema()) == {
"title": "foo",
"description": "Foo.",
@ -1700,14 +1797,25 @@ def test_tool_injected_arg() -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
if output_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
else:
expected = ToolMessageV1(
"bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123")
)
assert (
tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
)
== expected
)
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
@ -1725,7 +1833,8 @@ def test_tool_injected_arg() -> None:
}
def test_tool_inherited_injected_arg() -> None:
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_tool_inherited_injected_arg(output_version: Literal["v0", "v1"]) -> None:
class BarSchema(BaseModel):
"""bar."""
@ -1746,7 +1855,7 @@ def test_tool_inherited_injected_arg() -> None:
def _run(self, x: int, y: str) -> Any:
return y
tool_ = InheritedInjectedArgTool()
tool_ = InheritedInjectedArgTool(output_version=output_version)
assert tool_.get_input_schema().model_json_schema() == {
"title": "FooSchema", # Matches the title from the provided schema
"description": "foo.",
@ -1766,14 +1875,25 @@ def test_tool_inherited_injected_arg() -> None:
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
if output_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"bar", tool_call_id="123", name="foo"
)
else:
expected = ToolMessageV1(
"bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123")
)
assert (
tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
)
== expected
)
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
@ -2133,7 +2253,8 @@ def test_tool_annotations_preserved() -> None:
assert schema.__annotations__ == expected_type_hints
def test_create_retriever_tool() -> None:
@pytest.mark.parametrize("output_version", ["v0", "v1"])
def test_create_retriever_tool(output_version: Literal["v0", "v1"]) -> None:
class MyRetriever(BaseRetriever):
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
@ -2142,21 +2263,36 @@ def test_create_retriever_tool() -> None:
retriever = MyRetriever()
retriever_tool = tools.create_retriever_tool(
retriever, "retriever_tool_content", "Retriever Tool Content"
retriever,
"retriever_tool_content",
"Retriever Tool Content",
output_version=output_version,
)
assert isinstance(retriever_tool, BaseTool)
assert retriever_tool.name == "retriever_tool_content"
assert retriever_tool.description == "Retriever Tool Content"
assert retriever_tool.invoke("bar") == "foo bar\n\nbar"
assert retriever_tool.invoke(
ToolCall(
name="retriever_tool_content",
args={"query": "bar"},
id="123",
type="tool_call",
if output_version == "v0":
expected: Union[ToolMessage, ToolMessageV1] = ToolMessage(
"foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content"
)
) == ToolMessage(
"foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content"
else:
expected = ToolMessageV1(
"foo bar\n\nbar",
tool_call_id="123",
name="retriever_tool_content",
id=AnyStr("lc_abc123"),
)
assert (
retriever_tool.invoke(
ToolCall(
name="retriever_tool_content",
args={"query": "bar"},
id="123",
type="tool_call",
)
)
== expected
)
retriever_tool_artifact = tools.create_retriever_tool(
@ -2164,23 +2300,37 @@ def test_create_retriever_tool() -> None:
"retriever_tool_artifact",
"Retriever Tool Artifact",
response_format="content_and_artifact",
output_version=output_version,
)
assert isinstance(retriever_tool_artifact, BaseTool)
assert retriever_tool_artifact.name == "retriever_tool_artifact"
assert retriever_tool_artifact.description == "Retriever Tool Artifact"
assert retriever_tool_artifact.invoke("bar") == "foo bar\n\nbar"
assert retriever_tool_artifact.invoke(
ToolCall(
if output_version == "v0":
expected = ToolMessage(
"foo bar\n\nbar",
artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
tool_call_id="123",
name="retriever_tool_artifact",
args={"query": "bar"},
id="123",
type="tool_call",
)
) == ToolMessage(
"foo bar\n\nbar",
artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
tool_call_id="123",
name="retriever_tool_artifact",
else:
expected = ToolMessageV1(
"foo bar\n\nbar",
artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
tool_call_id="123",
name="retriever_tool_artifact",
id=AnyStr("lc_abc123"),
)
assert (
retriever_tool_artifact.invoke(
ToolCall(
name="retriever_tool_artifact",
args={"query": "bar"},
id="123",
type="tool_call",
)
)
== expected
)
@ -2313,6 +2463,45 @@ def test_tool_injected_tool_call_id() -> None:
) == ToolMessage(0, tool_call_id="bar") # type: ignore[arg-type]
def test_tool_injected_tool_call_id_v1() -> None:
@tool
def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessageV1:
"""Foo."""
return ToolMessageV1(str(x), tool_call_id=tool_call_id)
assert foo.invoke(
{
"type": "tool_call",
"args": {"x": 0},
"name": "foo",
"id": "bar",
}
) == ToolMessageV1("0", tool_call_id="bar", id=AnyStr("lc_abc123"))
with pytest.raises(
ValueError,
match="When tool includes an InjectedToolCallId argument, "
"tool must always be invoked with a full model ToolCall",
):
assert foo.invoke({"x": 0})
@tool
def foo2(
x: int, tool_call_id: Annotated[str, InjectedToolCallId()]
) -> ToolMessageV1:
"""Foo."""
return ToolMessageV1(str(x), tool_call_id=tool_call_id)
assert foo2.invoke(
{
"type": "tool_call",
"args": {"x": 0},
"name": "foo",
"id": "bar",
}
) == ToolMessageV1("0", tool_call_id="bar", id=AnyStr("lc_abc123"))
def test_tool_uninjected_tool_call_id() -> None:
@tool
def foo(x: int, tool_call_id: str) -> ToolMessage:
@ -2332,6 +2521,25 @@ def test_tool_uninjected_tool_call_id() -> None:
) == ToolMessage(0, tool_call_id="zap") # type: ignore[arg-type]
def test_tool_uninjected_tool_call_id_v1() -> None:
@tool
def foo(x: int, tool_call_id: str) -> ToolMessageV1:
"""Foo."""
return ToolMessageV1(str(x), tool_call_id=tool_call_id)
with pytest.raises(ValueError, match="1 validation error for foo"):
foo.invoke({"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"})
assert foo.invoke(
{
"type": "tool_call",
"args": {"x": 0, "tool_call_id": "zap"},
"name": "foo",
"id": "bar",
}
) == ToolMessageV1("0", tool_call_id="zap", id=AnyStr("lc_abc123"))
def test_tool_return_output_mixin() -> None:
class Bar(ToolOutputMixin):
def __init__(self, x: int) -> None:
@ -2457,6 +2665,19 @@ def test_empty_string_tool_call_id() -> None:
)
def test_empty_string_tool_call_id_v1() -> None:
@tool(output_version="v1")
def foo(x: int) -> str:
"""Foo."""
return "hi"
assert foo.invoke(
{"type": "tool_call", "args": {"x": 0}, "id": ""}
) == ToolMessageV1(
content="hi", name="foo", tool_call_id="", id=AnyStr("lc_abc123")
)
def test_tool_decorator_description() -> None:
# test basic tool
@tool

View File

@ -12,11 +12,11 @@ from freezegun import freeze_time
from langchain_core.callbacks import AsyncCallbackManager
from langchain_core.exceptions import TracerException
from langchain_core.messages import HumanMessage
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import LLMResult
from langchain_core.tracers.base import AsyncBaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import MessageV1
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage

View File

@ -15,12 +15,12 @@ from langsmith import Client, traceable
from langchain_core.callbacks import CallbackManager
from langchain_core.exceptions import TracerException
from langchain_core.messages import HumanMessage
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import LLMResult
from langchain_core.runnables import chain as as_runnable
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import MessageV1
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage

View File

@ -9,8 +9,8 @@ from langchain_core.messages import (
BaseMessage,
)
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.v1.messages import AIMessage as AIMessageV1
from typing_extensions import override
from langchain.agents.agent import AgentOutputParser

View File

@ -3,8 +3,8 @@ from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.v1.messages import AIMessage as AIMessageV1
from typing_extensions import override
from langchain.agents.agent import MultiActionAgentOutputParser

View File

@ -10,8 +10,8 @@ from langchain_core.messages import (
ToolCall,
)
from langchain_core.messages.utils import convert_from_v1_message
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.v1.messages import AIMessage as AIMessageV1
from typing_extensions import override
from langchain.agents.agent import MultiActionAgentOutputParser

View File

@ -5,8 +5,8 @@ from collections.abc import AsyncIterator
from typing import Any, Literal, Union, cast
from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.messages.v1 import AIMessage
from langchain_core.outputs import LLMResult
from langchain_core.v1.messages import AIMessage
from typing_extensions import override
# TODO If used by two LLM runs in parallel this won't work as expected

View File

@ -2,8 +2,8 @@ from __future__ import annotations
from typing import Any, Optional, Union
from langchain_core.messages.v1 import AIMessage
from langchain_core.outputs import LLMResult
from langchain_core.v1.messages import AIMessage
from typing_extensions import override
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler

View File

@ -7,8 +7,8 @@ from uuid import UUID
from langchain_core.callbacks import base as base_callbacks
from langchain_core.documents import Document
from langchain_core.messages.v1 import AIMessage
from langchain_core.outputs import LLMResult
from langchain_core.v1.messages import AIMessage
from typing_extensions import override

View File

@ -6,7 +6,7 @@ from uuid import UUID
from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import MessageV1
from langchain_core.v1.messages import MessageV1
from pydantic import BaseModel
from typing_extensions import override

View File

@ -6,9 +6,9 @@ from uuid import UUID
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import MessageV1
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import MessageV1
from typing_extensions import override
from tests.unit_tests.llms.fake_chat_model import GenericFakeChatModel

View File

@ -73,7 +73,7 @@ from typing import Any, Literal, Optional, Union, cast
from langchain_core.messages import AIMessage, is_data_content_block
from langchain_core.messages import content_blocks as types
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessage as AIMessageV1
_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"

View File

@ -38,11 +38,6 @@ from langchain_core.callbacks import (
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.language_models.v1.chat_models import (
BaseChatModelV1,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
InvalidToolCall,
ToolCall,
@ -55,12 +50,6 @@ from langchain_core.messages.ai import (
UsageMetadata,
)
from langchain_core.messages.tool import tool_call_chunk
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.messages.v1 import MessageV1, ResponseMetadata
from langchain_core.messages.v1 import SystemMessage as SystemMessageV1
from langchain_core.messages.v1 import ToolMessage as ToolMessageV1
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
@ -88,6 +77,17 @@ from langchain_core.utils.pydantic import (
is_basemodel_subclass,
)
from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
from langchain_core.v1.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import MessageV1, ResponseMetadata
from langchain_core.v1.messages import SystemMessage as SystemMessageV1
from langchain_core.v1.messages import ToolMessage as ToolMessageV1
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from pydantic.v1 import BaseModel as BaseModelV1
from typing_extensions import Self
@ -387,7 +387,7 @@ class _AllReturnType(TypedDict):
parsing_error: Optional[BaseException]
class BaseChatOpenAI(BaseChatModelV1):
class BaseChatOpenAI(BaseChatModel):
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
root_client: Any = Field(default=None, exclude=True) #: :meta private:
@ -1631,7 +1631,7 @@ class BaseChatOpenAI(BaseChatModelV1):
kwargs: Additional keyword args are passed through to the model.
Returns:
A Runnable that takes same inputs as a :class:`from langchain_core.language_models.v1.chat_models import BaseChatModelV1`.
A Runnable that takes the same inputs as a :class:`~langchain_core.v1.chat_models.BaseChatModel`.
| If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
@ -2646,7 +2646,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
kwargs: Additional keyword args are passed through to the model.
Returns:
A Runnable that takes same inputs as a :class:`from langchain_core.language_models.v1.chat_models import BaseChatModelV1`.
A Runnable that takes the same inputs as a :class:`~langchain_core.v1.chat_models.BaseChatModel`.
| If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
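
A hedged usage sketch of ``with_structured_output`` on this branch, assuming ``ChatOpenAI`` here derives from the v1 ``BaseChatModel``; the model name and schema are illustrative:

from langchain_openai import ChatOpenAI
from pydantic import BaseModel

class Answer(BaseModel):
    value: int

llm = ChatOpenAI(model="gpt-4o-mini")  # model name is illustrative
structured = llm.with_structured_output(Answer)
# structured.invoke("What is 2 + 2?")  -> Answer(value=4)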

View File

@ -14,9 +14,9 @@ from langchain_core.messages import (
HumanMessage,
MessageLikeRepresentation,
)
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
from langchain_core.v1.messages import AIMessage as AIMessageV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import HumanMessage as HumanMessageV1
from pydantic import BaseModel
from typing_extensions import TypedDict

View File

@ -22,11 +22,11 @@ from langchain_core.messages import (
)
from langchain_core.messages import content_blocks as types
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.v1 import AIMessage as AIMessageV1
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import RunnableLambda
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.v1.messages import AIMessage as AIMessageV1
from openai.types.responses import ResponseOutputMessage, ResponseReasoningItem
from openai.types.responses.response import IncompleteDetails, Response, ResponseUsage
from openai.types.responses.response_error import ResponseError

View File

@ -2,7 +2,7 @@ from typing import Any, Optional
from unittest.mock import MagicMock, patch
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
from openai.types.responses import (
ResponseCompletedEvent,
ResponseContentPartAddedEvent,

View File

@ -6,7 +6,7 @@ from uuid import UUID
from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.messages.v1 import MessageV1
from langchain_core.v1.messages import MessageV1
from pydantic import BaseModel