formatting/nits/docs
commit cd8d6ae7cd
parent d5b26bc358
@@ -629,15 +629,16 @@ def tool_example_to_messages(

 The list of messages per example by default corresponds to:

-1) HumanMessage: contains the content from which content should be extracted.
-2) AIMessage: contains the extracted information from the model
-3) ToolMessage: contains confirmation to the model that the model requested a tool
-correctly.
+1. ``HumanMessage``: contains the content from which content should be extracted.
+2. ``AIMessage``: contains the extracted information from the model
+3. ``ToolMessage``: contains confirmation to the model that the model requested a
+tool correctly.

-If `ai_response` is specified, there will be a final AIMessage with that response.
+If ``ai_response`` is specified, there will be a final ``AIMessage`` with that
+response.

-The ToolMessage is required because some chat models are hyper-optimized for agents
-rather than for an extraction use case.
+The ``ToolMessage`` is required because some chat models are hyper-optimized for
+agents rather than for an extraction use case.

 Arguments:
 input: string, the user input
@@ -646,7 +647,7 @@ def tool_example_to_messages(
 tool_outputs: Optional[list[str]], a list of tool call outputs.
 Does not need to be provided. If not provided, a placeholder value
 will be inserted. Defaults to None.
-ai_response: Optional[str], if provided, content for a final AIMessage.
+ai_response: Optional[str], if provided, content for a final ``AIMessage``.

 Returns:
 A list of messages
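As context for the docstring edited above, here is a minimal, hedged usage sketch of ``tool_example_to_messages`` (assuming ``langchain-core`` is installed; the ``Person`` schema and example values are made up for illustration, not taken from the repository):

```python
# Hedged sketch: per the docstring above, this should yield roughly
# [HumanMessage, AIMessage(tool_calls=...), ToolMessage, AIMessage].
from pydantic import BaseModel

from langchain_core.utils.function_calling import tool_example_to_messages


class Person(BaseModel):
    """Hypothetical extraction schema."""

    name: str
    age: int


messages = tool_example_to_messages(
    "Alice is 30 years old.",
    [Person(name="Alice", age=30)],
    ai_response="Extracted one person: Alice, age 30.",
)
for message in messages:
    print(type(message).__name__)
```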
@@ -728,6 +729,7 @@ def _parse_google_docstring(
 """Parse the function and argument descriptions from the docstring of a function.

 Assumes the function docstring follows Google Python style guide.
+
 """
 if docstring:
 docstring_blocks = docstring.split("\n\n")
@@ -190,7 +190,7 @@ def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) ->


 class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC):
-"""Base class for chat models.
+"""Base class for v1 chat models.

 Key imperative methods:
 Methods that actually call the underlying model.
@@ -61,7 +61,7 @@ class ResponseMetadata(TypedDict, total=False):

 @dataclass
 class AIMessage:
-"""A message generated by an AI assistant.
+"""A v1 message generated by an AI assistant.

 Represents a response from an AI model, including text content, tool calls,
 and metadata about the generation process.
@@ -133,7 +133,7 @@ class AIMessage:
 invalid_tool_calls: Optional[list[types.InvalidToolCall]] = None,
 parsed: Optional[Union[dict[str, Any], BaseModel]] = None,
 ):
-"""Initialize an AI message.
+"""Initialize a v1 AI message.

 Args:
 content: Message content as string or list of content blocks.
@@ -263,7 +263,7 @@ class AIMessageChunk(AIMessage):
 parsed: Optional[Union[dict[str, Any], BaseModel]] = None,
 chunk_position: Optional[Literal["last"]] = None,
 ):
-"""Initialize an AI message.
+"""Initialize a v1 AI message.

 Args:
 content: Message content as string or list of content blocks.
@@ -541,7 +541,7 @@ class HumanMessage:
 id: Optional[str] = None,
 name: Optional[str] = None,
 ):
-"""Initialize a human message.
+"""Initialize a v1 human message.

 Args:
 content: Message content as string or list of content blocks.
@@ -623,7 +623,7 @@ class SystemMessage:
 custom_role: Optional[str] = None,
 name: Optional[str] = None,
 ):
-"""Initialize a human message.
+"""Initialize a v1 system message.

 Args:
 content: Message content as string or list of content blocks.
@@ -711,7 +711,7 @@ class ToolMessage(ToolOutputMixin):
 artifact: Optional[Any] = None,
 status: Literal["success", "error"] = "success",
 ):
-"""Initialize a human message.
+"""Initialize a v1 tool message.

 Args:
 content: Message content as string or list of content blocks.
@@ -374,11 +374,6 @@ class TestFakeChatModelV1Integration(ChatModelV1IntegrationTests):
 """Enable non-standard blocks support."""
 return True

-@property
-def requires_api_key(self) -> bool:
-"""This fake model doesn't require an API key."""
-return False
-
 # Example of a more realistic integration test configuration
 # that would require API keys and external services
@@ -108,6 +108,7 @@ def magic_function_no_args() -> int:
 def _validate_tool_call_message(message: BaseMessage) -> None:
 assert isinstance(message, AIMessage)
 assert len(message.tool_calls) == 1
+
 tool_call = message.tool_calls[0]
 assert tool_call["name"] == "magic_function"
 assert tool_call["args"] == {"input": 3}
@@ -118,6 +119,7 @@ def _validate_tool_call_message(message: BaseMessage) -> None:
 def _validate_tool_call_message_no_args(message: BaseMessage) -> None:
 assert isinstance(message, AIMessage)
 assert len(message.tool_calls) == 1
+
 tool_call = message.tool_calls[0]
 assert tool_call["name"] == "magic_function_no_args"
 assert tool_call["args"] == {}
@@ -750,10 +752,10 @@ class ChatModelIntegrationTests(ChatModelTests):

 First, debug
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`.
-because `stream` has a default implementation that calls `invoke` and yields
-the result as a single chunk.
+because ``stream`` has a default implementation that calls ``invoke`` and
+yields the result as a single chunk.

-If that test passes but not this one, you should make sure your _stream
+If that test passes but not this one, you should make sure your ``_stream``
 method does not raise any exceptions, and that it yields valid
 :class:`~langchain_core.outputs.chat_generation.ChatGenerationChunk`
 objects like so:
@@ -769,6 +771,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 for chunk in model.stream("Hello"):
 assert chunk is not None
 assert isinstance(chunk, AIMessageChunk)
+assert isinstance(chunk.content, (str, list))
 num_chunks += 1
 assert num_chunks > 0

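As a companion to the ``test_stream`` docstring above, a hedged sketch of a custom chat model whose ``_stream`` yields valid ``ChatGenerationChunk`` objects; the ``EchoChatModel`` class and word-by-word chunking are illustrative assumptions, not the implementation under test:

```python
# Hedged sketch of the streaming contract described in the docstring above.
from collections.abc import Iterator
from typing import Any, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult


class EchoChatModel(BaseChatModel):
    """Toy model that echoes the last message back, word by word."""

    @property
    def _llm_type(self) -> str:
        return "echo-chat-model"

    def _generate(self, messages, stop=None, run_manager=None, **kwargs):
        # Non-streaming path: return the whole echo in one ChatResult.
        text = str(messages[-1].content)
        return ChatResult(generations=[ChatGeneration(message=AIMessage(text))])

    def _stream(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        # Yield one ChatGenerationChunk per "token" (here, per word).
        for word in str(messages[-1].content).split():
            yield ChatGenerationChunk(message=AIMessageChunk(content=word + " "))
```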
@@ -785,11 +788,11 @@ class ChatModelIntegrationTests(ChatModelTests):
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_stream`.
 and
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_ainvoke`.
-because `astream` has a default implementation that calls `_stream` in an
-async context if it is implemented, or `ainvoke` and yields the result as a
-single chunk if not.
+because ``astream`` has a default implementation that calls ``_stream`` in
+an async context if it is implemented, or ``ainvoke`` and yields the result
+as a single chunk if not.

-If those tests pass but not this one, you should make sure your _astream
+If those tests pass but not this one, you should make sure your ``_astream``
 method does not raise any exceptions, and that it yields valid
 :class:`~langchain_core.outputs.chat_generation.ChatGenerationChunk`
 objects like so:
@@ -819,12 +822,13 @@ class ChatModelIntegrationTests(ChatModelTests):

 First, debug
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`
-because `batch` has a default implementation that calls `invoke` for each
-message in the batch.
+because ``batch`` has a default implementation that calls ``invoke`` for
+each message in the batch.

-If that test passes but not this one, you should make sure your `batch`
+If that test passes but not this one, you should make sure your ``batch``
 method does not raise any exceptions, and that it returns a list of valid
 :class:`~langchain_core.messages.AIMessage` objects.
+
 """
 batch_results = model.batch(["Hello", "Hey"])
 assert batch_results is not None
@@ -848,10 +852,10 @@ class ChatModelIntegrationTests(ChatModelTests):
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_batch`
 and
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_ainvoke`
-because `abatch` has a default implementation that calls `ainvoke` for each
-message in the batch.
+because ``abatch`` has a default implementation that calls ``ainvoke`` for
+each message in the batch.

-If those tests pass but not this one, you should make sure your `abatch`
+If those tests pass but not this one, you should make sure your ``abatch``
 method does not raise any exceptions, and that it returns a list of valid
 :class:`~langchain_core.messages.AIMessage` objects.

@@ -877,7 +881,7 @@ class ChatModelIntegrationTests(ChatModelTests):

 First, debug
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`
-because this test also uses `model.invoke()`.
+because this test also uses ``model.invoke()``.

 If that test passes but not this one, you should verify that:
 1. Your model correctly processes the message history
@@ -890,6 +894,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 AIMessage("hello"),
 HumanMessage("how are you"),
 ]
+
 result = model.invoke(messages)
 assert result is not None
 assert isinstance(result, AIMessage)
@@ -907,18 +912,17 @@ class ChatModelIntegrationTests(ChatModelTests):

 First, debug
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`
-because this test also uses `model.invoke()`.
+because this test also uses ``model.invoke()``.

 Second, debug
 :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_conversation`
 because this test is the "basic case" without double messages.

 If that test passes those but not this one, you should verify that:
-1. Your model API can handle double messages, or the integration should
-merge messages before sending them to the API.
+1. Your model API can handle double messages, or the integration should merge messages before sending them to the API.
 2. The response is a valid :class:`~langchain_core.messages.AIMessage`

-"""
+""" # noqa: E501
 messages = [
 SystemMessage("hello"),
 SystemMessage("hello"),
@@ -928,6 +932,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 AIMessage("hello"),
 HumanMessage("how are you"),
 ]
+
 result = model.invoke(messages)
 assert result is not None
 assert isinstance(result, AIMessage)
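One possible way for an integration to satisfy point 1 of the docstring above (merging double messages before they reach the provider API) is the ``merge_message_runs`` utility from ``langchain_core``; this is a hedged sketch of that approach, not necessarily how any particular integration does it:

```python
# Hedged sketch: collapse consecutive messages of the same type.
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    merge_message_runs,
)

messages = [
    SystemMessage("hello"),
    SystemMessage("hello"),
    HumanMessage("hello"),
    HumanMessage("hello"),
    AIMessage("hello"),
    HumanMessage("how are you"),
]
merged = merge_message_runs(messages)
# Consecutive same-type messages are combined into single messages,
# e.g. the two SystemMessages become one SystemMessage.
print([type(m).__name__ for m in merged])
```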
@@ -1023,9 +1028,11 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.returns_usage_metadata:
 pytest.skip("Not implemented.")
+
 result = model.invoke("Hello")
 assert result is not None
 assert isinstance(result, AIMessage)
+
 assert result.usage_metadata is not None
 assert isinstance(result.usage_metadata["input_tokens"], int)
 assert isinstance(result.usage_metadata["output_tokens"], int)
@@ -1201,6 +1208,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.returns_usage_metadata:
 pytest.skip("Not implemented.")
+
 full: Optional[AIMessageChunk] = None
 for chunk in model.stream("Write me 2 haikus. Only include the haikus."):
 assert isinstance(chunk, AIMessageChunk)
@@ -1339,6 +1347,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 tool_choice_value = None if not self.has_tool_choice else "any"
 # Emit warning if tool_choice_value property is overridden
 if inspect.getattr_static(
@@ -1413,6 +1422,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 tool_choice_value = None if not self.has_tool_choice else "any"
 model_with_tools = model.bind_tools(
 [magic_function], tool_choice=tool_choice_value
@@ -1522,10 +1532,10 @@ class ChatModelIntegrationTests(ChatModelTests):

 If this test fails, check that:

-1. The model can correctly handle message histories that include AIMessage objects with ``""`` content.
-2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format.
-3. The model can correctly handle ToolMessage objects with string content and arbitrary string values for ``tool_call_id``.
-assert tool_call.get("type") == "tool_call"
+1. The model can correctly handle message histories that include ``AIMessage`` objects with ``""`` content.
+2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format.
+3. The model can correctly handle ``ToolMessage`` objects with string content and arbitrary string values for ``tool_call_id``.
+
 You can ``xfail`` the test if tool calling is implemented but this format
 is not supported.

@@ -1538,6 +1548,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """ # noqa: E501
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 model_with_tools = model.bind_tools([my_adder_tool])
 function_name = "my_adder_tool"
 function_args = {"a": "1", "b": "2"}
@@ -1623,6 +1634,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """ # noqa: E501
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 model_with_tools = model.bind_tools([my_adder_tool])
 function_name = "my_adder_tool"
 function_args = {"a": 1, "b": 2}
@@ -1695,7 +1707,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 pytest.skip("Test requires tool choice.")

 @tool
-def get_weather(location: str) -> str: # pylint: disable=unused-argument
+def get_weather(location: str) -> str:
 """Get weather at a location."""
 return "It's sunny."

@@ -1753,6 +1765,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """ # noqa: E501
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 tool_choice_value = None if not self.has_tool_choice else "any"
 model_with_tools = model.bind_tools(
 [magic_function_no_args], tool_choice=tool_choice_value
@@ -1770,7 +1783,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 def test_tool_message_error_status(
 self, model: BaseChatModel, my_adder_tool: BaseTool
 ) -> None:
-"""Test that ToolMessage with ``status="error"`` can be handled.
+"""Test that ``ToolMessage`` with ``status="error"`` can be handled.

 These messages may take the form:

@@ -1809,6 +1822,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 model_with_tools = model.bind_tools([my_adder_tool])
 messages = [
 HumanMessage("What is 1 + 2"),
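To illustrate the ``test_tool_message_error_status`` docstring touched above, a hedged example of the message shape it refers to; the tool name, arguments, and call id are made up for illustration:

```python
# Hedged sketch of a history containing a ToolMessage with status="error".
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

messages = [
    HumanMessage("What is 1 + 2"),
    AIMessage(
        "",
        tool_calls=[
            {"name": "my_adder_tool", "args": {"a": 1, "b": 2}, "id": "abc123"}
        ],
    ),
    ToolMessage(
        "Error: unsupported operand type",  # content describing the failure
        tool_call_id="abc123",
        status="error",
    ),
]
# An integration should be able to send such a history back to the model.
```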
@@ -1863,8 +1877,9 @@ class ChatModelIntegrationTests(ChatModelTests):

 .. dropdown:: Troubleshooting

-This test uses a utility function in ``langchain_core`` to generate a
-sequence of messages representing "few-shot" examples: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.tool_example_to_messages.html
+This test uses `a utility function <https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.tool_example_to_messages.html>`__
+in ``langchain_core`` to generate a sequence of messages representing
+"few-shot" examples.

 If this test fails, check that the model can correctly handle this
 sequence of messages.
@@ -1881,6 +1896,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.has_tool_calling:
 pytest.skip("Test requires tool calling.")
+
 model_with_tools = model.bind_tools([my_adder_tool], tool_choice="any")
 function_result = json.dumps({"result": 3})

@@ -1924,10 +1940,12 @@ class ChatModelIntegrationTests(ChatModelTests):

 If this test fails, ensure that the model's ``bind_tools`` method
 properly handles both JSON Schema and Pydantic V2 models.
-``langchain_core`` implements a utility function that will accommodate
-most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html

-See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
+``langchain_core`` implements `a utility function <https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html>`__
+that will accommodate most formats.
+
+See `example implementation <https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output>`__
+of ``with_structured_output``.

 """
 if not self.has_structured_output:
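A short, hedged illustration of the ``convert_to_openai_tool`` utility linked in the docstring above; the ``Joke`` schema is just an example:

```python
# Hedged sketch: normalize a Pydantic model into the OpenAI tool format that
# many bind_tools / with_structured_output implementations build on.
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import convert_to_openai_tool


class Joke(BaseModel):
    """Joke to tell the user."""

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")


tool_spec = convert_to_openai_tool(Joke)
# Roughly: {"type": "function", "function": {"name": "Joke",
#           "description": ..., "parameters": {...JSON Schema...}}}
print(tool_spec["function"]["name"])
```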
@@ -2003,10 +2021,12 @@ class ChatModelIntegrationTests(ChatModelTests):

 If this test fails, ensure that the model's ``bind_tools`` method
 properly handles both JSON Schema and Pydantic V2 models.
-``langchain_core`` implements a utility function that will accommodate
-most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html

-See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
+``langchain_core`` implements `a utility function <https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html>`__
+that will accommodate most formats.
+
+See `example implementation <https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output>`__
+of ``with_structured_output``.

 """
 if not self.has_structured_output:
@@ -2055,10 +2075,9 @@ class ChatModelIntegrationTests(ChatModelTests):

 @pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Test requires pydantic 2.")
 def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
-"""Test to verify we can generate structured output using
-``pydantic.v1.BaseModel``.
+"""Test to verify we can generate structured output using ``pydantic.v1.BaseModel``.

-``pydantic.v1.BaseModel`` is available in the pydantic 2 package.
+``pydantic.v1.BaseModel`` is available in the Pydantic 2 package.

 This test is optional and should be skipped if the model does not support
 structured output (see Configuration below).
@@ -2082,12 +2101,14 @@ class ChatModelIntegrationTests(ChatModelTests):

 If this test fails, ensure that the model's ``bind_tools`` method
 properly handles both JSON Schema and Pydantic V1 models.
-``langchain_core`` implements a utility function that will accommodate
-most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html

-See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
+``langchain_core`` implements `a utility function <https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html>`__
+that will accommodate most formats.

-"""
+See `example implementation <https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output>`__
+of ``with_structured_output``.
+
+""" # noqa: E501
 if not self.has_structured_output:
 pytest.skip("Test requires structured output.")

@@ -2144,10 +2165,12 @@ class ChatModelIntegrationTests(ChatModelTests):

 If this test fails, ensure that the model's ``bind_tools`` method
 properly handles Pydantic V2 models with optional parameters.
-``langchain_core`` implements a utility function that will accommodate
-most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html

-See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
+``langchain_core`` implements `a utility function <https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html>`__
+that will accommodate most formats.
+
+See `example implementation <https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output>`__
+of ``with_structured_output``.

 """
 if not self.has_structured_output:
@@ -2228,7 +2251,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 # Type ignoring since the interface only officially supports pydantic 1
 # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2.
 # We'll need to do a pass updating the type signatures.
-chat = model.with_structured_output(Joke, method="json_mode") # type: ignore[arg-type]
+chat = model.with_structured_output(Joke, method="json_mode")
 msg = (
 "Tell me a joke about cats. Return the result as a JSON with 'setup' and "
 "'punchline' keys. Return nothing other than JSON."
@@ -2291,6 +2314,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.supports_pdf_inputs:
 pytest.skip("Model does not support PDF inputs.")
+
 url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
 pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")

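For reference alongside the PDF-input hunk above, a hedged sketch of the kind of content-block message such a test typically sends; the block keys follow LangChain's standard multimodal content-block format as I understand it, and exact provider support varies:

```python
# Hedged sketch: build a HumanMessage carrying a base64-encoded PDF block.
import base64

import httpx
from langchain_core.messages import HumanMessage

url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")

message = HumanMessage(
    [
        {"type": "text", "text": "Summarize this document:"},
        {
            "type": "file",
            "source_type": "base64",
            "mime_type": "application/pdf",
            "data": pdf_data,
        },
    ]
)
# model.invoke([message]) would then exercise the provider's PDF support.
```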
@@ -2367,6 +2391,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.supports_audio_inputs:
 pytest.skip("Model does not support audio inputs.")
+
 url = "https://upload.wikimedia.org/wikipedia/commons/3/3d/Alcal%C3%A1_de_Henares_%28RPS_13-04-2024%29_canto_de_ruise%C3%B1or_%28Luscinia_megarhynchos%29_en_el_Soto_del_Henares.wav"
 audio_data = base64.b64encode(httpx.get(url).content).decode("utf-8")

@@ -2468,6 +2493,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.supports_image_inputs:
 pytest.skip("Model does not support image message.")
+
 image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
 image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

@@ -2575,6 +2601,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 """
 if not self.supports_image_tool_message:
 pytest.skip("Model does not support image tool message.")
+
 image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
 image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

@@ -2845,7 +2872,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 chat model.

 Check also that all required information (e.g., tool calling identifiers)
-from AIMessage objects is propagated correctly to model payloads.
+from ``AIMessage`` objects is propagated correctly to model payloads.

 This test may fail if the chat model does not consistently generate tool
 calls in response to an appropriate query. In these cases you can ``xfail``
@@ -2862,7 +2889,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 pytest.skip("Test requires tool calling.")

 @tool
-def get_weather(location: str) -> str: # pylint: disable=unused-argument
+def get_weather(location: str) -> str:
 """Call to surf the web."""
 return "It's sunny."

@@ -2956,7 +2983,7 @@ class ChatModelIntegrationTests(ChatModelTests):

 Args:
 model: The chat model to test
-tool_choice: Tool choice parameter to pass to bind_tools (provider-specific)
+tool_choice: Tool choice parameter to pass to ``bind_tools()`` (provider-specific)
 force_tool_call: Whether to force a tool call (use ``tool_choice=True`` if None)

 Tests that Unicode characters in tool call arguments are preserved correctly,