mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-20 01:54:14 +00:00)
fmat
@@ -70,7 +70,7 @@ The module defines several types of content blocks, including:
 
 - ``TextContentBlock``: Standard text.
 - ``ImageContentBlock``, ``Audio...``, ``Video...``, ``PlainText...``, ``File...``: For multimodal data.
-- ``ToolCallContentBlock``, ``ToolOutputContentBlock``: For function calling.
+- ``ToolCallContentBlock``: For function calling.
 - ``ReasoningContentBlock``: To capture a model's thought process.
 - ``Citation``: For annotations that link generated text to a source document.
 
@@ -135,6 +135,7 @@ def unicode_customer(customer_name: str, description: str) -> str:
 
     Returns:
         A confirmation message about the customer creation.
+
     """
     return f"Created customer: {customer_name} - {description}"
 
@@ -680,13 +681,13 @@ class ChatModelIntegrationTests(ChatModelTests):
         return {}
 
     def test_invoke(self, model: BaseChatModel) -> None:
-        """Test to verify that `model.invoke(simple_message)` works.
+        """Test to verify that ``model.invoke(simple_message)`` works.
 
         This should pass for all integrations.
 
         .. dropdown:: Troubleshooting
 
-            If this test fails, you should make sure your _generate method
+            If this test fails, you should make sure your ``_generate`` method
             does not raise any exceptions, and that it returns a valid
             :class:`~langchain_core.outputs.chat_result.ChatResult` like so:
 
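For reference, the ``ChatResult`` shape this troubleshooting note points at can be produced by a minimal ``_generate`` along the lines of the sketch below; the class name and reply text are illustrative and not taken from the repository.

.. code-block:: python

    from typing import Any, Optional

    from langchain_core.callbacks import CallbackManagerForLLMRun
    from langchain_core.language_models import BaseChatModel
    from langchain_core.messages import AIMessage, BaseMessage
    from langchain_core.outputs import ChatGeneration, ChatResult


    class ChatExampleProvider(BaseChatModel):
        """Hypothetical integration used only to illustrate the expected return type."""

        @property
        def _llm_type(self) -> str:
            return "chat-example-provider"

        def _generate(
            self,
            messages: list[BaseMessage],
            stop: Optional[list[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> ChatResult:
            # A real integration would call the provider API here; this sketch echoes
            # a fixed reply so the structure of the return value is the focus.
            message = AIMessage(content="Hello!")
            return ChatResult(generations=[ChatGeneration(message=message)])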
@@ -706,7 +707,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert len(result.content) > 0
 
     async def test_ainvoke(self, model: BaseChatModel) -> None:
-        """Test to verify that `await model.ainvoke(simple_message)` works.
+        """Test to verify that ``await model.ainvoke(simple_message)`` works.
 
         This should pass for all integrations. Passing this test does not indicate
         a "natively async" implementation, but rather that the model can be used
@@ -716,7 +717,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 
             First, debug
             :meth:`~langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`.
-            because `ainvoke` has a default implementation that calls `invoke` in an
+            because ``ainvoke`` has a default implementation that calls ``invoke`` in an
             async context.
 
             If that test passes but not this one, you should make sure your _agenerate
@@ -739,7 +740,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert len(result.content) > 0
 
     def test_stream(self, model: BaseChatModel) -> None:
-        """Test to verify that `model.stream(simple_message)` works.
+        """Test to verify that ``model.stream(simple_message)`` works.
 
         This should pass for all integrations. Passing this test does not indicate
         a "streaming" implementation, but rather that the model can be used in a
@@ -772,7 +773,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert num_chunks > 0
 
     async def test_astream(self, model: BaseChatModel) -> None:
-        """Test to verify that `await model.astream(simple_message)` works.
+        """Test to verify that ``await model.astream(simple_message)`` works.
 
         This should pass for all integrations. Passing this test does not indicate
         a "natively async" or "streaming" implementation, but rather that the model can
@@ -809,7 +810,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert num_chunks > 0
 
     def test_batch(self, model: BaseChatModel) -> None:
-        """Test to verify that `model.batch([messages])` works.
+        """Test to verify that ``model.batch([messages])`` works.
 
         This should pass for all integrations. Tests the model's ability to process
         multiple prompts in a single batch.
@@ -836,7 +837,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert len(result.content) > 0
 
     async def test_abatch(self, model: BaseChatModel) -> None:
-        """Test to verify that `await model.abatch([messages])` works.
+        """Test to verify that ``await model.abatch([messages])`` works.
 
         This should pass for all integrations. Tests the model's ability to process
         multiple prompts in a single batch asynchronously.
@@ -853,6 +854,7 @@ class ChatModelIntegrationTests(ChatModelTests):
             If those tests pass but not this one, you should make sure your `abatch`
             method does not raise any exceptions, and that it returns a list of valid
             :class:`~langchain_core.messages.AIMessage` objects.
+
         """
         batch_results = await model.abatch(["Hello", "Hey"])
         assert batch_results is not None
@@ -881,6 +883,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         1. Your model correctly processes the message history
         2. The model maintains appropriate context from previous messages
         3. The response is a valid :class:`~langchain_core.messages.AIMessage`
+
         """
         messages = [
             HumanMessage("hello"),
@@ -914,6 +917,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         1. Your model API can handle double messages, or the integration should
            merge messages before sending them to the API.
         2. The response is a valid :class:`~langchain_core.messages.AIMessage`
+
         """
         messages = [
             SystemMessage("hello"),
@@ -938,7 +942,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 
         .. versionchanged:: 0.3.17
 
-            Additionally check for the presence of `model_name` in the response
+            Additionally check for the presence of ``model_name`` in the response
             metadata, which is needed for usage tracking in callback handlers.
 
         .. dropdown:: Configuration
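As a rough illustration of what that 0.3.17 check expects, the returned message's ``response_metadata`` should carry the served model's name; the value below is a placeholder.

.. code-block:: python

    from langchain_core.messages import AIMessage

    message = AIMessage(
        content="Hello!",
        response_metadata={"model_name": "example-model-001"},
    )
    assert "model_name" in message.response_metadata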
@@ -987,7 +991,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 
             If this test fails, first verify that your model returns
             :class:`~langchain_core.messages.ai.UsageMetadata` dicts
-            attached to the returned AIMessage object in `_generate`:
+            attached to the returned AIMessage object in ``_generate``:
 
             .. code-block:: python
 
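The code block referenced above is cut off by this hunk; a sketch of the idea, with illustrative token counts, is:

.. code-block:: python

    from langchain_core.messages import AIMessage
    from langchain_core.messages.ai import UsageMetadata
    from langchain_core.outputs import ChatGeneration, ChatResult

    usage = UsageMetadata(input_tokens=11, output_tokens=7, total_tokens=18)
    message = AIMessage(content="Hello!", usage_metadata=usage)
    result = ChatResult(generations=[ChatGeneration(message=message)])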
@@ -1105,7 +1109,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 
         .. versionchanged:: 0.3.17
 
-            Additionally check for the presence of `model_name` in the response
+            Additionally check for the presence of ``model_name`` in the response
             metadata, which is needed for usage tracking in callback handlers.
 
         .. dropdown:: Configuration
@@ -1152,16 +1156,16 @@ class ChatModelIntegrationTests(ChatModelTests):
 
             If this test fails, first verify that your model yields
             :class:`~langchain_core.messages.ai.UsageMetadata` dicts
-            attached to the returned AIMessage object in `_stream`
+            attached to the returned AIMessage object in ``_stream``
             that sum up to the total usage metadata.
 
-            Note that `input_tokens` should only be included on one of the chunks
-            (typically the first or the last chunk), and the rest should have 0 or None
-            to avoid counting input tokens multiple times.
+            Note that ``input_tokens`` should only be included on one of the chunks
+            (typically the first or the last chunk), and the rest should have ``0`` or
+            ``None`` to avoid counting input tokens multiple times.
 
-            `output_tokens` typically count the number of tokens in each chunk, not the
-            sum. This test will pass as long as the sum of `output_tokens` across all
-            chunks is not 0.
+            ``output_tokens`` typically count the number of tokens in each chunk, not
+            the sum. This test will pass as long as the sum of ``output_tokens`` across
+            all chunks is not ``0``.
 
             .. code-block:: python
 
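A hedged sketch of the chunk-level accounting described here: ``input_tokens`` appears on a single chunk and each chunk reports its own ``output_tokens``; all values are illustrative.

.. code-block:: python

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    chunks = [
        ChatGenerationChunk(
            message=AIMessageChunk(
                content="Hello",
                usage_metadata={"input_tokens": 11, "output_tokens": 1, "total_tokens": 12},
            )
        ),
        ChatGenerationChunk(
            message=AIMessageChunk(
                content=" world",
                usage_metadata={"input_tokens": 0, "output_tokens": 1, "total_tokens": 1},
            )
        ),
    ]

    # The test's pass condition, roughly: summed output_tokens across chunks is non-zero.
    assert sum(c.message.usage_metadata["output_tokens"] for c in chunks) > 0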
@@ -1261,7 +1265,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         """Test that model does not fail when invoked with the ``stop`` parameter,
         which is a standard parameter for stopping generation at a certain token.
 
-        More on standard parameters here: https://python.langchain.com/docs/concepts/chat_models/#standard-parameters
+        `More on standard parameters <https://python.langchain.com/docs/concepts/chat_models/#standard-parameters>`__
 
         This should pass for all integrations.
 
@@ -1569,7 +1573,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         """Test that message histories are compatible with list tool contents
         (e.g. Anthropic format).
 
-        These message histories will include AIMessage objects with "tool use" and
+        These message histories will include ``AIMessage`` objects with "tool use" and
         content blocks, e.g.,
 
         .. code-block:: python
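The example block is cut off by this hunk; a message history in that spirit might look as follows, where the tool name, IDs, and arguments are made up for illustration.

.. code-block:: python

    from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

    messages = [
        HumanMessage("What is 3 + 5?"),
        AIMessage(
            content=[
                {"type": "text", "text": "Let me use the calculator tool."},
                {"type": "tool_use", "id": "toolu_01", "name": "add", "input": {"a": 3, "b": 5}},
            ],
            tool_calls=[
                {"name": "add", "args": {"a": 3, "b": 5}, "id": "toolu_01", "type": "tool_call"}
            ],
        ),
        ToolMessage("8", tool_call_id="toolu_01"),
    ]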
@@ -1603,8 +1607,8 @@ class ChatModelIntegrationTests(ChatModelTests):
 
         If this test fails, check that:
 
-        1. The model can correctly handle message histories that include AIMessage objects with list content.
-        2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format.
+        1. The model can correctly handle message histories that include ``AIMessage`` objects with list content.
+        2. The ``tool_calls`` attribute on ``AIMessage`` objects is correctly handled and passed to the model in an appropriate format.
         3. The model can correctly handle ToolMessage objects with string content and arbitrary string values for ``tool_call_id``.
 
         You can ``xfail`` the test if tool calling is implemented but this format
@@ -2052,9 +2056,9 @@ class ChatModelIntegrationTests(ChatModelTests):
     @pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Test requires pydantic 2.")
     def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
         """Test to verify we can generate structured output using
-        pydantic.v1.BaseModel.
+        ``pydantic.v1.BaseModel``.
 
-        pydantic.v1.BaseModel is available in the pydantic 2 package.
+        ``pydantic.v1.BaseModel`` is available in the pydantic 2 package.
 
         This test is optional and should be skipped if the model does not support
         structured output (see Configuration below).
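Roughly, the path this test exercises looks like the sketch below, where ``model`` stands for any chat model that supports structured output and the schema and prompt are examples.

.. code-block:: python

    from pydantic.v1 import BaseModel as BaseModelV1


    class Joke(BaseModelV1):
        setup: str
        punchline: str


    structured = model.with_structured_output(Joke)
    joke = structured.invoke("Tell me a joke about parrots")
    assert isinstance(joke, Joke)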
@@ -2686,7 +2690,7 @@ class ChatModelIntegrationTests(ChatModelTests):
 
         1. The model can correctly handle message histories that include message objects with list content.
         2. The ``tool_calls`` attribute on AIMessage objects is correctly handled and passed to the model in an appropriate format.
-        3. HumanMessages with "tool_result" content blocks are correctly handled.
+        3. ``HumanMessage``s with "tool_result" content blocks are correctly handled.
 
         Otherwise, if Anthropic tool call and result formats are not supported,
         set the ``supports_anthropic_inputs`` property to False.
@@ -2792,7 +2796,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert isinstance(response, AIMessage)
 
     def test_message_with_name(self, model: BaseChatModel) -> None:
-        """Test that HumanMessage with values for the ``name`` field can be handled.
+        """Test that ``HumanMessage`` with values for the ``name`` field can be handled.
 
         These messages may take the form:
 
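The example form is elided in this hunk; such a message is simply a ``HumanMessage`` constructed with a ``name`` keyword, where the value below is illustrative.

.. code-block:: python

    from langchain_core.messages import HumanMessage

    message = HumanMessage("hello", name="example_user")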
@@ -2953,11 +2957,12 @@ class ChatModelIntegrationTests(ChatModelTests):
         Args:
             model: The chat model to test
             tool_choice: Tool choice parameter to pass to bind_tools (provider-specific)
-            force_tool_call: Whether to force a tool call (use tool_choice=True if None)
+            force_tool_call: Whether to force a tool call (use ``tool_choice=True`` if None)
 
         Tests that Unicode characters in tool call arguments are preserved correctly,
-        not escaped as \\uXXXX sequences.
-        """
+        not escaped as ``\\uXXXX`` sequences.
+
+        """  # noqa: E501
         if not self.has_tool_calling:
             pytest.skip("Test requires tool calling support.")
 
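In rough terms, the check is that tool-call arguments containing non-ASCII text come back as the characters themselves rather than ``\\uXXXX`` escapes. A hedged sketch, with an illustrative prompt, a tool modeled on the ``unicode_customer`` helper above, and ``model`` standing for any tool-calling chat model:

.. code-block:: python

    from langchain_core.tools import tool


    @tool
    def unicode_customer(customer_name: str, description: str) -> str:
        """Create a customer record from a name and description."""
        return f"Created customer: {customer_name} - {description}"


    # Forcing the tool call is provider-specific; "any" is one common tool_choice value.
    bound = model.bind_tools([unicode_customer], tool_choice="any")
    result = bound.invoke("Create a customer named 你好公司 that sells tea.")
    assert "你好公司" in str(result.tool_calls[0]["args"])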
@@ -26,6 +26,7 @@ def generate_schema_pydantic_v1_from_2() -> Any:
     """Use to generate a schema from v1 namespace in pydantic 2.
 
     :private:
+
     """
     if PYDANTIC_MAJOR_VERSION != 2:
         msg = "This function is only compatible with Pydantic v2."
@@ -44,6 +45,7 @@ def generate_schema_pydantic() -> Any:
     """Works with either pydantic 1 or 2.
 
     :private:
+
     """
 
     class PersonA(BaseModel):
@@ -65,6 +67,7 @@ class ChatModelTests(BaseStandardTests):
     """Base class for chat model tests.
 
     :private:
+
     """
 
     @property
@@ -148,16 +151,12 @@ class ChatModelTests(BaseStandardTests):
 
     @property
     def supports_image_inputs(self) -> bool:
-        """(bool) whether the chat model supports image inputs, defaults to
-        ``False``.
-        """
+        """(bool) whether the chat model supports image inputs, defaults to ``False``."""  # noqa: E501
         return False
 
     @property
     def supports_image_urls(self) -> bool:
-        """(bool) whether the chat model supports image inputs from URLs, defaults to
-        ``False``.
-        """
+        """(bool) whether the chat model supports image inputs from URLs, defaults to ``False``."""  # noqa: E501
         return False
 
     @property
@@ -167,23 +166,21 @@ class ChatModelTests(BaseStandardTests):
 
     @property
     def supports_audio_inputs(self) -> bool:
-        """(bool) whether the chat model supports audio inputs, defaults to
-        ``False``.
-        """
+        """(bool) whether the chat model supports audio inputs, defaults to ``False``."""  # noqa: E501
        return False
 
     @property
     def supports_video_inputs(self) -> bool:
         """(bool) whether the chat model supports video inputs, defaults to ``False``.
+
         No current tests are written for this feature.
+
         """
         return False
 
     @property
     def returns_usage_metadata(self) -> bool:
-        """(bool) whether the chat model returns usage metadata on invoke and streaming
-        responses.
-        """
+        """(bool) whether the chat model returns usage metadata on invoke and streaming responses."""  # noqa: E501
         return True
 
     @property
@@ -193,9 +190,7 @@ class ChatModelTests(BaseStandardTests):
 
     @property
     def supports_image_tool_message(self) -> bool:
-        """(bool) whether the chat model supports ``ToolMessage``s that include image
-        content.
-        """
+        """(bool) whether the chat model supports ``ToolMessage``s that include image content."""  # noqa: E501
         return False
 
     @property
@@ -205,6 +200,7 @@ class ChatModelTests(BaseStandardTests):
         .. important::
             See ``enable_vcr_tests`` dropdown :class:`above <ChatModelTests>` for more
             information.
+
         """
         return False
 
@@ -806,6 +802,7 @@ class ChatModelUnitTests(ChatModelTests):
     def init_from_env_params(self) -> tuple[dict, dict, dict]:
         """(tuple) environment variables, additional initialization args, and expected
         instance attributes for testing initialization from environment variables.
+
         """
         return {}, {}, {}
 
@@ -818,6 +815,7 @@ class ChatModelUnitTests(ChatModelTests):
 
         1. ``chat_model_params`` is specified and the model can be initialized from those params;
         2. The model accommodates `standard parameters <https://python.langchain.com/docs/concepts/chat_models/#standard-parameters>`__
+
         """  # noqa: E501
         model = self.chat_model_class(
             **{
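For context, a concrete test class typically supplies these pieces roughly as below; the integration class, import path, model name, and parameter values are placeholders rather than anything taken from the diff.

.. code-block:: python

    from langchain_tests.unit_tests import ChatModelUnitTests

    from my_package.chat_models import ChatExampleProvider  # hypothetical integration


    class TestChatExampleProviderUnit(ChatModelUnitTests):
        @property
        def chat_model_class(self) -> type[ChatExampleProvider]:
            return ChatExampleProvider

        @property
        def chat_model_params(self) -> dict:
            # Standard parameters such as temperature or max_tokens go here.
            return {"model": "example-model-001", "temperature": 0}

        @property
        def init_from_env_params(self) -> tuple[dict, dict, dict]:
            return (
                {"EXAMPLE_API_KEY": "key-from-env"},  # environment variables to set
                {"model": "example-model-001"},       # extra constructor kwargs
                {"example_api_key": "key-from-env"},  # expected instance attributes
            )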
@@ -837,6 +835,7 @@ class ChatModelUnitTests(ChatModelTests):
         If this test fails, ensure that ``init_from_env_params`` is specified
         correctly and that model parameters are properly set from environment
         variables during initialization.
+
         """
         env_params, model_params, expected_attrs = self.init_from_env_params
         if not env_params:
@@ -861,6 +860,7 @@ class ChatModelUnitTests(ChatModelTests):
 
         If this test fails, ensure that the model can be initialized with a
         boolean ``streaming`` parameter.
+
         """
         model = self.chat_model_class(
             **{
@@ -887,6 +887,7 @@ class ChatModelUnitTests(ChatModelTests):
         a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html
 
         See example implementation of ``bind_tools`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.bind_tools
+
         """
         if not self.has_tool_calling:
             return
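To make the reference concrete, ``convert_to_openai_tool`` normalizes plain functions, pydantic models, and ``BaseTool`` instances into the OpenAI-style tool schema that most ``bind_tools`` implementations pass along to the provider. A small, hedged example:

.. code-block:: python

    from langchain_core.tools import tool
    from langchain_core.utils.function_calling import convert_to_openai_tool


    @tool
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b


    openai_tool = convert_to_openai_tool(add)
    assert openai_tool["function"]["name"] == "add"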
@@ -927,6 +928,7 @@ class ChatModelUnitTests(ChatModelTests):
         a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html
 
         See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
+
         """
         if not self.has_structured_output:
             return
@@ -949,6 +951,7 @@ class ChatModelUnitTests(ChatModelTests):
 
         Check also that the model class is named according to convention
         (e.g., ``ChatProviderName``).
+
         """
 
         class ExpectedParams(BaseModelV1):
@@ -986,6 +989,7 @@ class ChatModelUnitTests(ChatModelTests):
 
         If this test fails, check that the ``init_from_env_params`` property is
         correctly set on the test class.
+
         """
         if not self.chat_model_class.is_lc_serializable():
             pytest.skip("Model is not serializable.")
@@ -1005,6 +1009,7 @@ class ChatModelUnitTests(ChatModelTests):
     def test_init_time(self, benchmark: BenchmarkFixture) -> None:
         """Test initialization time of the chat model. If this test fails, check that
         we are not introducing undue overhead in the model's initialization.
+
         """
 
         def _init_in_loop() -> None:
@@ -102,8 +102,8 @@ class TestChatParrotLinkV1Unit(ChatModelV1UnitTests):
         return False
 
     @property
-    def supports_enhanced_tool_calls(self) -> bool:
-        """``ChatParrotLinkV1`` does not support enhanced tool calls."""
+    def supports_tool_calls(self) -> bool:
+        """``ChatParrotLinkV1`` does not support tool calls."""
         return False
 
     @property