Merge branch 'master' into wip-v0.4

# Conflicts:
#	libs/core/langchain_core/version.py
#	libs/core/pyproject.toml
#	libs/core/uv.lock
#	libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
#	libs/partners/openai/uv.lock

Commit cfe13f673a
@@ -447,6 +447,163 @@
     ")"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "c5d9d19d-8ab1-4d9d-b3a0-56ee4e89c528",
+   "metadata": {},
+   "source": [
+    "### Custom tools\n",
+    "\n",
+    ":::info Requires ``langchain-openai>=0.3.29``\n",
+    "\n",
+    ":::\n",
+    "\n",
+    "[Custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) support tools with arbitrary string inputs. They can be particularly useful when you expect your string arguments to be long or complex."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "a47c809b-852f-46bd-8b9e-d9534c17213d",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "================================\u001b[1m Human Message \u001b[0m=================================\n",
+      "\n",
+      "Use the tool to calculate 3^3.\n",
+      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+      "\n",
+      "[{'id': 'rs_6894ff5747c0819d9b02fc5645b0be9c000169fd9fb68d99', 'summary': [], 'type': 'reasoning'}, {'call_id': 'call_7SYwMSQPbbEqFcKlKOpXeEux', 'input': 'print(3**3)', 'name': 'execute_code', 'type': 'custom_tool_call', 'id': 'ctc_6894ff5b9f54819d8155a63638d34103000169fd9fb68d99', 'status': 'completed'}]\n",
+      "Tool Calls:\n",
+      "  execute_code (call_7SYwMSQPbbEqFcKlKOpXeEux)\n",
+      " Call ID: call_7SYwMSQPbbEqFcKlKOpXeEux\n",
+      "  Args:\n",
+      "    __arg1: print(3**3)\n",
+      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+      "Name: execute_code\n",
+      "\n",
+      "[{'type': 'custom_tool_call_output', 'output': '27'}]\n",
+      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+      "\n",
+      "[{'type': 'text', 'text': '27', 'annotations': [], 'id': 'msg_6894ff5db3b8819d9159b3a370a25843000169fd9fb68d99'}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "from langchain_openai import ChatOpenAI, custom_tool\n",
+    "from langgraph.prebuilt import create_react_agent\n",
+    "\n",
+    "\n",
+    "@custom_tool\n",
+    "def execute_code(code: str) -> str:\n",
+    "    \"\"\"Execute python code.\"\"\"\n",
+    "    return \"27\"\n",
+    "\n",
+    "\n",
+    "llm = ChatOpenAI(model=\"gpt-5\", output_version=\"responses/v1\")\n",
+    "\n",
+    "agent = create_react_agent(llm, [execute_code])\n",
+    "\n",
+    "input_message = {\"role\": \"user\", \"content\": \"Use the tool to calculate 3^3.\"}\n",
+    "for step in agent.stream(\n",
+    "    {\"messages\": [input_message]},\n",
+    "    stream_mode=\"values\",\n",
+    "):\n",
+    "    step[\"messages\"][-1].pretty_print()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5ef93be6-6d4c-4eea-acfd-248774074082",
+   "metadata": {},
+   "source": [
+    "<details>\n",
+    "<summary>Context-free grammars</summary>\n",
+    "\n",
+    "OpenAI supports the specification of a [context-free grammar](https://platform.openai.com/docs/guides/function-calling#context-free-grammars) for custom tool inputs in `lark` or `regex` format. See [OpenAI docs](https://platform.openai.com/docs/guides/function-calling#context-free-grammars) for details. The `format` parameter can be passed into `@custom_tool` as shown below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "2ae04586-be33-49c6-8947-7867801d868f",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "================================\u001b[1m Human Message \u001b[0m=================================\n",
+      "\n",
+      "Use the tool to calculate 3^3.\n",
+      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+      "\n",
+      "[{'id': 'rs_689500828a8481a297ff0f98e328689c0681550c89797f43', 'summary': [], 'type': 'reasoning'}, {'call_id': 'call_jzH01RVhu6EFz7yUrOFXX55s', 'input': '3 * 3 * 3', 'name': 'do_math', 'type': 'custom_tool_call', 'id': 'ctc_6895008d57bc81a2b84d0993517a66b90681550c89797f43', 'status': 'completed'}]\n",
+      "Tool Calls:\n",
+      "  do_math (call_jzH01RVhu6EFz7yUrOFXX55s)\n",
+      " Call ID: call_jzH01RVhu6EFz7yUrOFXX55s\n",
+      "  Args:\n",
+      "    __arg1: 3 * 3 * 3\n",
+      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+      "Name: do_math\n",
+      "\n",
+      "[{'type': 'custom_tool_call_output', 'output': '27'}]\n",
+      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+      "\n",
+      "[{'type': 'text', 'text': '27', 'annotations': [], 'id': 'msg_6895009776b881a2a25f0be8507d08f20681550c89797f43'}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "from langchain_openai import ChatOpenAI, custom_tool\n",
+    "from langgraph.prebuilt import create_react_agent\n",
+    "\n",
+    "grammar = \"\"\"\n",
+    "start: expr\n",
+    "expr: term (SP ADD SP term)* -> add\n",
+    "| term\n",
+    "term: factor (SP MUL SP factor)* -> mul\n",
+    "| factor\n",
+    "factor: INT\n",
+    "SP: \" \"\n",
+    "ADD: \"+\"\n",
+    "MUL: \"*\"\n",
+    "%import common.INT\n",
+    "\"\"\"\n",
+    "\n",
+    "format_ = {\"type\": \"grammar\", \"syntax\": \"lark\", \"definition\": grammar}\n",
+    "\n",
+    "\n",
+    "# highlight-next-line\n",
+    "@custom_tool(format=format_)\n",
+    "def do_math(input_string: str) -> str:\n",
+    "    \"\"\"Do a mathematical operation.\"\"\"\n",
+    "    return \"27\"\n",
+    "\n",
+    "\n",
+    "llm = ChatOpenAI(model=\"gpt-5\", output_version=\"responses/v1\")\n",
+    "\n",
+    "agent = create_react_agent(llm, [do_math])\n",
+    "\n",
+    "input_message = {\"role\": \"user\", \"content\": \"Use the tool to calculate 3^3.\"}\n",
+    "for step in agent.stream(\n",
+    "    {\"messages\": [input_message]},\n",
+    "    stream_mode=\"values\",\n",
+    "):\n",
+    "    step[\"messages\"][-1].pretty_print()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c63430c9-c7b0-4e92-a491-3f165dddeb8f",
+   "metadata": {},
+   "source": [
+    "</details>"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "84833dd0-17e9-4269-82ed-550639d65751",
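Editor's note: the notebook cells above drive the tool through LangGraph's `create_react_agent`. A minimal sketch of the same round trip using only `bind_tools` (this mirrors the `test_custom_tool` integration test later in this diff; the stubbed `"27"` return value comes from the notebook, not a real code executor):

```python
from langchain_openai import ChatOpenAI, custom_tool


@custom_tool
def execute_code(code: str) -> str:
    """Execute python code."""
    return "27"  # stubbed result, as in the notebook


llm = ChatOpenAI(model="gpt-5", output_version="responses/v1").bind_tools([execute_code])

input_message = {"role": "user", "content": "Use the tool to calculate 3^3."}
tool_call_message = llm.invoke([input_message])

# Run the tool on the model's tool call, then pass the ToolMessage back.
tool_message = execute_code.invoke(tool_call_message.tool_calls[0])
response = llm.invoke([input_message, tool_call_message, tool_message])
```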
@@ -75,7 +75,14 @@ if TYPE_CHECKING:
     from collections.abc import Sequence

 FILTERED_ARGS = ("run_manager", "callbacks")
-TOOL_MESSAGE_BLOCK_TYPES = ("text", "image_url", "image", "json", "search_result")
+TOOL_MESSAGE_BLOCK_TYPES = (
+    "text",
+    "image_url",
+    "image",
+    "json",
+    "search_result",
+    "custom_tool_call_output",
+)


 class SchemaAnnotationError(TypeError):
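Editor's note: adding `custom_tool_call_output` to this tuple lets a `ToolMessage` carry the raw output block that custom tools produce, instead of having it coerced to a plain string. A minimal sketch of the kind of content this change admits (the ID is illustrative):

```python
from langchain_core.messages import ToolMessage

# Content blocks whose "type" is in TOOL_MESSAGE_BLOCK_TYPES are preserved
# as structured blocks on the ToolMessage.
msg = ToolMessage(
    [{"type": "custom_tool_call_output", "output": "27"}],
    tool_call_id="call_123",  # illustrative ID
)
```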
@@ -575,12 +575,23 @@ def convert_to_openai_tool(

         Added support for OpenAI's image generation built-in tool.
     """
+    from langchain_core.tools import Tool
+
     if isinstance(tool, dict):
         if tool.get("type") in _WellKnownOpenAITools:
             return tool
         # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11"
         if (tool.get("type") or "").startswith("web_search_preview"):
             return tool
+    if isinstance(tool, Tool) and (tool.metadata or {}).get("type") == "custom_tool":
+        oai_tool = {
+            "type": "custom",
+            "name": tool.name,
+            "description": tool.description,
+        }
+        if tool.metadata is not None and "format" in tool.metadata:
+            oai_tool["format"] = tool.metadata["format"]
+        return oai_tool
     oai_function = convert_to_openai_function(tool, strict=strict)
     return {"type": "function", "function": oai_function}
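Editor's note: for a tool decorated with `@custom_tool`, the new branch above yields an OpenAI `custom` tool spec rather than a `function` spec. A sketch of the expected result (names and grammar are illustrative; the `format` key appears only when one was supplied):

```python
# Given a Tool whose metadata is {"type": "custom_tool", "format": {...}},
# convert_to_openai_tool would produce something like:
oai_tool = {
    "type": "custom",
    "name": "do_math",
    "description": "Do a mathematical operation.",
    "format": {"type": "grammar", "syntax": "lark", "definition": "..."},
}
```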
@@ -85,6 +85,32 @@ def test_init_unknown_provider() -> None:
     clear=True,
 )
 def test_configurable() -> None:
+    """Test configurable chat model behavior without default parameters.
+
+    Verifies that a configurable chat model initialized without default parameters:
+    - Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
+    - Blocks access to non-configurable methods until configuration is provided
+    - Supports declarative operations (``bind_tools``) without mutating original model
+    - Can chain declarative operations and configuration to access full functionality
+    - Properly resolves to the configured model type when parameters are provided
+
+    Example:
+
+        .. python::
+
+            # This creates a configurable model without specifying which model
+            model = init_chat_model()
+
+            # This will FAIL - no model specified yet
+            model.get_num_tokens("hello")  # AttributeError!
+
+            # This works - provides model at runtime
+            response = model.invoke(
+                "Hello",
+                config={"configurable": {"model": "gpt-4o"}}
+            )
+
+    """
     model = init_chat_model()

     for method in (
@@ -142,6 +168,7 @@ def test_configurable() -> None:
         "presence_penalty": None,
         "reasoning": None,
         "reasoning_effort": None,
+        "verbosity": None,
         "frequency_penalty": None,
         "include": None,
         "seed": None,
@@ -187,6 +214,32 @@ def test_configurable() -> None:
     clear=True,
 )
 def test_configurable_with_default() -> None:
+    """Test configurable chat model behavior with default parameters.
+
+    Verifies that a configurable chat model initialized with default parameters:
+    - Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
+    - Provides immediate access to non-configurable methods (e.g. ``get_num_tokens``)
+    - Supports model switching through runtime configuration using ``config_prefix``
+    - Maintains proper model identity and attributes when reconfigured
+    - Can be used in chains with different model providers via configuration
+
+    Example:
+
+        .. python::
+
+            # This creates a configurable model with default parameters (model)
+            model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
+
+            # This works immediately - uses default gpt-4o
+            tokens = model.get_num_tokens("hello")
+
+            # This also works - switches to Claude at runtime
+            response = model.invoke(
+                "Hello",
+                config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
+            )
+
+    """  # noqa: E501
     model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
     for method in (
         "invoke",
@@ -68,6 +68,32 @@ def test_init_unknown_provider() -> None:
     clear=True,
 )
 def test_configurable() -> None:
+    """Test configurable chat model behavior without default parameters.
+
+    Verifies that a configurable chat model initialized without default parameters:
+    - Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
+    - Blocks access to non-configurable methods until configuration is provided
+    - Supports declarative operations (``bind_tools``) without mutating original model
+    - Can chain declarative operations and configuration to access full functionality
+    - Properly resolves to the configured model type when parameters are provided
+
+    Example:
+
+        .. python::
+
+            # This creates a configurable model without specifying which model
+            model = init_chat_model()
+
+            # This will FAIL - no model specified yet
+            model.get_num_tokens("hello")  # AttributeError!
+
+            # This works - provides model at runtime
+            response = model.invoke(
+                "Hello",
+                config={"configurable": {"model": "gpt-4o"}}
+            )
+
+    """
     model = init_chat_model()

     for method in (
@@ -125,6 +151,7 @@ def test_configurable() -> None:
         "presence_penalty": None,
         "reasoning": None,
         "reasoning_effort": None,
+        "verbosity": None,
         "frequency_penalty": None,
         "include": None,
         "seed": None,
@@ -170,6 +197,32 @@ def test_configurable() -> None:
     clear=True,
 )
 def test_configurable_with_default() -> None:
+    """Test configurable chat model behavior with default parameters.
+
+    Verifies that a configurable chat model initialized with default parameters:
+    - Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
+    - Provides immediate access to non-configurable methods (e.g. ``get_num_tokens``)
+    - Supports model switching through runtime configuration using ``config_prefix``
+    - Maintains proper model identity and attributes when reconfigured
+    - Can be used in chains with different model providers via configuration
+
+    Example:
+
+        .. python::
+
+            # This creates a configurable model with default parameters (model)
+            model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
+
+            # This works immediately - uses default gpt-4o
+            tokens = model.get_num_tokens("hello")
+
+            # This also works - switches to Claude at runtime
+            response = model.invoke(
+                "Hello",
+                config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
+            )
+
+    """  # noqa: E501
     model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
     for method in (
         "invoke",
@@ -1,6 +1,7 @@
 from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI
 from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
 from langchain_openai.llms import AzureOpenAI, OpenAI
+from langchain_openai.tools import custom_tool

 __all__ = [
     "OpenAI",
@@ -9,4 +10,5 @@ __all__ = [
     "AzureOpenAI",
     "AzureChatOpenAI",
     "AzureOpenAIEmbeddings",
+    "custom_tool",
 ]
@@ -458,8 +458,7 @@ class BaseChatOpenAI(BaseChatModel):
         alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
     )
     openai_api_base: Optional[str] = Field(default=None, alias="base_url")
-    """Base URL path for API requests, leave blank if not using a proxy or service
-    emulator."""
+    """Base URL path for API requests, leave blank if not using a proxy or service emulator."""  # noqa: E501
     openai_organization: Optional[str] = Field(default=None, alias="organization")
     """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided."""
     # to support explicit proxy for OpenAI
@@ -507,8 +506,9 @@ class BaseChatOpenAI(BaseChatModel):

     Reasoning models only, like OpenAI o1, o3, and o4-mini.

-    Currently supported values are low, medium, and high. Reducing reasoning effort
-    can result in faster responses and fewer tokens used on reasoning in a response.
+    Currently supported values are ``'minimal'``, ``'low'``, ``'medium'``, and
+    ``'high'``. Reducing reasoning effort can result in faster responses and fewer
+    tokens used on reasoning in a response.

     .. versionadded:: 0.2.14
     """
@@ -527,6 +527,17 @@ class BaseChatOpenAI(BaseChatModel):

     .. versionadded:: 0.3.24

     """
+    verbosity: Optional[str] = None
+    """Controls the verbosity level of responses for reasoning models. For use with the
+    Responses API.
+
+    Currently supported values are ``'low'``, ``'medium'``, and ``'high'``.
+
+    Controls how detailed the model's responses are.
+
+    .. versionadded:: 0.3.28
+
+    """
     tiktoken_model_name: Optional[str] = None
     """The model name to pass to tiktoken when using this class.
@@ -654,6 +665,7 @@ class BaseChatOpenAI(BaseChatModel):
             llm = ChatOpenAI(
                 model="o4-mini",
                 use_responses_api=True,
+                output_version="responses/v1",
             )
             llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123")

@@ -701,10 +713,24 @@ class BaseChatOpenAI(BaseChatModel):
     @model_validator(mode="before")
     @classmethod
     def validate_temperature(cls, values: dict[str, Any]) -> Any:
-        """Currently o1 models only allow temperature=1."""
+        """Validate temperature parameter for different models.
+
+        - o1 models only allow temperature=1
+        - gpt-5 models only allow temperature=1 or unset (defaults to 1)
+        """
         model = values.get("model_name") or values.get("model") or ""
+
+        # For o1 models, set temperature=1 if not provided
         if model.startswith("o1") and "temperature" not in values:
             values["temperature"] = 1
+
+        # For gpt-5 models, handle temperature restrictions
+        if model.startswith("gpt-5"):
+            temperature = values.get("temperature")
+            if temperature is not None and temperature != 1:
+                # For gpt-5, only temperature=1 is supported, so remove non-defaults
+                values.pop("temperature", None)
+
         return values

     @model_validator(mode="after")
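Editor's note: a sketch of the observable behavior of the validator above, assuming the surrounding `BaseChatOpenAI` defaults:

```python
from langchain_openai import ChatOpenAI

# o1: the validator injects temperature=1 when none is provided.
llm_o1 = ChatOpenAI(model="o1")

# gpt-5: a non-default temperature is silently dropped (rather than rejected),
# so the request falls back to the model's default of 1.
llm_gpt5 = ChatOpenAI(model="gpt-5", temperature=0.2)
```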
@@ -805,6 +831,7 @@ class BaseChatOpenAI(BaseChatModel):
             "temperature": self.temperature,
             "reasoning_effort": self.reasoning_effort,
             "reasoning": self.reasoning,
+            "verbosity": self.verbosity,
             "include": self.include,
             "service_tier": self.service_tier,
             "truncation": self.truncation,
@@ -1178,6 +1205,7 @@ class BaseChatOpenAI(BaseChatModel):
             kwargs["stop"] = stop
+
         payload = {**self._default_params, **kwargs}

         if self._use_responses_api(payload):
             if self.use_previous_response_id:
                 last_messages, previous_response_id = _get_last_messages(messages)
@@ -1447,8 +1475,10 @@ class BaseChatOpenAI(BaseChatModel):
             encoding = tiktoken.encoding_for_model(model)
         except KeyError:
             encoder = "cl100k_base"
-            if self.model_name.startswith("gpt-4o") or self.model_name.startswith(
-                "gpt-4.1"
+            if (
+                self.model_name.startswith("gpt-4o")
+                or self.model_name.startswith("gpt-4.1")
+                or self.model_name.startswith("gpt-5")
             ):
                 encoder = "o200k_base"
             encoding = tiktoken.get_encoding(encoder)
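Editor's note: this fallback matters when the installed `tiktoken` release does not yet know the model name. A small illustration of the equivalent lookup:

```python
import tiktoken

try:
    encoding = tiktoken.encoding_for_model("gpt-5")
except KeyError:
    # Same fallback as above: per this diff, the gpt-4o / gpt-4.1 / gpt-5
    # families are mapped to the o200k_base encoding.
    encoding = tiktoken.get_encoding("o200k_base")

print(len(encoding.encode("hello world")))
```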
@@ -1499,7 +1529,11 @@ class BaseChatOpenAI(BaseChatModel):
             tokens_per_message = 4
             # if there's a name, the role is omitted
             tokens_per_name = -1
-        elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
+        elif (
+            model.startswith("gpt-3.5-turbo")
+            or model.startswith("gpt-4")
+            or model.startswith("gpt-5")
+        ):
             tokens_per_message = 3
             tokens_per_name = 1
         else:
@@ -2360,7 +2394,11 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

             from langchain_openai import ChatOpenAI

-            llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
+            llm = ChatOpenAI(
+                model="gpt-4.1-mini",
+                use_responses_api=True,
+                output_version="responses/v1",
+            )
             response = llm.invoke("Hi, I'm Bob.")
             response.text()

@@ -3480,6 +3518,11 @@ def _construct_responses_api_payload(
     if "reasoning_effort" in payload and "reasoning" not in payload:
         payload["reasoning"] = {"effort": payload.pop("reasoning_effort")}

+    # Remove temperature parameter for models that don't support it in responses API
+    model = payload.get("model", "")
+    if model.startswith("gpt-5"):
+        payload.pop("temperature", None)
+
     payload["input"] = _construct_responses_api_input(messages)
     if tools := payload.pop("tools", None):
         new_tools: list = []
@@ -3582,6 +3625,20 @@ def _make_computer_call_output_from_message(message: ToolMessage) -> dict:
     return computer_call_output


+def _make_custom_tool_output_from_message(message: ToolMessage) -> Optional[dict]:
+    custom_tool_output = None
+    for block in message.content:
+        if isinstance(block, dict) and block.get("type") == "custom_tool_call_output":
+            custom_tool_output = {
+                "type": "custom_tool_call_output",
+                "call_id": message.tool_call_id,
+                "output": block.get("output") or "",
+            }
+            break
+
+    return custom_tool_output
+
+
 def _pop_index_and_sub_index(block: dict) -> dict:
     """When streaming, langchain-core uses the ``index`` key to aggregate
     text blocks. OpenAI API does not support this key, so we need to remove it.
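Editor's note: the new helper scans a `ToolMessage`'s content blocks and, on the first `custom_tool_call_output` block, builds the Responses API output item. A sketch of the transformation (values illustrative):

```python
from langchain_core.messages import ToolMessage

message = ToolMessage(
    [{"type": "custom_tool_call_output", "output": "27"}],
    tool_call_id="call_abc",  # illustrative ID
)
# _make_custom_tool_output_from_message(message) would return:
# {"type": "custom_tool_call_output", "call_id": "call_abc", "output": "27"}
```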
@@ -3608,7 +3665,10 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
             msg.pop("name")
         if msg["role"] == "tool":
             tool_output = msg["content"]
-            if lc_msg.additional_kwargs.get("type") == "computer_call_output":
+            custom_tool_output = _make_custom_tool_output_from_message(lc_msg)  # type: ignore[arg-type]
+            if custom_tool_output:
+                input_.append(custom_tool_output)
+            elif lc_msg.additional_kwargs.get("type") == "computer_call_output":
                 computer_call_output = _make_computer_call_output_from_message(
                     cast(ToolMessage, lc_msg)
                 )
@@ -3663,6 +3723,7 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
                 "file_search_call",
                 "function_call",
                 "computer_call",
+                "custom_tool_call",
                 "code_interpreter_call",
                 "mcp_call",
                 "mcp_list_tools",
@@ -3690,7 +3751,8 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
             content_call_ids = {
                 block["call_id"]
                 for block in input_
-                if block.get("type") == "function_call" and "call_id" in block
+                if block.get("type") in ("function_call", "custom_tool_call")
+                and "call_id" in block
             }
             for tool_call in tool_calls:
                 if tool_call["id"] not in content_call_ids:
@@ -3841,6 +3903,15 @@ def _construct_lc_result_from_responses_api(
                 "error": error,
             }
             invalid_tool_calls.append(tool_call)
+        elif output.type == "custom_tool_call":
+            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            tool_call = {
+                "type": "tool_call",
+                "name": output.name,
+                "args": {"__arg1": output.input},
+                "id": output.call_id,
+            }
+            tool_calls.append(tool_call)
         elif output.type in (
             "reasoning",
             "web_search_call",
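Editor's note: a `custom_tool_call` output item is therefore surfaced twice on the resulting `AIMessage`: verbatim as a content block, and as a standard LangChain tool call whose single string input is keyed `__arg1`. A sketch of the parsed shape (IDs illustrative):

```python
# Parsed from a custom_tool_call output item in the Responses API result:
tool_call = {
    "type": "tool_call",
    "name": "execute_code",
    "args": {"__arg1": "print(3**3)"},
    "id": "call_abc",
}
```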
@@ -4044,6 +4115,23 @@ def _convert_responses_chunk_to_generation_chunk(
         tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
         tool_output["index"] = current_index
         content.append(tool_output)
+    elif (
+        chunk.type == "response.output_item.done"
+        and chunk.item.type == "custom_tool_call"
+    ):
+        _advance(chunk.output_index)
+        tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
+        tool_output["index"] = current_index
+        content.append(tool_output)
+        tool_call_chunks.append(
+            {
+                "type": "tool_call_chunk",
+                "name": chunk.item.name,
+                "args": json.dumps({"__arg1": chunk.item.input}),
+                "id": chunk.item.call_id,
+                "index": current_index,
+            }
+        )
     elif chunk.type == "response.function_call_arguments.delta":
         _advance(chunk.output_index)
         tool_call_chunks.append(
libs/partners/openai/langchain_openai/tools/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
+from langchain_openai.tools.custom_tool import custom_tool
+
+__all__ = ["custom_tool"]
libs/partners/openai/langchain_openai/tools/custom_tool.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+import inspect
+from collections.abc import Awaitable
+from typing import Any, Callable
+
+from langchain_core.tools import tool
+
+
+def _make_wrapped_func(func: Callable[..., str]) -> Callable[..., list[dict[str, Any]]]:
+    def wrapped(x: str) -> list[dict[str, Any]]:
+        return [{"type": "custom_tool_call_output", "output": func(x)}]
+
+    return wrapped
+
+
+def _make_wrapped_coroutine(
+    coroutine: Callable[..., Awaitable[str]],
+) -> Callable[..., Awaitable[list[dict[str, Any]]]]:
+    async def wrapped(*args: Any, **kwargs: Any) -> list[dict[str, Any]]:
+        result = await coroutine(*args, **kwargs)
+        return [{"type": "custom_tool_call_output", "output": result}]
+
+    return wrapped
+
+
+def custom_tool(*args: Any, **kwargs: Any) -> Any:
+    """Decorator to create an OpenAI custom tool.
+
+    Custom tools allow for tools with (potentially long) freeform string inputs.
+
+    See below for an example using LangGraph:
+
+    .. code-block:: python
+
+        @custom_tool
+        def execute_code(code: str) -> str:
+            \"\"\"Execute python code.\"\"\"
+            return "27"
+
+
+        llm = ChatOpenAI(model="gpt-5", output_version="responses/v1")
+
+        agent = create_react_agent(llm, [execute_code])
+
+        input_message = {"role": "user", "content": "Use the tool to calculate 3^3."}
+        for step in agent.stream(
+            {"messages": [input_message]},
+            stream_mode="values",
+        ):
+            step["messages"][-1].pretty_print()
+
+    You can also specify a format for a corresponding context-free grammar using the
+    ``format`` kwarg:
+
+    .. code-block:: python
+
+        from langchain_openai import ChatOpenAI, custom_tool
+        from langgraph.prebuilt import create_react_agent
+
+        grammar = \"\"\"
+        start: expr
+        expr: term (SP ADD SP term)* -> add
+        | term
+        term: factor (SP MUL SP factor)* -> mul
+        | factor
+        factor: INT
+        SP: " "
+        ADD: "+"
+        MUL: "*"
+        %import common.INT
+        \"\"\"
+
+        format = {"type": "grammar", "syntax": "lark", "definition": grammar}
+
+        # highlight-next-line
+        @custom_tool(format=format)
+        def do_math(input_string: str) -> str:
+            \"\"\"Do a mathematical operation.\"\"\"
+            return "27"
+
+
+        llm = ChatOpenAI(model="gpt-5", output_version="responses/v1")
+
+        agent = create_react_agent(llm, [do_math])
+
+        input_message = {"role": "user", "content": "Use the tool to calculate 3^3."}
+        for step in agent.stream(
+            {"messages": [input_message]},
+            stream_mode="values",
+        ):
+            step["messages"][-1].pretty_print()
+    """
+
+    def decorator(func: Callable[..., Any]) -> Any:
+        metadata = {"type": "custom_tool"}
+        if "format" in kwargs:
+            metadata["format"] = kwargs.pop("format")
+        tool_obj = tool(infer_schema=False, **kwargs)(func)
+        tool_obj.metadata = metadata
+        tool_obj.description = func.__doc__
+        if inspect.iscoroutinefunction(func):
+            tool_obj.coroutine = _make_wrapped_coroutine(func)
+        else:
+            tool_obj.func = _make_wrapped_func(func)
+        return tool_obj
+
+    if args and callable(args[0]) and not kwargs:
+        return decorator(args[0])
+
+    return decorator
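Editor's note: outside an agent loop, a custom tool behaves like any other LangChain tool, except that its single string argument is conveyed as `__arg1` and its output is wrapped in a `custom_tool_call_output` block. A minimal sketch mirroring the unit tests later in this diff (tool name and values are illustrative):

```python
from langchain_openai import custom_tool


@custom_tool
def shout(text: str) -> str:
    """Uppercase the input."""
    return text.upper()


# Invoking with a tool-call dict yields a ToolMessage whose content is
# [{"type": "custom_tool_call_output", "output": "HI"}].
result = shout.invoke(
    {"type": "tool_call", "name": "shout", "args": {"__arg1": "hi"}, "id": "call_1"}
)
```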
libs/partners/openai/tests/cassettes/test_custom_tool.yaml.gz (new binary file, not shown)
@@ -32,7 +32,7 @@ from pydantic import BaseModel, Field
 from langchain_openai import ChatOpenAI
 from tests.unit_tests.fake.callbacks import FakeCallbackHandler

-MAX_TOKEN_COUNT = 16
+MAX_TOKEN_COUNT = 100


 @pytest.mark.scheduled
@@ -219,7 +219,7 @@ async def test_openai_abatch_tags(use_responses_api: bool) -> None:
 def test_openai_invoke() -> None:
     """Test invoke tokens from ChatOpenAI."""
     llm = ChatOpenAI(
-        model="o4-mini",
+        model="gpt-5-nano",
         service_tier="flex",  # Also test service_tier
         max_retries=3,  # Add retries for 503 capacity errors
     )
@@ -418,7 +418,7 @@ class MakeASandwich(BaseModel):


 def test_tool_use() -> None:
-    llm = ChatOpenAI(model="gpt-4-turbo", temperature=0)
+    llm = ChatOpenAI(model="gpt-5-nano", temperature=0)
     llm_with_tool = llm.bind_tools(tools=[GenerateUsername], tool_choice=True)
     msgs: list = [HumanMessage("Sally has green hair, what would her username be?")]
     ai_msg = llm_with_tool.invoke(msgs)
@@ -462,7 +462,7 @@ def test_tool_use() -> None:
 def test_manual_tool_call_msg(use_responses_api: bool) -> None:
     """Test passing in manually construct tool call message."""
     llm = ChatOpenAI(
-        model="gpt-3.5-turbo-0125", temperature=0, use_responses_api=use_responses_api
+        model="gpt-5-nano", temperature=0, use_responses_api=use_responses_api
     )
     llm_with_tool = llm.bind_tools(tools=[GenerateUsername])
     msgs: list = [
@@ -510,7 +510,7 @@ def test_manual_tool_call_msg(use_responses_api: bool) -> None:
 def test_bind_tools_tool_choice(use_responses_api: bool) -> None:
     """Test passing in manually construct tool call message."""
     llm = ChatOpenAI(
-        model="gpt-3.5-turbo-0125", temperature=0, use_responses_api=use_responses_api
+        model="gpt-5-nano", temperature=0, use_responses_api=use_responses_api
     )
     for tool_choice in ("any", "required"):
         llm_with_tools = llm.bind_tools(
@@ -525,7 +525,7 @@ def test_bind_tools_tool_choice(use_responses_api: bool) -> None:


 def test_disable_parallel_tool_calling() -> None:
-    llm = ChatOpenAI(model="gpt-4o-mini")
+    llm = ChatOpenAI(model="gpt-5-nano")
     llm_with_tools = llm.bind_tools([GenerateUsername], parallel_tool_calls=False)
     result = llm_with_tools.invoke(
         "Use the GenerateUsername tool to generate user names for:\n\n"
@@ -536,7 +536,7 @@ def test_disable_parallel_tool_calling() -> None:
     assert len(result.tool_calls) == 1


-@pytest.mark.parametrize("model", ["gpt-4o-mini", "o1", "gpt-4"])
+@pytest.mark.parametrize("model", ["gpt-4o-mini", "o1", "gpt-4", "gpt-5-nano"])
 def test_openai_structured_output(model: str) -> None:
     class MyModel(BaseModel):
         """A Person"""
@@ -696,7 +696,7 @@ def test_tool_calling_strict(use_responses_api: bool) -> None:
         input: Optional[int] = Field(default=None)

     model = ChatOpenAI(
-        model="gpt-4.1", temperature=0, use_responses_api=use_responses_api
+        model="gpt-5-nano", temperature=0, use_responses_api=use_responses_api
     )
     # N.B. magic_function adds metadata to schema (min/max for number fields)
     model_with_tools = model.bind_tools([magic_function], strict=True)
@@ -820,7 +820,7 @@ def test_json_schema_openai_format(
     strict: bool, method: Literal["json_schema", "function_calling"]
 ) -> None:
     """Test we can pass in OpenAI schema format specifying strict."""
-    llm = ChatOpenAI(model="gpt-4o-mini")
+    llm = ChatOpenAI(model="gpt-5-nano")
     schema = {
         "name": "get_weather",
         "description": "Fetches the weather in the given location",
@@ -941,7 +941,7 @@ def test_prediction_tokens() -> None:
     """
     )

-    llm = ChatOpenAI(model="gpt-4o")
+    llm = ChatOpenAI(model="gpt-4.1-nano")
     query = (
         "Replace the Username property with an Email property. "
         "Respond only with code, and with no markdown formatting."
@@ -983,7 +983,7 @@ class Foo(BaseModel):
 def test_stream_response_format() -> None:
     full: Optional[BaseMessageChunk] = None
     chunks = []
-    for chunk in ChatOpenAI(model="gpt-4o-mini").stream(
+    for chunk in ChatOpenAI(model="gpt-5-nano").stream(
         "how are ya", response_format=Foo
     ):
         chunks.append(chunk)
@@ -1000,7 +1000,7 @@ def test_stream_response_format() -> None:
 async def test_astream_response_format() -> None:
     full: Optional[BaseMessageChunk] = None
     chunks = []
-    async for chunk in ChatOpenAI(model="gpt-4o-mini").astream(
+    async for chunk in ChatOpenAI(model="gpt-5-nano").astream(
         "how are ya", response_format=Foo
     ):
         chunks.append(chunk)
@@ -1044,7 +1044,7 @@ def test_o1_stream_default_works() -> None:


 def test_multi_party_conversation() -> None:
-    llm = ChatOpenAI(model="gpt-4o")
+    llm = ChatOpenAI(model="gpt-5-nano")
     messages = [
         HumanMessage("Hi, I have black hair.", name="Alice"),
         HumanMessage("Hi, I have brown hair.", name="Bob"),
@@ -1059,7 +1059,7 @@ def test_structured_output_and_tools() -> None:
         response: str
         explanation: str

-    llm = ChatOpenAI(model="gpt-4o-mini").bind_tools(
+    llm = ChatOpenAI(model="gpt-5-nano").bind_tools(
         [GenerateUsername], strict=True, response_format=ResponseFormat
     )

@@ -1084,7 +1084,7 @@ def test_tools_and_structured_output() -> None:
         response: str
         explanation: str

-    llm = ChatOpenAI(model="gpt-4o-mini").with_structured_output(
+    llm = ChatOpenAI(model="gpt-5-nano").with_structured_output(
         ResponseFormat, strict=True, include_raw=True, tools=[GenerateUsername]
     )

@@ -1116,8 +1116,8 @@ def test_tools_and_structured_output() -> None:

 @pytest.mark.scheduled
 def test_prompt_cache_key_invoke() -> None:
-    """Test that prompt_cache_key works with invoke calls."""
-    chat = ChatOpenAI(model="gpt-4o-mini", max_completion_tokens=20)
+    """Test that `prompt_cache_key` works with invoke calls."""
+    chat = ChatOpenAI(model="gpt-5-nano", max_completion_tokens=500)
     messages = [HumanMessage("Say hello")]

     # Test that invoke works with prompt_cache_key parameter
@@ -1137,18 +1137,18 @@ def test_prompt_cache_key_invoke() -> None:

 @pytest.mark.scheduled
 def test_prompt_cache_key_usage_methods_integration() -> None:
-    """Integration test for prompt_cache_key usage methods."""
+    """Integration test for `prompt_cache_key` usage methods."""
     messages = [HumanMessage("Say hi")]

     # Test keyword argument method
-    chat = ChatOpenAI(model="gpt-4o-mini", max_completion_tokens=10)
+    chat = ChatOpenAI(model="gpt-5-nano", max_completion_tokens=10)
     response = chat.invoke(messages, prompt_cache_key="integration-test-v1")
     assert isinstance(response, AIMessage)
     assert isinstance(response.content, str)

     # Test model-level via model_kwargs
     chat_model_level = ChatOpenAI(
-        model="gpt-4o-mini",
+        model="gpt-5-nano",
         max_completion_tokens=10,
         model_kwargs={"prompt_cache_key": "integration-model-level-v1"},
     )
@@ -20,7 +20,7 @@ from langchain_core.v1.messages import HumanMessage as HumanMessageV1
 from pydantic import BaseModel
 from typing_extensions import TypedDict

-from langchain_openai import ChatOpenAI
+from langchain_openai import ChatOpenAI, custom_tool
 from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1

 MODEL_NAME = "gpt-4o-mini"
@@ -1130,3 +1130,56 @@ def test_image_generation_multi_turn_v1() -> None:
         if isinstance(block, dict) and block["type"] == "image"
     )
     assert set(expected_keys).issubset(tool_output.keys())
+
+
+@pytest.mark.xfail(
+    reason="verbosity parameter not yet supported by OpenAI Responses API"
+)
+def test_verbosity_parameter() -> None:
+    """Test verbosity parameter with Responses API.
+
+    TODO: This test is expected to fail until OpenAI enables verbosity support
+    in the Responses API for available models. The parameter is properly implemented
+    in the codebase but the API currently returns 'Unknown parameter: verbosity'.
+    Remove @pytest.mark.xfail when OpenAI adds support.
+    """
+    llm = ChatOpenAI(
+        model=MODEL_NAME,
+        verbosity="medium",
+        use_responses_api=True,
+        output_version="responses/v1",
+    )
+    response = llm.invoke([HumanMessage(content="Hello, explain quantum computing.")])
+
+    assert isinstance(response, AIMessage)
+    assert response.content
+    # When verbosity works, we expect the response to respect the verbosity level
+
+
+@pytest.mark.vcr()
+def test_custom_tool() -> None:
+    @custom_tool
+    def execute_code(code: str) -> str:
+        """Execute python code."""
+        return "27"
+
+    llm = ChatOpenAI(model="gpt-5", output_version="responses/v1").bind_tools(
+        [execute_code]
+    )
+
+    input_message = {"role": "user", "content": "Use the tool to evaluate 3^3."}
+    tool_call_message = llm.invoke([input_message])
+    assert isinstance(tool_call_message, AIMessage)
+    assert len(tool_call_message.tool_calls) == 1
+    tool_call = tool_call_message.tool_calls[0]
+    tool_message = execute_code.invoke(tool_call)
+    response = llm.invoke([input_message, tool_call_message, tool_message])
+    assert isinstance(response, AIMessage)
+
+    # Test streaming
+    full: Optional[BaseMessageChunk] = None
+    for chunk in llm.stream([input_message]):
+        assert isinstance(chunk, AIMessageChunk)
+        full = chunk if full is None else full + chunk
+    assert isinstance(full, AIMessageChunk)
+    assert len(full.tool_calls) == 1
@@ -879,8 +879,13 @@ def test_get_num_tokens_from_messages() -> None:
         ),
         ToolMessage("foobar", tool_call_id="foo"),
     ]
-    expected = 176
-    actual = llm.get_num_tokens_from_messages(messages)
+    expected = 431  # Updated to match token count with mocked 100x100 image
+
+    # Mock _url_to_size to avoid PIL dependency in unit tests
+    with patch("langchain_openai.chat_models.base._url_to_size") as mock_url_to_size:
+        mock_url_to_size.return_value = (100, 100)  # 100x100 pixel image
+        actual = llm.get_num_tokens_from_messages(messages)
+
     assert expected == actual

     # Test file inputs
@@ -1136,6 +1141,73 @@ def test_init_o1() -> None:
     assert len(record) == 0


+def test_init_minimal_reasoning_effort() -> None:
+    with pytest.warns(None) as record:  # type: ignore[call-overload]
+        ChatOpenAI(model="gpt-5", reasoning_effort="minimal")
+    assert len(record) == 0
+
+
+@pytest.mark.parametrize("use_responses_api", [False, True])
+@pytest.mark.parametrize("use_max_completion_tokens", [True, False])
+def test_minimal_reasoning_effort_payload(
+    use_max_completion_tokens: bool, use_responses_api: bool
+) -> None:
+    """Test that minimal reasoning effort is included in request payload."""
+    if use_max_completion_tokens:
+        kwargs = {"max_completion_tokens": 100}
+    else:
+        kwargs = {"max_tokens": 100}
+
+    init_kwargs: dict[str, Any] = {
+        "model": "gpt-5",
+        "reasoning_effort": "minimal",
+        "use_responses_api": use_responses_api,
+        **kwargs,
+    }
+
+    if use_responses_api:
+        init_kwargs["output_version"] = "responses/v1"
+
+    llm = ChatOpenAI(**init_kwargs)
+
+    messages = [
+        {"role": "developer", "content": "respond with just 'test'"},
+        {"role": "user", "content": "hello"},
+    ]
+
+    payload = llm._get_request_payload(messages, stop=None)
+
+    # When using responses API, reasoning_effort becomes reasoning.effort
+    if use_responses_api:
+        assert "reasoning" in payload
+        assert payload["reasoning"]["effort"] == "minimal"
+        # For responses API, tokens param becomes max_output_tokens
+        assert payload["max_output_tokens"] == 100
+    else:
+        # For non-responses API, reasoning_effort remains as is
+        assert payload["reasoning_effort"] == "minimal"
+        if use_max_completion_tokens:
+            assert payload["max_completion_tokens"] == 100
+        else:
+            # max_tokens gets converted to max_completion_tokens in non-responses API
+            assert payload["max_completion_tokens"] == 100
+
+
+def test_verbosity_parameter_payload() -> None:
+    """Test verbosity parameter is included in request payload for Responses API."""
+    llm = ChatOpenAI(
+        model="gpt-5",
+        verbosity="high",
+        use_responses_api=True,
+        output_version="responses/v1",
+    )
+
+    messages = [{"role": "user", "content": "hello"}]
+    payload = llm._get_request_payload(messages, stop=None)
+
+    assert payload["verbosity"] == "high"
+
+
 def test_structured_output_old_model() -> None:
     class Output(TypedDict):
         """output."""
@@ -2203,7 +2275,9 @@ def test__construct_responses_api_input_multiple_message_types() -> None:
     assert messages_copy == messages

     # Test dict messages
-    llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
+    llm = ChatOpenAI(
+        model="o4-mini", use_responses_api=True, output_version="responses/v1"
+    )
     message_dicts: list = [
         {"role": "developer", "content": "This is a developer message."},
         {
@@ -2244,7 +2318,9 @@ class FakeTracer(BaseTracer):

 def test_mcp_tracing() -> None:
     # Test we exclude sensitive information from traces
-    llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
+    llm = ChatOpenAI(
+        model="o4-mini", use_responses_api=True, output_version="responses/v1"
+    )

     tracer = FakeTracer()
     mock_client = MagicMock()
@@ -2687,7 +2763,9 @@ def test_get_last_messages() -> None:

 def test_get_request_payload_use_previous_response_id() -> None:
     # Default - don't use previous_response ID
-    llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
+    llm = ChatOpenAI(
+        model="o4-mini", use_responses_api=True, output_version="responses/v1"
+    )
     messages = [
         HumanMessage("Hello"),
         AIMessage("Hi there!", response_metadata={"id": "resp_123"}),
@@ -71,7 +71,10 @@ def test_prompt_cache_key_model_kwargs() -> None:
 def test_prompt_cache_key_responses_api() -> None:
     """Test that prompt_cache_key works with Responses API."""
     chat = ChatOpenAI(
-        model="gpt-4o-mini", use_responses_api=True, max_completion_tokens=10
+        model="gpt-4o-mini",
+        use_responses_api=True,
+        output_version="responses/v1",
+        max_completion_tokens=10,
     )

     messages = [HumanMessage("Hello")]
@@ -8,6 +8,7 @@ EXPECTED_ALL = [
     "AzureOpenAI",
     "AzureChatOpenAI",
     "AzureOpenAIEmbeddings",
+    "custom_tool",
 ]

 EXPECTED_ALL_V1 = ["ChatOpenAI"]
libs/partners/openai/tests/unit_tests/test_tools.py (new file, 124 lines)
@@ -0,0 +1,124 @@
+from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
+from langchain_core.tools import Tool
+
+from langchain_openai import ChatOpenAI, custom_tool
+
+
+def test_custom_tool() -> None:
+    @custom_tool
+    def my_tool(x: str) -> str:
+        """Do thing."""
+        return "a" + x
+
+    # Test decorator
+    assert isinstance(my_tool, Tool)
+    assert my_tool.metadata == {"type": "custom_tool"}
+    assert my_tool.description == "Do thing."
+
+    result = my_tool.invoke(
+        {
+            "type": "tool_call",
+            "name": "my_tool",
+            "args": {"whatever": "b"},
+            "id": "abc",
+            "extras": {"type": "custom_tool_call"},
+        }
+    )
+    assert result == ToolMessage(
+        [{"type": "custom_tool_call_output", "output": "ab"}],
+        name="my_tool",
+        tool_call_id="abc",
+    )
+
+    # Test tool schema
+    ## Test with format
+    @custom_tool(format={"type": "grammar", "syntax": "lark", "definition": "..."})
+    def another_tool(x: str) -> None:
+        """Do thing."""
+        pass
+
+    llm = ChatOpenAI(
+        model="gpt-4.1", use_responses_api=True, output_version="responses/v1"
+    ).bind_tools([another_tool])
+    assert llm.kwargs == {  # type: ignore[attr-defined]
+        "tools": [
+            {
+                "type": "custom",
+                "name": "another_tool",
+                "description": "Do thing.",
+                "format": {"type": "grammar", "syntax": "lark", "definition": "..."},
+            }
+        ]
+    }
+
+    llm = ChatOpenAI(
+        model="gpt-4.1", use_responses_api=True, output_version="responses/v1"
+    ).bind_tools([my_tool])
+    assert llm.kwargs == {  # type: ignore[attr-defined]
+        "tools": [{"type": "custom", "name": "my_tool", "description": "Do thing."}]
+    }
+
+    # Test passing messages back
+    message_history = [
+        HumanMessage("Use the tool"),
+        AIMessage(
+            [
+                {
+                    "type": "custom_tool_call",
+                    "id": "ctc_abc123",
+                    "call_id": "abc",
+                    "name": "my_tool",
+                    "input": "a",
+                }
+            ],
+            tool_calls=[
+                {
+                    "type": "tool_call",
+                    "name": "my_tool",
+                    "args": {"__arg1": "a"},
+                    "id": "abc",
+                }
+            ],
+        ),
+        result,
+    ]
+    payload = llm._get_request_payload(message_history)  # type: ignore[attr-defined]
+    expected_input = [
+        {"content": "Use the tool", "role": "user"},
+        {
+            "type": "custom_tool_call",
+            "id": "ctc_abc123",
+            "call_id": "abc",
+            "name": "my_tool",
+            "input": "a",
+        },
+        {"type": "custom_tool_call_output", "call_id": "abc", "output": "ab"},
+    ]
+    assert payload["input"] == expected_input
+
+
+async def test_async_custom_tool() -> None:
+    @custom_tool
+    async def my_async_tool(x: str) -> str:
+        """Do async thing."""
+        return "a" + x
+
+    # Test decorator
+    assert isinstance(my_async_tool, Tool)
+    assert my_async_tool.metadata == {"type": "custom_tool"}
+    assert my_async_tool.description == "Do async thing."
+
+    result = await my_async_tool.ainvoke(
+        {
+            "type": "tool_call",
+            "name": "my_async_tool",
+            "args": {"whatever": "b"},
+            "id": "abc",
+            "extras": {"type": "custom_tool_call"},
+        }
+    )
+    assert result == ToolMessage(
+        [{"type": "custom_tool_call_output", "output": "ab"}],
+        name="my_async_tool",
+        tool_call_id="abc",
+    )