From f9fdca6cc29a3b8eee9bc357630c40e470d5e6aa Mon Sep 17 00:00:00 2001
From: ccurme
Date: Mon, 10 Jun 2024 13:44:43 -0400
Subject: [PATCH] openai: add `parallel_tool_calls` to api ref (#22746)

![Screenshot 2024-06-10 at 1 41 24 PM](https://github.com/langchain-ai/langchain/assets/26529506/2626bf9c-41c6-4431-b2e1-f59de1e4e468)
---
 .../langchain_openai/chat_models/base.py      | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 91e650c403a..d4f37f53988 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -1281,6 +1281,28 @@ class ChatOpenAI(BaseChatOpenAI):
               'args': {'location': 'New York, NY'},
               'id': 'call_6ghfKxV264jEfe1mRIkS3PE7'}]
 
+        Note that ``openai >= 1.32`` supports a ``parallel_tool_calls`` parameter
+        that defaults to ``True``. This parameter can be set to ``False`` to
+        disable parallel tool calls:
+
+        .. code-block:: python
+
+            ai_msg = llm_with_tools.invoke(
+                "What is the weather in LA and NY?",
+                parallel_tool_calls=False,
+            )
+            ai_msg.tool_calls
+
+        .. code-block:: python
+
+            [{'name': 'GetWeather',
+              'args': {'location': 'Los Angeles, CA'},
+              'id': 'call_4OoY0ZR99iEvC7fevsH8Uhtz'}]
+
+        Like other runtime parameters, ``parallel_tool_calls`` can be bound to a model
+        using ``llm.bind(parallel_tool_calls=False)`` or during instantiation by
+        setting ``model_kwargs``.
+
         See ``ChatOpenAI.bind_tools()`` method for more.
 
     Structured output:
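
As a supplement to the docstring text added above, below is a minimal, illustrative sketch of the two usage patterns it describes: passing ``parallel_tool_calls`` per invocation and binding it once with ``bind()``. The ``GetWeather`` schema, the ``gpt-4o`` model name, and the ``pydantic`` import are assumptions made to keep the example self-contained; they are not part of the patch.

.. code-block:: python

    # Sketch only: assumed GetWeather schema and model name, not from the patch.
    from pydantic import BaseModel, Field

    from langchain_openai import ChatOpenAI


    class GetWeather(BaseModel):
        """Get the current weather in a given location."""

        location: str = Field(..., description="City and state, e.g. San Francisco, CA")


    llm = ChatOpenAI(model="gpt-4o", temperature=0)  # assumed model name
    llm_with_tools = llm.bind_tools([GetWeather])

    # Pattern 1 (as in the added docstring): disable parallel tool calls for
    # a single invocation by passing the parameter at call time.
    ai_msg = llm_with_tools.invoke(
        "What is the weather in LA and NY?",
        parallel_tool_calls=False,
    )
    print(ai_msg.tool_calls)

    # Pattern 2 (also mentioned in the added text): bind the parameter once so
    # every subsequent call sends parallel_tool_calls=False.
    llm_sequential = llm_with_tools.bind(parallel_tool_calls=False)
    ai_msg = llm_sequential.invoke("What is the weather in LA and NY?")
    print(ai_msg.tool_calls)

With ``parallel_tool_calls=False``, the model is expected to emit at most one tool call per response, which is why the example output in the patch shows a single ``GetWeather`` call rather than two.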