From 12190ad7284f9cff6757a7dc7a133623c2e6368a Mon Sep 17 00:00:00 2001
From: Yuki Oshima <39944763+os1ma@users.noreply.github.com>
Date: Thu, 11 Apr 2024 01:51:38 +0900
Subject: [PATCH] openai[patch]: Fix langchain-openai unknown parameter error
 with gpt-4-turbo (#20271)

**Description:**

I fixed the langchain-openai unknown parameter error that occurs with gpt-4-turbo.

The behavior of the Chat Completions API seems to have changed implicitly with the latest gpt-4-turbo model: unlike previous models, it now appears to reject request parameters that are not listed in the [API Reference](https://platform.openai.com/docs/api-reference/chat/create).

I tracked down the parameters langchain-openai was sending that are no longer accepted and fixed them.

**Issue:** https://github.com/langchain-ai/langchain/issues/20264

**Dependencies:** none

**Twitter handle:** https://twitter.com/oshima_123
---
 .../openai/langchain_openai/chat_models/base.py    | 11 ++++++++++-
 .../integration_tests/chat_models/test_base.py     | 15 +++++++++++----
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 09e6ac1052a..ea4df089692 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -177,6 +177,12 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
             # If tool calls only, content is None not empty string
             if message_dict["content"] == "":
                 message_dict["content"] = None
+
+            tool_call_supported_props = {"id", "type", "function"}
+            message_dict["tool_calls"] = [
+                {k: v for k, v in tool_call.items() if k in tool_call_supported_props}
+                for tool_call in message_dict["tool_calls"]
+            ]
     elif isinstance(message, SystemMessage):
         message_dict["role"] = "system"
     elif isinstance(message, FunctionMessage):
@@ -808,7 +814,10 @@ class ChatOpenAI(BaseChatModel):
                     "function": {"name": tool_choice},
                 }
             elif isinstance(tool_choice, bool):
-                tool_choice = formatted_tools[0]
+                tool_choice = {
+                    "type": "function",
+                    "function": {"name": formatted_tools[0]["function"]["name"]},
+                }
             elif isinstance(tool_choice, dict):
                 if (
                     formatted_tools[0]["function"]["name"]
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index c400831e9aa..03cdcbee6c6 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -479,7 +479,7 @@ class GenerateUsername(BaseModel):
 
 
 def test_tool_use() -> None:
-    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+    llm = ChatOpenAI(model="gpt-4-turbo", temperature=0)
     llm_with_tool = llm.bind_tools(tools=[GenerateUsername], tool_choice=True)
     msgs: List = [HumanMessage("Sally has green hair, what would her username be?")]
     ai_msg = llm_with_tool.invoke(msgs)
@@ -490,6 +490,12 @@ def test_tool_use() -> None:
     tool_call = ai_msg.tool_calls[0]
     assert "args" in tool_call
 
+    tool_msg = ToolMessage(
+        "sally_green_hair", tool_call_id=ai_msg.additional_kwargs["tool_calls"][0]["id"]
+    )
+    msgs.extend([ai_msg, tool_msg])
+    llm_with_tool.invoke(msgs)
+
     # Test streaming
     ai_messages = llm_with_tool.stream(msgs)
     first = True
@@ -505,10 +511,11 @@ def test_tool_use() -> None:
     tool_call_chunk = gathered.tool_call_chunks[0]
     assert "args" in tool_call_chunk
 
-    tool_msg = ToolMessage(
-        "sally_green_hair", tool_call_id=ai_msg.additional_kwargs["tool_calls"][0]["id"]
+    streaming_tool_msg = ToolMessage(
+        "sally_green_hair",
+        tool_call_id=gathered.additional_kwargs["tool_calls"][0]["id"],
     )
-    msgs.extend([ai_msg, tool_msg])
+    msgs.extend([gathered, streaming_tool_msg])
     llm_with_tool.invoke(msgs)
 
 
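Reviewer note, not part of the patch: below is a minimal, self-contained sketch of the two normalizations the diff applies, so the intent is visible without running the integration tests. The `call_abc123` id is made up, and the extra `index` key is an assumption standing in for the kind of undocumented field that streamed tool calls accumulate into `additional_kwargs["tool_calls"]` and that, per the linked issue, the latest gpt-4-turbo rejects when echoed back in a request.

```python
# Sketch only: mirrors the two normalizations in the patch, outside of
# langchain-openai internals. Values such as "call_abc123" are illustrative.

TOOL_CALL_SUPPORTED_PROPS = {"id", "type", "function"}


def strip_unsupported_tool_call_props(tool_calls: list[dict]) -> list[dict]:
    """Keep only the keys the Chat Completions API documents for tool_calls."""
    return [
        {k: v for k, v in tool_call.items() if k in TOOL_CALL_SUPPORTED_PROPS}
        for tool_call in tool_calls
    ]


def tool_choice_from_bool(formatted_tools: list[dict]) -> dict:
    """tool_choice=True forces the single bound tool, in the documented shape."""
    return {
        "type": "function",
        "function": {"name": formatted_tools[0]["function"]["name"]},
    }


# A tool call as accumulated from streaming chunks; "index" is the kind of
# extra key that the model now rejects when sent back in a follow-up request.
raw_tool_call = {
    "index": 0,
    "id": "call_abc123",
    "type": "function",
    "function": {"name": "GenerateUsername", "arguments": "{}"},
}

assert strip_unsupported_tool_call_props([raw_tool_call]) == [
    {
        "id": "call_abc123",
        "type": "function",
        "function": {"name": "GenerateUsername", "arguments": "{}"},
    }
]

formatted_tools = [{"type": "function", "function": {"name": "GenerateUsername"}}]
assert tool_choice_from_bool(formatted_tools) == {
    "type": "function",
    "function": {"name": "GenerateUsername"},
}
```

Filtering against an allow-list of documented keys, rather than deleting the one key known to fail, also strips any future undocumented fields before they reach the API.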