Mirror of https://github.com/hwchase17/langchain.git (synced 2025-07-16 09:48:04 +00:00).
openai[patch]: Fix langchain-openai unknown parameter error with gpt-4-turbo (#20271)
**Description:** Fixes an "unknown parameter" error in langchain-openai when using gpt-4-turbo. The behavior of the Chat Completions API appears to have changed implicitly with the latest gpt-4-turbo model, differing from previous models: it now rejects request parameters that are not listed in the [API Reference](https://platform.openai.com/docs/api-reference/chat/create). This change strips the unsupported properties from `tool_calls` entries before sending the request, and fixes `tool_choice=True` to emit the documented `{"type": "function", "function": {"name": ...}}` shape. **Issue:** https://github.com/langchain-ai/langchain/issues/20264 **Dependencies:** none **Twitter handle:** https://twitter.com/oshima_123
This commit is contained in: parent 21c1ce0bc1 · commit 12190ad728
@@ -177,6 +177,12 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
         # If tool calls only, content is None not empty string
         if message_dict["content"] == "":
             message_dict["content"] = None
+
+        tool_call_supported_props = {"id", "type", "function"}
+        message_dict["tool_calls"] = [
+            {k: v for k, v in tool_call.items() if k in tool_call_supported_props}
+            for tool_call in message_dict["tool_calls"]
+        ]
     elif isinstance(message, SystemMessage):
         message_dict["role"] = "system"
     elif isinstance(message, FunctionMessage):
@@ -808,7 +814,10 @@ class ChatOpenAI(BaseChatModel):
                     "function": {"name": tool_choice},
                 }
             elif isinstance(tool_choice, bool):
-                tool_choice = formatted_tools[0]
+                tool_choice = {
+                    "type": "function",
+                    "function": {"name": formatted_tools[0]["function"]["name"]},
+                }
             elif isinstance(tool_choice, dict):
                 if (
                     formatted_tools[0]["function"]["name"]
@@ -479,7 +479,7 @@ class GenerateUsername(BaseModel):
 
 
 def test_tool_use() -> None:
-    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+    llm = ChatOpenAI(model="gpt-4-turbo", temperature=0)
     llm_with_tool = llm.bind_tools(tools=[GenerateUsername], tool_choice=True)
     msgs: List = [HumanMessage("Sally has green hair, what would her username be?")]
     ai_msg = llm_with_tool.invoke(msgs)
@@ -490,6 +490,12 @@ def test_tool_use() -> None:
     tool_call = ai_msg.tool_calls[0]
     assert "args" in tool_call
 
+    tool_msg = ToolMessage(
+        "sally_green_hair", tool_call_id=ai_msg.additional_kwargs["tool_calls"][0]["id"]
+    )
+    msgs.extend([ai_msg, tool_msg])
+    llm_with_tool.invoke(msgs)
+
     # Test streaming
     ai_messages = llm_with_tool.stream(msgs)
     first = True
@@ -505,10 +511,11 @@ def test_tool_use() -> None:
     tool_call_chunk = gathered.tool_call_chunks[0]
     assert "args" in tool_call_chunk
 
-    tool_msg = ToolMessage(
-        "sally_green_hair", tool_call_id=ai_msg.additional_kwargs["tool_calls"][0]["id"]
-    )
-    msgs.extend([ai_msg, tool_msg])
+    streaming_tool_msg = ToolMessage(
+        "sally_green_hair",
+        tool_call_id=gathered.additional_kwargs["tool_calls"][0]["id"],
+    )
+    msgs.extend([gathered, streaming_tool_msg])
     llm_with_tool.invoke(msgs)
Loading…
Reference in New Issue
Block a user