mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-14 07:07:34 +00:00
openai[patch]: format system content blocks for Responses API (#31096)
```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True)

messages = [
    SystemMessage("test"),  # Works
    HumanMessage("test"),  # Works
    SystemMessage([{"type": "text", "text": "test"}]),  # Bug in this case
    HumanMessage([{"type": "text", "text": "test"}]),  # Works
    SystemMessage([{"type": "input_text", "text": "test"}]),  # Works
]
llm._get_request_payload(messages)
```
This commit is contained in:
parent
26ad239669
commit
94139ffcd3
@ -3191,7 +3191,7 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
|
|||||||
input_.append(msg)
|
input_.append(msg)
|
||||||
input_.extend(function_calls)
|
input_.extend(function_calls)
|
||||||
input_.extend(computer_calls)
|
input_.extend(computer_calls)
|
||||||
elif msg["role"] == "user":
|
elif msg["role"] in ("user", "system", "developer"):
|
||||||
if isinstance(msg["content"], list):
|
if isinstance(msg["content"], list):
|
||||||
new_blocks = []
|
new_blocks = []
|
||||||
for block in msg["content"]:
|
for block in msg["content"]:
|
||||||
|
@ -956,13 +956,17 @@ def test__get_request_payload() -> None:
|
|||||||
messages: list = [
|
messages: list = [
|
||||||
SystemMessage("hello"),
|
SystemMessage("hello"),
|
||||||
SystemMessage("bye", additional_kwargs={"__openai_role__": "developer"}),
|
SystemMessage("bye", additional_kwargs={"__openai_role__": "developer"}),
|
||||||
|
SystemMessage(content=[{"type": "text", "text": "hello!"}]),
|
||||||
{"role": "human", "content": "how are you"},
|
{"role": "human", "content": "how are you"},
|
||||||
|
{"role": "user", "content": [{"type": "text", "text": "feeling today"}]},
|
||||||
]
|
]
|
||||||
expected = {
|
expected = {
|
||||||
"messages": [
|
"messages": [
|
||||||
{"role": "system", "content": "hello"},
|
{"role": "system", "content": "hello"},
|
||||||
{"role": "developer", "content": "bye"},
|
{"role": "developer", "content": "bye"},
|
||||||
|
{"role": "system", "content": [{"type": "text", "text": "hello!"}]},
|
||||||
{"role": "user", "content": "how are you"},
|
{"role": "user", "content": "how are you"},
|
||||||
|
{"role": "user", "content": [{"type": "text", "text": "feeling today"}]},
|
||||||
],
|
],
|
||||||
"model": "gpt-4o-2024-08-06",
|
"model": "gpt-4o-2024-08-06",
|
||||||
"stream": False,
|
"stream": False,
|
||||||
@ -977,7 +981,9 @@ def test__get_request_payload() -> None:
|
|||||||
"messages": [
|
"messages": [
|
||||||
{"role": "developer", "content": "hello"},
|
{"role": "developer", "content": "hello"},
|
||||||
{"role": "developer", "content": "bye"},
|
{"role": "developer", "content": "bye"},
|
||||||
|
{"role": "developer", "content": [{"type": "text", "text": "hello!"}]},
|
||||||
{"role": "user", "content": "how are you"},
|
{"role": "user", "content": "how are you"},
|
||||||
|
{"role": "user", "content": [{"type": "text", "text": "feeling today"}]},
|
||||||
],
|
],
|
||||||
"model": "o3-mini",
|
"model": "o3-mini",
|
||||||
"stream": False,
|
"stream": False,
|
||||||
@ -1658,6 +1664,9 @@ def test__construct_responses_api_input_multiple_message_types() -> None:
|
|||||||
"""Test conversion of a conversation with multiple message types."""
|
"""Test conversion of a conversation with multiple message types."""
|
||||||
messages = [
|
messages = [
|
||||||
SystemMessage(content="You are a helpful assistant."),
|
SystemMessage(content="You are a helpful assistant."),
|
||||||
|
SystemMessage(
|
||||||
|
content=[{"type": "text", "text": "You are a very helpful assistant!"}]
|
||||||
|
),
|
||||||
HumanMessage(content="What's the weather in San Francisco?"),
|
HumanMessage(content="What's the weather in San Francisco?"),
|
||||||
HumanMessage(
|
HumanMessage(
|
||||||
content=[{"type": "text", "text": "What's the weather in San Francisco?"}]
|
content=[{"type": "text", "text": "What's the weather in San Francisco?"}]
|
||||||
@ -1698,31 +1707,36 @@ def test__construct_responses_api_input_multiple_message_types() -> None:
|
|||||||
assert result[0]["role"] == "system"
|
assert result[0]["role"] == "system"
|
||||||
assert result[0]["content"] == "You are a helpful assistant."
|
assert result[0]["content"] == "You are a helpful assistant."
|
||||||
|
|
||||||
|
assert result[1]["role"] == "system"
|
||||||
|
assert result[1]["content"] == [
|
||||||
|
{"type": "input_text", "text": "You are a very helpful assistant!"}
|
||||||
|
]
|
||||||
|
|
||||||
# Check human message
|
# Check human message
|
||||||
assert result[1]["role"] == "user"
|
|
||||||
assert result[1]["content"] == "What's the weather in San Francisco?"
|
|
||||||
assert result[2]["role"] == "user"
|
assert result[2]["role"] == "user"
|
||||||
assert result[2]["content"] == [
|
assert result[2]["content"] == "What's the weather in San Francisco?"
|
||||||
|
assert result[3]["role"] == "user"
|
||||||
|
assert result[3]["content"] == [
|
||||||
{"type": "input_text", "text": "What's the weather in San Francisco?"}
|
{"type": "input_text", "text": "What's the weather in San Francisco?"}
|
||||||
]
|
]
|
||||||
|
|
||||||
# Check function call
|
# Check function call
|
||||||
assert result[3]["type"] == "function_call"
|
assert result[4]["type"] == "function_call"
|
||||||
assert result[3]["name"] == "get_weather"
|
assert result[4]["name"] == "get_weather"
|
||||||
assert result[3]["arguments"] == '{"location": "San Francisco"}'
|
assert result[4]["arguments"] == '{"location": "San Francisco"}'
|
||||||
assert result[3]["call_id"] == "call_123"
|
assert result[4]["call_id"] == "call_123"
|
||||||
assert result[3]["id"] == "func_456"
|
assert result[4]["id"] == "func_456"
|
||||||
|
|
||||||
# Check function call output
|
# Check function call output
|
||||||
assert result[4]["type"] == "function_call_output"
|
assert result[5]["type"] == "function_call_output"
|
||||||
assert result[4]["output"] == '{"temperature": 72, "conditions": "sunny"}'
|
assert result[5]["output"] == '{"temperature": 72, "conditions": "sunny"}'
|
||||||
assert result[4]["call_id"] == "call_123"
|
assert result[5]["call_id"] == "call_123"
|
||||||
|
|
||||||
assert result[5]["role"] == "assistant"
|
|
||||||
assert result[5]["content"] == "The weather in San Francisco is 72°F and sunny."
|
|
||||||
|
|
||||||
assert result[6]["role"] == "assistant"
|
assert result[6]["role"] == "assistant"
|
||||||
assert result[6]["content"] == [
|
assert result[6]["content"] == "The weather in San Francisco is 72°F and sunny."
|
||||||
|
|
||||||
|
assert result[7]["role"] == "assistant"
|
||||||
|
assert result[7]["content"] == [
|
||||||
{
|
{
|
||||||
"type": "output_text",
|
"type": "output_text",
|
||||||
"text": "The weather in San Francisco is 72°F and sunny.",
|
"text": "The weather in San Francisco is 72°F and sunny.",
|
||||||
@ -1733,6 +1747,25 @@ def test__construct_responses_api_input_multiple_message_types() -> None:
|
|||||||
# assert no mutation has occurred
|
# assert no mutation has occurred
|
||||||
assert messages_copy == messages
|
assert messages_copy == messages
|
||||||
|
|
||||||
|
# Test dict messages
|
||||||
|
llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
|
||||||
|
message_dicts: list = [
|
||||||
|
{"role": "developer", "content": "This is a developer message."},
|
||||||
|
{
|
||||||
|
"role": "developer",
|
||||||
|
"content": [{"type": "text", "text": "This is a developer message!"}],
|
||||||
|
},
|
||||||
|
]
|
||||||
|
payload = llm._get_request_payload(message_dicts)
|
||||||
|
result = payload["input"]
|
||||||
|
assert len(result) == 2
|
||||||
|
assert result[0]["role"] == "developer"
|
||||||
|
assert result[0]["content"] == "This is a developer message."
|
||||||
|
assert result[1]["role"] == "developer"
|
||||||
|
assert result[1]["content"] == [
|
||||||
|
{"type": "input_text", "text": "This is a developer message!"}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
def test_service_tier() -> None:
|
def test_service_tier() -> None:
|
||||||
llm = ChatOpenAI(model="o4-mini", service_tier="flex")
|
llm = ChatOpenAI(model="o4-mini", service_tier="flex")
|
||||||
|
Loading…
Reference in New Issue
Block a user