add back standard_outputs

Mason Daugherty
2025-07-28 10:47:26 -04:00
parent 5e9eb19a83
commit ef9b5a9e18
39 changed files with 4201 additions and 373 deletions
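The change set restores "v1" as an accepted `output_version` for `ChatOpenAI`, alongside the existing "v0" and "responses/v1" modes, and reworks the Responses API integration tests to assert the standardized content-block shapes each mode produces. In broad strokes, the behavior under test looks like this (a minimal sketch; the model name and prompt are illustrative, not taken from the diff):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4.1",  # illustrative
    use_responses_api=True,
    output_version="v1",  # "v0" and "responses/v1" keep the older block shapes
)
response = llm.invoke("Hello!")
for block in response.content:
    # Under "v1", content is a list of standard typed blocks such as
    # {"type": "text", ...}, {"type": "reasoning", ...}, {"type": "image", ...}
    print(block["type"])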

View File

@@ -473,6 +473,7 @@ def test_manual_tool_call_msg(use_responses_api: bool) -> None:
                 name="GenerateUsername",
                 args={"name": "Sally", "hair_color": "green"},
                 id="foo",
+                type="tool_call",
             )
         ],
     ),
@@ -494,6 +495,7 @@ def test_manual_tool_call_msg(use_responses_api: bool) -> None:
                 name="GenerateUsername",
                 args={"name": "Sally", "hair_color": "green"},
                 id="bar",
+                type="tool_call",
             )
         ],
     ),
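Both fixtures now set the explicit `type="tool_call"` discriminator on manually constructed tool calls. The same construction as a standalone example (this uses the public `AIMessage` API; the surrounding fixture code is not shown in the hunks):

from langchain_core.messages import AIMessage

msg = AIMessage(
    content="",
    tool_calls=[
        {
            "name": "GenerateUsername",
            "args": {"name": "Sally", "hair_color": "green"},
            "id": "foo",
            "type": "tool_call",  # the discriminator these hunks add
        }
    ],
)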

View File

@@ -52,9 +52,11 @@ def _check_response(response: Optional[BaseMessage]) -> None:
     assert response.response_metadata["service_tier"]
 
 
 @pytest.mark.default_cassette("test_web_search.yaml.gz")
 @pytest.mark.vcr
-def test_web_search() -> None:
-    llm = ChatOpenAI(model=MODEL_NAME, output_version="responses/v1")
+@pytest.mark.parametrize("output_version", ["responses/v1", "v1"])
+def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None:
+    llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
     first_response = llm.invoke(
         "What was a positive news story from today?",
         tools=[{"type": "web_search_preview"}],
@@ -110,7 +112,10 @@ def test_web_search() -> None:
     for msg in [first_response, full, response]:
         assert isinstance(msg, AIMessage)
         block_types = [block["type"] for block in msg.content]  # type: ignore[index]
-        assert block_types == ["web_search_call", "text"]
+        if output_version == "responses/v1":
+            assert block_types == ["web_search_call", "text"]
+        else:
+            assert block_types == ["web_search_call", "web_search_result", "text"]
 
 
 @pytest.mark.flaky(retries=3, delay=1)
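The parametrized assertion above encodes the difference between the two modes: "v1" surfaces the search output as its own standard block. As a sketch (the helper name is ours, not the test suite's):

def expected_block_types(output_version: str) -> list[str]:
    # Block order mirrors the assertions in test_web_search
    if output_version == "responses/v1":
        return ["web_search_call", "text"]
    return ["web_search_call", "web_search_result", "text"]  # "v1"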
@@ -141,13 +146,15 @@ async def test_web_search_async() -> None:
     assert tool_output["type"] == "web_search_call"
 
 
-@pytest.mark.flaky(retries=3, delay=1)
-def test_function_calling() -> None:
+@pytest.mark.default_cassette("test_function_calling.yaml.gz")
+@pytest.mark.vcr
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_function_calling(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
     def multiply(x: int, y: int) -> int:
         """return x * y"""
         return x * y
 
-    llm = ChatOpenAI(model=MODEL_NAME)
+    llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
     bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
     ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
     assert len(ai_msg.tool_calls) == 1
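Note that `tool_calls` keeps the same shape across all three output versions, which is why this assertion needs no branching. A minimal sketch of the pattern (model name illustrative):

from langchain_openai import ChatOpenAI

def multiply(x: int, y: int) -> int:
    """return x * y"""
    return x * y

llm = ChatOpenAI(model="gpt-4.1", output_version="v1")  # any of the three versions
bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
ai_msg = bound_llm.invoke("whats 5 * 4")
assert ai_msg.tool_calls[0]["name"] == "multiply"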
@@ -174,8 +181,15 @@ class FooDict(TypedDict):
     response: str
 
 
-def test_parsed_pydantic_schema() -> None:
-    llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)
+@pytest.mark.default_cassette("test_parsed_pydantic_schema.yaml.gz")
+@pytest.mark.vcr
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_parsed_pydantic_schema(
+    output_version: Literal["v0", "responses/v1", "v1"],
+) -> None:
+    llm = ChatOpenAI(
+        model=MODEL_NAME, use_responses_api=True, output_version=output_version
+    )
     response = llm.invoke("how are ya", response_format=Foo)
     parsed = Foo(**json.loads(response.text()))
     assert parsed == response.additional_kwargs["parsed"]
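The structured-output round trip is likewise version-independent: the parsed object lands in `additional_kwargs["parsed"]` regardless of `output_version`. A sketch, assuming `Foo` is a simple Pydantic model (the suite's real `Foo` is defined elsewhere in the file):

import json
from pydantic import BaseModel
from langchain_openai import ChatOpenAI

class Foo(BaseModel):  # stand-in for the schema defined above this hunk
    response: str

llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True, output_version="v1")
response = llm.invoke("how are ya", response_format=Foo)
assert Foo(**json.loads(response.text())) == response.additional_kwargs["parsed"]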
@@ -297,8 +311,8 @@ def test_function_calling_and_structured_output() -> None:
 @pytest.mark.default_cassette("test_reasoning.yaml.gz")
 @pytest.mark.vcr
-@pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
-def test_reasoning(output_version: Literal["v0", "responses/v1"]) -> None:
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_reasoning(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
     llm = ChatOpenAI(
         model="o4-mini", use_responses_api=True, output_version=output_version
     )
@@ -358,27 +372,32 @@ def test_computer_calls() -> None:
 def test_file_search() -> None:
     pytest.skip()  # TODO: set up infra
-    llm = ChatOpenAI(model=MODEL_NAME)
+    llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)
     tool = {
         "type": "file_search",
         "vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]],
     }
-    response = llm.invoke("What is deep research by OpenAI?", tools=[tool])
+    input_message = {"role": "user", "content": "What is deep research by OpenAI?"}
+    response = llm.invoke([input_message], tools=[tool])
     _check_response(response)
 
     full: Optional[BaseMessageChunk] = None
-    for chunk in llm.stream("What is deep research by OpenAI?", tools=[tool]):
+    for chunk in llm.stream([input_message], tools=[tool]):
         assert isinstance(chunk, AIMessageChunk)
         full = chunk if full is None else full + chunk
     assert isinstance(full, AIMessageChunk)
     _check_response(full)
+
+    next_message = {"role": "user", "content": "Thank you."}
+    _ = llm.invoke([input_message, full, next_message])
 
 
 @pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz")
 @pytest.mark.vcr
-@pytest.mark.parametrize("output_version", ["v0", "responses/v1"])
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
 def test_stream_reasoning_summary(
-    output_version: Literal["v0", "responses/v1"],
+    output_version: Literal["v0", "responses/v1", "v1"],
 ) -> None:
     llm = ChatOpenAI(
         model="o4-mini",
@@ -398,20 +417,39 @@ def test_stream_reasoning_summary(
     if output_version == "v0":
         reasoning = response_1.additional_kwargs["reasoning"]
         assert set(reasoning.keys()) == {"id", "type", "summary"}
-    else:
+        summary = reasoning["summary"]
+        assert isinstance(summary, list)
+        for block in summary:
+            assert isinstance(block, dict)
+            assert isinstance(block["type"], str)
+            assert isinstance(block["text"], str)
+            assert block["text"]
+    elif output_version == "responses/v1":
         reasoning = next(
             block
             for block in response_1.content
             if block["type"] == "reasoning"  # type: ignore[index]
         )
         assert set(reasoning.keys()) == {"id", "type", "summary", "index"}
-    summary = reasoning["summary"]
-    assert isinstance(summary, list)
-    for block in summary:
-        assert isinstance(block, dict)
-        assert isinstance(block["type"], str)
-        assert isinstance(block["text"], str)
-        assert block["text"]
+        summary = reasoning["summary"]
+        assert isinstance(summary, list)
+        for block in summary:
+            assert isinstance(block, dict)
+            assert isinstance(block["type"], str)
+            assert isinstance(block["text"], str)
+            assert block["text"]
+    else:
+        # v1
+        total_reasoning_blocks = 0
+        for block in response_1.content:
+            if block["type"] == "reasoning":
+                total_reasoning_blocks += 1
+                assert isinstance(block["id"], str) and block["id"].startswith("rs_")
+                assert isinstance(block["reasoning"], str)
+                assert isinstance(block["index"], int)
+        assert (
+            total_reasoning_blocks > 1
+        )  # This query typically generates multiple reasoning blocks
 
     # Check we can pass back summaries
     message_2 = {"role": "user", "content": "Thank you."}
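The new "v1" branch checks the standardized reasoning block: the text sits under a "reasoning" key alongside the OpenAI item id and a streaming index. A compact restatement of that check (the helper name is ours):

from langchain_core.messages import AIMessage

def count_v1_reasoning_blocks(message: AIMessage) -> int:
    total = 0
    for block in message.content:
        if block["type"] == "reasoning":
            assert block["id"].startswith("rs_")  # OpenAI reasoning item id
            assert isinstance(block["reasoning"], str)
            assert isinstance(block["index"], int)
            total += 1
    return total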
@@ -419,9 +457,13 @@ def test_stream_reasoning_summary(
     assert isinstance(response_2, AIMessage)
 
 
 @pytest.mark.default_cassette("test_code_interpreter.yaml.gz")
 @pytest.mark.vcr
-def test_code_interpreter() -> None:
-    llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_code_interpreter(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
+    llm = ChatOpenAI(
+        model="o4-mini", use_responses_api=True, output_version=output_version
+    )
     llm_with_tools = llm.bind_tools(
         [{"type": "code_interpreter", "container": {"type": "auto"}}]
     )
@@ -430,15 +472,38 @@ def test_code_interpreter() -> None:
         "content": "Write and run code to answer the question: what is 3^3?",
     }
     response = llm_with_tools.invoke([input_message])
-    assert isinstance(response, AIMessage)
     _check_response(response)
-    tool_outputs = response.additional_kwargs["tool_outputs"]
-    assert tool_outputs
-    assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)
+    if output_version == "v0":
+        tool_outputs = [
+            item
+            for item in response.additional_kwargs["tool_outputs"]
+            if item["type"] == "code_interpreter_call"
+        ]
+    elif output_version == "responses/v1":
+        tool_outputs = [
+            item
+            for item in response.content
+            if isinstance(item, dict) and item["type"] == "code_interpreter_call"
+        ]
+    else:
+        # v1
+        tool_outputs = [
+            item
+            for item in response.content
+            if isinstance(item, dict) and item["type"] == "code_interpreter_call"
+        ]
+        code_interpreter_result = next(
+            item
+            for item in response.content
+            if isinstance(item, dict) and item["type"] == "code_interpreter_result"
+        )
+        assert tool_outputs
+        assert code_interpreter_result
+    assert len(tool_outputs) == 1
 
     # Test streaming
     # Use same container
+    tool_outputs = response.additional_kwargs["tool_outputs"]
+    assert len(tool_outputs) == 1
     container_id = tool_outputs[0]["container_id"]
     llm_with_tools = llm.bind_tools(
         [{"type": "code_interpreter", "container": container_id}]
@@ -449,9 +514,32 @@ def test_code_interpreter() -> None:
         assert isinstance(chunk, AIMessageChunk)
         full = chunk if full is None else full + chunk
     assert isinstance(full, AIMessageChunk)
-    tool_outputs = full.additional_kwargs["tool_outputs"]
+    if output_version == "v0":
+        tool_outputs = [
+            item
+            for item in response.additional_kwargs["tool_outputs"]
+            if item["type"] == "code_interpreter_call"
+        ]
+    elif output_version == "responses/v1":
+        tool_outputs = [
+            item
+            for item in response.content
+            if isinstance(item, dict) and item["type"] == "code_interpreter_call"
+        ]
+    else:
+        code_interpreter_call = next(
+            item
+            for item in response.content
+            if isinstance(item, dict) and item["type"] == "code_interpreter_call"
+        )
+        code_interpreter_result = next(
+            item
+            for item in response.content
+            if isinstance(item, dict) and item["type"] == "code_interpreter_result"
+        )
+        assert code_interpreter_call
+        assert code_interpreter_result
     assert tool_outputs
     assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)
 
     # Test we can pass back in
     next_message = {"role": "user", "content": "Please add more comments to the code."}
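Under "v1" the interpreter invocation and its output arrive as two separate standard blocks ("code_interpreter_call" and "code_interpreter_result"), while "v0" keeps everything in `additional_kwargs["tool_outputs"]`. A sketch of locating the pair (the helper name is ours):

from langchain_core.messages import AIMessage

def split_code_interpreter_blocks(message: AIMessage) -> tuple[dict, dict]:
    call = next(
        b for b in message.content
        if isinstance(b, dict) and b["type"] == "code_interpreter_call"
    )
    result = next(
        b for b in message.content
        if isinstance(b, dict) and b["type"] == "code_interpreter_result"
    )
    return call, result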
@@ -546,10 +634,14 @@ def test_mcp_builtin_zdr() -> None:
     _ = llm_with_tools.invoke([input_message, full, approval_message])
 
 
-@pytest.mark.vcr()
-def test_image_generation_streaming() -> None:
+@pytest.mark.default_cassette("test_image_generation_streaming.yaml.gz")
+@pytest.mark.vcr
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_image_generation_streaming(output_version: str) -> None:
     """Test image generation streaming."""
-    llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True)
+    llm = ChatOpenAI(
+        model="gpt-4.1", use_responses_api=True, output_version=output_version
+    )
     tool = {
         "type": "image_generation",
         # For testing purposes let's keep the quality low, so the test runs faster.
@@ -596,15 +688,37 @@ def test_image_generation_streaming() -> None:
     # At the moment, the streaming API does not pick up annotations fully.
     # So the following check is commented out.
     # _check_response(complete_ai_message)
-    tool_output = complete_ai_message.additional_kwargs["tool_outputs"][0]
-    assert set(tool_output.keys()).issubset(expected_keys)
+    if output_version == "v0":
+        assert complete_ai_message.additional_kwargs["tool_outputs"]
+        tool_output = complete_ai_message.additional_kwargs["tool_outputs"][0]
+        assert set(tool_output.keys()).issubset(expected_keys)
+    elif output_version == "responses/v1":
+        tool_output = next(
+            block
+            for block in complete_ai_message.content
+            if isinstance(block, dict) and block["type"] == "image_generation_call"
+        )
+        assert set(tool_output.keys()).issubset(expected_keys)
+    else:
+        # v1
+        standard_keys = {"type", "base64", "id", "status", "index"}
+        tool_output = next(
+            block
+            for block in complete_ai_message.content
+            if isinstance(block, dict) and block["type"] == "image"
+        )
+        assert set(standard_keys).issubset(tool_output.keys())
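For images, "v1" replaces the provider-specific "image_generation_call" block with a standard "image" block; the streaming variant additionally carries an "index" key. A sketch (the helper name is ours):

from langchain_core.messages import AIMessage

def find_v1_image_block(message: AIMessage) -> dict:
    block = next(
        b for b in message.content
        if isinstance(b, dict) and b["type"] == "image"
    )
    assert {"type", "base64", "id", "status"}.issubset(block.keys())
    return block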
-@pytest.mark.vcr()
-def test_image_generation_multi_turn() -> None:
+@pytest.mark.default_cassette("test_image_generation_multi_turn.yaml.gz")
+@pytest.mark.vcr
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_image_generation_multi_turn(output_version: str) -> None:
     """Test multi-turn editing of image generation by passing in history."""
     # Test multi-turn
-    llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True)
+    llm = ChatOpenAI(
+        model="gpt-4.1", use_responses_api=True, output_version=output_version
+    )
     # Test invocation
     tool = {
         "type": "image_generation",
@@ -620,10 +734,41 @@ def test_image_generation_multi_turn() -> None:
         {"role": "user", "content": "Draw a random short word in green font."}
     ]
     ai_message = llm_with_tools.invoke(chat_history)
-    assert isinstance(ai_message, AIMessage)
     _check_response(ai_message)
-    tool_output = ai_message.additional_kwargs["tool_outputs"][0]
+
+    # Example tool output for an image
+    expected_keys = {
+        "id",
+        "background",
+        "output_format",
+        "quality",
+        "result",
+        "revised_prompt",
+        "size",
+        "status",
+        "type",
+    }
+
+    if output_version == "v0":
+        tool_output = ai_message.additional_kwargs["tool_outputs"][0]
+        assert set(tool_output.keys()).issubset(expected_keys)
+    elif output_version == "responses/v1":
+        tool_output = next(
+            block
+            for block in ai_message.content
+            if isinstance(block, dict) and block["type"] == "image_generation_call"
+        )
+        assert set(tool_output.keys()).issubset(expected_keys)
+    else:
+        standard_keys = {"type", "base64", "id", "status"}
+        tool_output = next(
+            block
+            for block in ai_message.content
+            if isinstance(block, dict) and block["type"] == "image"
+        )
+        assert set(standard_keys).issubset(tool_output.keys())
+
     # Example tool output for an image (v0)
     # {
     #     "background": "opaque",
     #     "id": "ig_683716a8ddf0819888572b20621c7ae4029ec8c11f8dacf8",
@@ -639,20 +784,6 @@ def test_image_generation_multi_turn() -> None:
     #     "result": # base64 encode image data
     # }
 
-    expected_keys = {
-        "id",
-        "background",
-        "output_format",
-        "quality",
-        "result",
-        "revised_prompt",
-        "size",
-        "status",
-        "type",
-    }
-
-    assert set(tool_output.keys()).issubset(expected_keys)
-
     chat_history.extend(
         [
             # AI message with tool output
@@ -669,6 +800,24 @@ def test_image_generation_multi_turn() -> None:
     )
     ai_message2 = llm_with_tools.invoke(chat_history)
     assert isinstance(ai_message2, AIMessage)
     _check_response(ai_message2)
-    tool_output2 = ai_message2.additional_kwargs["tool_outputs"][0]
-    assert set(tool_output2.keys()).issubset(expected_keys)
+
+    if output_version == "v0":
+        tool_output = ai_message2.additional_kwargs["tool_outputs"][0]
+        assert set(tool_output.keys()).issubset(expected_keys)
+    elif output_version == "responses/v1":
+        tool_output = next(
+            block
+            for block in ai_message2.content
+            if isinstance(block, dict) and block["type"] == "image_generation_call"
+        )
+        assert set(tool_output.keys()).issubset(expected_keys)
+    else:
+        standard_keys = {"type", "base64", "id", "status"}
+        tool_output = next(
+            block
+            for block in ai_message2.content
+            if isinstance(block, dict) and block["type"] == "image"
+        )
+        assert set(standard_keys).issubset(tool_output.keys())
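The same branching applies on the second turn, confirming that a "v1" message carrying an image block can be passed back as history and edited. Roughly (the follow-up prompt is illustrative; the real one sits outside this hunk):

chat_history.extend(
    [
        ai_message,  # AI message with the image output
        {"role": "user", "content": "Now draw the word in blue."},  # illustrative
    ]
)
ai_message2 = llm_with_tools.invoke(chat_history)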