From ab8b4003be6b8dd477cda23c49b73d2a616ec8f4 Mon Sep 17 00:00:00 2001
From: ccurme
Date: Tue, 27 May 2025 15:11:31 -0400
Subject: [PATCH] openai[patch]: add test case for code interpreter (#31383)

---
 .../chat_models/test_responses_api.py            | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
index c20a7f4563b..6accecb9d88 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
@@ -386,9 +386,11 @@ def test_code_interpreter() -> None:
     llm_with_tools = llm.bind_tools(
         [{"type": "code_interpreter", "container": {"type": "auto"}}]
     )
-    response = llm_with_tools.invoke(
-        "Write and run code to answer the question: what is 3^3?"
-    )
+    input_message = {
+        "role": "user",
+        "content": "Write and run code to answer the question: what is 3^3?",
+    }
+    response = llm_with_tools.invoke([input_message])
     _check_response(response)
     tool_outputs = response.additional_kwargs["tool_outputs"]
     assert tool_outputs
@@ -404,9 +406,7 @@ def test_code_interpreter() -> None:
     )
 
     full: Optional[BaseMessageChunk] = None
-    for chunk in llm_with_tools.stream(
-        "Write and run code to answer the question: what is 3^3?"
-    ):
+    for chunk in llm_with_tools.stream([input_message]):
         assert isinstance(chunk, AIMessageChunk)
         full = chunk if full is None else full + chunk
     assert isinstance(full, AIMessageChunk)
@@ -414,6 +414,10 @@ def test_code_interpreter() -> None:
     assert tool_outputs
     assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)
 
+    # Test we can pass back in
+    next_message = {"role": "user", "content": "Please add more comments to the code."}
+    _ = llm_with_tools.invoke([input_message, full, next_message])
+
 
 def test_mcp_builtin() -> None:
     pytest.skip()  # TODO: set up VCR