diff --git a/libs/partners/openai/langchain_openai/chat_models/_compat.py b/libs/partners/openai/langchain_openai/chat_models/_compat.py
index e9dac4bf0cd..c025beafcc7 100644
--- a/libs/partners/openai/langchain_openai/chat_models/_compat.py
+++ b/libs/partners/openai/langchain_openai/chat_models/_compat.py
@@ -482,10 +482,14 @@ def _implode_reasoning_blocks(blocks: list[dict[str, Any]]) -> Iterable[dict[str
         block = blocks[i]
 
         # Ordinary block – just yield a shallow copy
-        if block.get("type") != "reasoning" or "reasoning" not in block:
+        if block.get("type") != "reasoning":
             yield dict(block)
             i += 1
             continue
+        elif "reasoning" not in block:
+            yield {**block, "summary": []}
+            i += 1
+            continue
 
         summary: list[dict[str, str]] = [
             {"type": "summary_text", "text": block.get("reasoning", "")}
diff --git a/libs/partners/openai/tests/cassettes/test_code_interpreter.yaml.gz b/libs/partners/openai/tests/cassettes/test_code_interpreter.yaml.gz
index 924ecc6cddd..b8268a20966 100644
Binary files a/libs/partners/openai/tests/cassettes/test_code_interpreter.yaml.gz and b/libs/partners/openai/tests/cassettes/test_code_interpreter.yaml.gz differ
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
index 7be381bc6cd..abe7afb706a 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
@@ -455,9 +455,15 @@ def test_stream_reasoning_summary(
     assert isinstance(response_2, AIMessage)
 
 
+@pytest.mark.default_cassette("test_code_interpreter.yaml.gz")
 @pytest.mark.vcr
-def test_code_interpreter() -> None:
-    llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_code_interpreter(
+    output_version: Literal["v0", "responses/v1", "v1"],
+) -> None:
+    llm = ChatOpenAI(
+        model="o4-mini", use_responses_api=True, output_version=output_version
+    )
     llm_with_tools = llm.bind_tools(
         [{"type": "code_interpreter", "container": {"type": "auto"}}]
     )
@@ -467,14 +473,26 @@ def test_code_interpreter() -> None:
     }
     response = llm_with_tools.invoke([input_message])
     _check_response(response)
-    tool_outputs = response.additional_kwargs["tool_outputs"]
-    assert tool_outputs
-    assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)
+    if output_version == "v0":
+        tool_outputs = [
+            item
+            for item in response.additional_kwargs["tool_outputs"]
+            if item["type"] == "code_interpreter_call"
+        ]
+    elif output_version == "responses/v1":
+        tool_outputs = [
+            item for item in response.content if item["type"] == "code_interpreter_call"
+        ]
+    else:
+        # v1
+        tool_outputs = [
+            item["value"] for item in response.content if item["type"] == "non_standard"
+        ]
+    assert len(tool_outputs) == 1
+    assert tool_outputs[0]["type"] == "code_interpreter_call"
 
     # Test streaming
     # Use same container
-    tool_outputs = response.additional_kwargs["tool_outputs"]
-    assert len(tool_outputs) == 1
     container_id = tool_outputs[0]["container_id"]
     llm_with_tools = llm.bind_tools(
         [{"type": "code_interpreter", "container": container_id}]
@@ -485,9 +503,22 @@ def test_code_interpreter() -> None:
         assert isinstance(chunk, AIMessageChunk)
         full = chunk if full is None else full + chunk
     assert isinstance(full, AIMessageChunk)
-    tool_outputs = full.additional_kwargs["tool_outputs"]
+    if output_version == "v0":
+        tool_outputs = [
+            item
+            for item in full.additional_kwargs["tool_outputs"]
+            if item["type"] == "code_interpreter_call"
+        ]
+    elif output_version == "responses/v1":
+        tool_outputs = [
+            item for item in full.content if item["type"] == "code_interpreter_call"
+        ]
+    else:
+        tool_outputs = [
+            item["value"] for item in full.content if item["type"] == "non_standard"
+        ]
     assert tool_outputs
-    assert any(output["type"] == "code_interpreter_call" for output in tool_outputs)
+    assert tool_outputs[0]["type"] == "code_interpreter_call"
 
     # Test we can pass back in
     next_message = {"role": "user", "content": "Please add more comments to the code."}
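For reviewers, a minimal sketch (not part of the diff) of the behavior change in _implode_reasoning_blocks: a "reasoning"-typed block that carries no "reasoning" text is now re-emitted with an empty summary list instead of falling into the summary-building path. implode_reasoning_blocks below is a simplified, hypothetical stand-in for the private helper; the real one also merges consecutive reasoning blocks, which is omitted here.

from typing import Any, Iterable


def implode_reasoning_blocks(blocks: list[dict[str, Any]]) -> Iterable[dict[str, Any]]:
    """Simplified stand-in; consecutive-block merging is omitted."""
    for block in blocks:
        if block.get("type") != "reasoning":
            # Ordinary block - just yield a shallow copy
            yield dict(block)
        elif "reasoning" not in block:
            # New branch from this diff: keep the block, attach an empty summary
            yield {**block, "summary": []}
        else:
            # Wrap the reasoning text as a single summary_text part
            rest = {k: v for k, v in block.items() if k != "reasoning"}
            yield {
                **rest,
                "summary": [{"type": "summary_text", "text": block["reasoning"]}],
            }


blocks = [
    {"type": "text", "text": "hi"},
    {"type": "reasoning", "id": "rs_abc"},  # no "reasoning" key
    {"type": "reasoning", "id": "rs_def", "reasoning": "step by step..."},
]
out = list(implode_reasoning_blocks(blocks))
assert out[1] == {"type": "reasoning", "id": "rs_abc", "summary": []}
assert out[2]["summary"] == [{"type": "summary_text", "text": "step by step..."}]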
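The parametrized test reads code_interpreter_call outputs from a different place per output_version. A minimal sketch of that mapping, assuming only the message attributes the test touches: extract_code_interpreter_calls is a hypothetical helper mirroring the test's inline branches, and SimpleNamespace stands in for AIMessage.

from types import SimpleNamespace
from typing import Any


def extract_code_interpreter_calls(
    msg: Any, output_version: str
) -> list[dict[str, Any]]:
    """Hypothetical helper mirroring the test's three branches."""
    if output_version == "v0":
        # v0: tool outputs live in additional_kwargs["tool_outputs"]
        return [
            item
            for item in msg.additional_kwargs["tool_outputs"]
            if item["type"] == "code_interpreter_call"
        ]
    if output_version == "responses/v1":
        # responses/v1: tool outputs are inlined in message content
        return [item for item in msg.content if item["type"] == "code_interpreter_call"]
    # v1: provider-specific items are wrapped as {"type": "non_standard", "value": ...}
    return [item["value"] for item in msg.content if item["type"] == "non_standard"]


# Illustrative payload; "cntr_123" is a made-up container id.
call = {"type": "code_interpreter_call", "container_id": "cntr_123"}
messages = {
    "v0": SimpleNamespace(additional_kwargs={"tool_outputs": [call]}, content=""),
    "responses/v1": SimpleNamespace(additional_kwargs={}, content=[call]),
    "v1": SimpleNamespace(
        additional_kwargs={}, content=[{"type": "non_standard", "value": call}]
    ),
}
for version, msg in messages.items():
    assert extract_code_interpreter_calls(msg, version) == [call]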