From 5da74b50530c018fc5b412b328f4c3754f46c45d Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Tue, 11 Mar 2025 18:56:47 -0400
Subject: [PATCH] add tool output to response metadata

---
 libs/partners/openai/langchain_openai/chat_models/base.py | 6 ++++++
 .../tests/integration_tests/chat_models/test_base.py      | 8 +++++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 2e98aad3515..a92c2c7f5cb 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -1025,6 +1025,12 @@ class BaseChatOpenAI(BaseChatModel):
                     generation_info["status"] = output.status
                 gen = ChatGeneration(message=message, generation_info=generation_info)
                 generations.append(gen)
+            else:
+                tool_output = output.model_dump()
+                if "tool_outputs" in generation_info:
+                    generation_info["tool_outputs"].append(tool_output)
+                else:
+                    generation_info["tool_outputs"] = [tool_output]
 
         llm_output = {"model_name": response.model}
         return ChatResult(generations=generations, llm_output=llm_output)
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index 30551ce67d8..0780e98ee2f 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -2,6 +2,7 @@
 
 import base64
 import json
+import os
 from pathlib import Path
 from textwrap import dedent
 from typing import Any, AsyncIterator, List, Literal, Optional, cast
@@ -1248,6 +1249,10 @@ def _check_response(response: Optional[BaseMessage]) -> None:
     assert response.usage_metadata["output_tokens"] > 0
     assert response.usage_metadata["total_tokens"] > 0
     assert response.response_metadata["model_name"]
+    for tool_output in response.response_metadata["tool_outputs"]:
+        assert tool_output["id"]
+        assert tool_output["status"]
+        assert tool_output["type"]
 
 
 def test_web_search() -> None:
@@ -1279,6 +1284,7 @@ async def test_web_search_async() -> None:
     _check_response(response)
     assert response.response_metadata["status"]
 
+    # Test streaming
    full: Optional[BaseMessageChunk] = None
     async for chunk in llm.astream(
         "What was a positive news story from today?",
@@ -1287,4 +1293,4 @@ async def test_web_search_async() -> None:
         assert isinstance(chunk, AIMessageChunk)
         full = chunk if full is None else full + chunk
     assert isinstance(full, AIMessageChunk)
-    _check_response(response)
+    _check_response(full)
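
Below is a minimal usage sketch (not part of the patch) of how the new entry
surfaces to callers. The model name and the tool spec are assumptions for
illustration; the patch itself only pins down the prompt and the keys asserted
in _check_response (id, status, type).

    # Illustrative only: the model name and tool spec are assumptions,
    # not taken from the patch.
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o")  # hypothetical model choice
    response = llm.invoke(
        "What was a positive news story from today?",
        tools=[{"type": "web_search_preview"}],  # assumed built-in tool spec
    )

    # With this patch, non-message output items are collected under
    # response_metadata["tool_outputs"], one dict per output item.
    for tool_output in response.response_metadata["tool_outputs"]:
        print(tool_output["id"], tool_output["status"], tool_output["type"])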