From c51eadd54f27efedcd286f6f6e48de9308a40e78 Mon Sep 17 00:00:00 2001
From: ccurme
Date: Thu, 1 May 2025 13:50:48 -0400
Subject: [PATCH] openai[patch]: propagate service_tier to response metadata
 (#31089)

---
 libs/partners/openai/langchain_openai/chat_models/base.py   | 5 +++++
 .../openai/tests/integration_tests/chat_models/test_base.py | 6 ++++--
 .../integration_tests/chat_models/test_responses_api.py     | 1 +
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index f1b3bc2104d..7b13029f501 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -736,6 +736,8 @@ class BaseChatOpenAI(BaseChatModel):
                 generation_info["model_name"] = model_name
             if system_fingerprint := chunk.get("system_fingerprint"):
                 generation_info["system_fingerprint"] = system_fingerprint
+            if service_tier := chunk.get("service_tier"):
+                generation_info["service_tier"] = service_tier
 
         logprobs = choice.get("logprobs")
         if logprobs:
@@ -1020,6 +1022,8 @@ class BaseChatOpenAI(BaseChatModel):
         }
         if "id" in response_dict:
             llm_output["id"] = response_dict["id"]
+        if "service_tier" in response_dict:
+            llm_output["service_tier"] = response_dict["service_tier"]
 
         if isinstance(response, openai.BaseModel) and getattr(
             response, "choices", None
@@ -3243,6 +3247,7 @@ def _construct_lc_result_from_responses_api(
             "status",
             "user",
             "model",
+            "service_tier",
         )
     }
     if metadata:
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index c0cbba20f0b..005c5e8b8ff 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -350,6 +350,7 @@ def test_response_metadata() -> None:
             "logprobs",
             "system_fingerprint",
             "finish_reason",
+            "service_tier",
         )
     )
     assert "content" in result.response_metadata["logprobs"]
@@ -367,6 +368,7 @@ async def test_async_response_metadata() -> None:
             "logprobs",
             "system_fingerprint",
             "finish_reason",
+            "service_tier",
         )
     )
     assert "content" in result.response_metadata["logprobs"]
@@ -380,7 +382,7 @@ def test_response_metadata_streaming() -> None:
         full = chunk if full is None else full + chunk
     assert all(
         k in cast(BaseMessageChunk, full).response_metadata
-        for k in ("logprobs", "finish_reason")
+        for k in ("logprobs", "finish_reason", "service_tier")
     )
     assert "content" in cast(BaseMessageChunk, full).response_metadata["logprobs"]
 
@@ -393,7 +395,7 @@ async def test_async_response_metadata_streaming() -> None:
         full = chunk if full is None else full + chunk
     assert all(
         k in cast(BaseMessageChunk, full).response_metadata
-        for k in ("logprobs", "finish_reason")
+        for k in ("logprobs", "finish_reason", "service_tier")
     )
     assert "content" in cast(BaseMessageChunk, full).response_metadata["logprobs"]
 
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
index 449854cca5f..2b22e6d4c30 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
@@ -47,6 +47,7 @@ def _check_response(response: Optional[BaseMessage]) -> None:
     assert response.usage_metadata["output_tokens"] > 0
     assert response.usage_metadata["total_tokens"] > 0
     assert response.response_metadata["model_name"]
+    assert response.response_metadata["service_tier"]
     for tool_output in response.additional_kwargs["tool_outputs"]:
         assert tool_output["id"]
         assert tool_output["status"]