From 5c6f8fe0a63442e594c7ea0c996e37c3de6def65 Mon Sep 17 00:00:00 2001
From: Mattijs Ugen <144798+akaIDIOT@users.noreply.github.com>
Date: Thu, 19 Feb 2026 03:06:43 +0100
Subject: [PATCH] fix(openai): accept valid responses that are falsy at runtime
 (#35307)

---
 .../langchain_openai/chat_models/base.py      |  2 +-
 .../tests/unit_tests/chat_models/test_base.py | 20 +++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 3a654a6451f..c74c40f3c56 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -3765,7 +3765,7 @@ def _convert_to_openai_response_format(
 def _oai_structured_outputs_parser(
     ai_msg: AIMessage, schema: type[_BM]
 ) -> PydanticBaseModel | None:
-    if parsed := ai_msg.additional_kwargs.get("parsed"):
+    if (parsed := ai_msg.additional_kwargs.get("parsed")) is not None:
         if isinstance(parsed, dict):
             return schema(**parsed)
         return parsed
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index c11ee325b57..438a1c4b60f 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -1388,6 +1388,26 @@ def test_structured_outputs_parser() -> None:
     assert result == parsed_response
 
 
+def test_structured_outputs_parser_valid_falsy_response() -> None:
+    class LunchBox(BaseModel):
+        sandwiches: list[str]
+
+        def __len__(self) -> int:
+            return len(self.sandwiches)
+
+    # prepare a valid *but falsy* response object, an empty LunchBox
+    parsed_response = LunchBox(sandwiches=[])
+    assert len(parsed_response) == 0
+    llm_output = AIMessage(
+        content='{"sandwiches": []}', additional_kwargs={"parsed": parsed_response}
+    )
+    output_parser = RunnableLambda(
+        partial(_oai_structured_outputs_parser, schema=LunchBox)
+    )
+    result = output_parser.invoke(llm_output)
+    assert result == parsed_response
+
+
 def test__construct_lc_result_from_responses_api_error_handling() -> None:
     """Test that errors in the response are properly raised."""
     response = Response(
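
Note: the sketch below is not part of the patch; it is a minimal, illustrative reproduction of why the old truthiness check drops a valid result, assuming pydantic v2. The `LunchBox` model mirrors the one in the new test, and the two conditionals stand in for the old and new checks in `_oai_structured_outputs_parser`.

    from pydantic import BaseModel


    class LunchBox(BaseModel):
        # No __bool__ is defined, so bool() falls back to __len__:
        # an empty LunchBox is falsy even though it is a valid result.
        sandwiches: list[str]

        def __len__(self) -> int:
            return len(self.sandwiches)


    parsed = LunchBox(sandwiches=[])

    # Old check: `if parsed:` is False for the empty LunchBox, so the
    # parser fell through and returned None despite having a parsed value.
    old_result = parsed if parsed else None
    assert old_result is None

    # New check: only a genuinely missing value (None) is rejected.
    new_result = parsed if parsed is not None else None
    assert new_result is parsed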