diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index fb93df89d3d..4351bd529b7 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -1548,7 +1548,13 @@ class BaseChatOpenAI(BaseChatModel):
         generations = []
 
         response_dict = (
-            response if isinstance(response, dict) else response.model_dump()
+            response
+            if isinstance(response, dict)
+            # `parsed` may hold arbitrary Pydantic models from structured output.
+            # Exclude it from this dump and copy it from the typed response below.
+            else response.model_dump(
+                exclude={"choices": {"__all__": {"message": {"parsed"}}}}
+            )
         )
         # Sometimes the AI Model calling will get error, we should raise it (this is
         # typically followed by a null value for `choices`, which we raise for
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index 438a1c4b60f..0f2e6ec7129 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -1388,6 +1388,49 @@ def test_structured_outputs_parser() -> None:
     assert result == parsed_response
 
 
+def test_create_chat_result_avoids_parsed_model_dump_warning() -> None:
+    class ModelOutput(BaseModel):
+        output: str
+
+    class MockParsedMessage(openai.BaseModel):
+        role: Literal["assistant"] = "assistant"
+        content: str = '{"output": "Paris"}'
+        parsed: None = None
+        refusal: str | None = None
+
+    class MockChoice(openai.BaseModel):
+        index: int = 0
+        finish_reason: Literal["stop"] = "stop"
+        message: MockParsedMessage
+
+    class MockChatCompletion(openai.BaseModel):
+        id: str = "chatcmpl-1"
+        object: str = "chat.completion"
+        created: int = 0
+        model: str = "gpt-4o-mini"
+        choices: list[MockChoice]
+        usage: dict[str, int] | None = None
+
+    parsed_response = ModelOutput(output="Paris")
+    response = MockChatCompletion.model_construct(
+        choices=[
+            MockChoice.model_construct(
+                message=MockParsedMessage.model_construct(parsed=parsed_response)
+            )
+        ],
+        usage={"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
+    )
+
+    llm = ChatOpenAI(model="gpt-4o-mini")
+    with warnings.catch_warnings(record=True) as caught_warnings:
+        warnings.simplefilter("always")
+        result = llm._create_chat_result(response)
+
+    warning_messages = [str(warning.message) for warning in caught_warnings]
+    assert not any("field_name='parsed'" in message for message in warning_messages)
+    assert result.generations[0].message.additional_kwargs["parsed"] == parsed_response
+
+
 def test_structured_outputs_parser_valid_falsy_response() -> None:
     class LunchBox(BaseModel):
         sandwiches: list[str]