mirror of
https://github.com/hwchase17/langchain.git
synced 2026-03-18 02:53:16 +00:00
fix(openai): avoid PydanticSerializationUnexpectedValue for structured output (#35543)
This commit is contained in:
@@ -1548,7 +1548,13 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
generations = []
|
||||
|
||||
response_dict = (
|
||||
response if isinstance(response, dict) else response.model_dump()
|
||||
response
|
||||
if isinstance(response, dict)
|
||||
# `parsed` may hold arbitrary Pydantic models from structured output.
|
||||
# Exclude it from this dump and copy it from the typed response below.
|
||||
else response.model_dump(
|
||||
exclude={"choices": {"__all__": {"message": {"parsed"}}}}
|
||||
)
|
||||
)
|
||||
# Sometimes the model API call will return an error, which we should raise (this
# is typically accompanied by a null value for `choices`, which we raise for
|
||||
|
||||
@@ -1388,6 +1388,49 @@ def test_structured_outputs_parser() -> None:
|
||||
assert result == parsed_response
|
||||
|
||||
|
||||
def test_create_chat_result_avoids_parsed_model_dump_warning() -> None:
    """Regression test: dumping a structured-output response must not warn.

    ``_create_chat_result`` excludes ``choices[*].message.parsed`` from the
    ``model_dump()`` call and copies the typed value from the response object
    afterwards, so no ``PydanticSerializationUnexpectedValue`` warning
    (``field_name='parsed'``) should be emitted.
    """

    class ModelOutput(BaseModel):
        output: str

    # Minimal stand-ins for the OpenAI SDK response types. ``parsed`` is
    # annotated as ``None`` so that injecting an arbitrary Pydantic model via
    # ``model_construct`` reproduces the type mismatch that used to warn on
    # serialization.
    class FakeParsedMessage(openai.BaseModel):
        role: Literal["assistant"] = "assistant"
        content: str = '{"output": "Paris"}'
        parsed: None = None
        refusal: str | None = None

    class FakeChoice(openai.BaseModel):
        index: int = 0
        finish_reason: Literal["stop"] = "stop"
        message: FakeParsedMessage

    class FakeChatCompletion(openai.BaseModel):
        id: str = "chatcmpl-1"
        object: str = "chat.completion"
        created: int = 0
        model: str = "gpt-4o-mini"
        choices: list[FakeChoice]
        usage: dict[str, int] | None = None

    expected_parsed = ModelOutput(output="Paris")
    # ``model_construct`` bypasses validation, letting ``parsed`` carry an
    # arbitrary Pydantic model exactly as the SDK's parse endpoint does.
    message = FakeParsedMessage.model_construct(parsed=expected_parsed)
    completion = FakeChatCompletion.model_construct(
        choices=[FakeChoice.model_construct(message=message)],
        usage={"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
    )

    llm = ChatOpenAI(model="gpt-4o-mini")
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter("always")
        result = llm._create_chat_result(completion)

    emitted = [str(w.message) for w in recorded]
    assert all("field_name='parsed'" not in text for text in emitted)
    assert result.generations[0].message.additional_kwargs["parsed"] == expected_parsed
|
||||
|
||||
|
||||
def test_structured_outputs_parser_valid_falsy_response() -> None:
|
||||
class LunchBox(BaseModel):
|
||||
sandwiches: list[str]
|
||||
|
||||
Reference in New Issue
Block a user