Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-13 13:36:15 +00:00.
Commit: fix(openai): structured output (#32551).
@@ -3586,13 +3586,6 @@ def _construct_responses_api_payload(
|
||||
|
||||
# Structured output
|
||||
if schema := payload.pop("response_format", None):
|
||||
if payload.get("text"):
|
||||
text = payload["text"]
|
||||
raise ValueError(
|
||||
"Can specify at most one of 'response_format' or 'text', received both:"
|
||||
f"\n{schema=}\n{text=}"
|
||||
)
|
||||
|
||||
# For pydantic + non-streaming case, we use responses.parse.
|
||||
# Otherwise, we use responses.create.
|
||||
strict = payload.pop("strict", None)
|
||||
@@ -3605,6 +3598,9 @@ def _construct_responses_api_payload(
|
||||
else:
|
||||
schema_dict = schema
|
||||
if schema_dict == {"type": "json_object"}: # JSON mode
|
||||
if "text" in payload and isinstance(payload["text"], dict):
|
||||
payload["text"]["format"] = {"type": "json_object"}
|
||||
else:
|
||||
payload["text"] = {"format": {"type": "json_object"}}
|
||||
elif (
|
||||
(
|
||||
@@ -3615,17 +3611,20 @@ def _construct_responses_api_payload(
|
||||
and (isinstance(response_format, dict))
|
||||
and (response_format["type"] == "json_schema")
|
||||
):
|
||||
payload["text"] = {
|
||||
"format": {"type": "json_schema", **response_format["json_schema"]}
|
||||
}
|
||||
format_value = {"type": "json_schema", **response_format["json_schema"]}
|
||||
if "text" in payload and isinstance(payload["text"], dict):
|
||||
payload["text"]["format"] = format_value
|
||||
else:
|
||||
payload["text"] = {"format": format_value}
|
||||
else:
|
||||
pass
|
||||
|
||||
verbosity = payload.pop("verbosity", None)
|
||||
if verbosity is not None:
|
||||
if "text" not in payload:
|
||||
payload["text"] = {"format": {"type": "text"}}
|
||||
if "text" in payload and isinstance(payload["text"], dict):
|
||||
payload["text"]["verbosity"] = verbosity
|
||||
else:
|
||||
payload["text"] = {"verbosity": verbosity}
|
||||
|
||||
return payload
|
||||
|
||||
|
@@ -23,6 +23,7 @@ from langchain_core.messages import (
|
||||
from langchain_core.messages.ai import UsageMetadata
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
from langchain_core.runnables import RunnableLambda
|
||||
from langchain_core.runnables.base import RunnableBinding, RunnableSequence
|
||||
from langchain_core.tracers.base import BaseTracer
|
||||
from langchain_core.tracers.schemas import Run
|
||||
from openai.types.responses import ResponseOutputMessage, ResponseReasoningItem
|
||||
@@ -1201,7 +1202,6 @@ def test_verbosity_parameter_payload() -> None:
|
||||
payload = llm._get_request_payload(messages, stop=None)
|
||||
|
||||
assert payload["text"]["verbosity"] == "high"
|
||||
assert payload["text"]["format"]["type"] == "text"
|
||||
|
||||
|
||||
def test_structured_output_old_model() -> None:
|
||||
@@ -2718,6 +2718,52 @@ def test_extra_body_with_model_kwargs() -> None:
|
||||
assert payload["temperature"] == 0.5
|
||||
|
||||
|
||||
@pytest.mark.parametrize("verbosity_format", ["model_kwargs", "top_level"])
@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("schema_format", ["pydantic", "dict"])
def test_structured_output_verbosity(
    verbosity_format: str, streaming: bool, schema_format: str
) -> None:
    """Verbosity must coexist with structured-output config in the `text` param.

    Covers verbosity supplied via ``model_kwargs`` vs. top-level, streaming
    on/off, and pydantic vs. raw-dict schemas.
    """

    class MySchema(BaseModel):
        foo: str

    # Build constructor kwargs according to how verbosity is supplied.
    if verbosity_format == "model_kwargs":
        init_params: dict[str, Any] = {"model_kwargs": {"text": {"verbosity": "high"}}}
    else:
        init_params = {"verbosity": "high"}
    if streaming:
        init_params["streaming"] = True

    llm = ChatOpenAI(model="gpt-5", use_responses_api=True, **init_params)

    schema: Any = (
        MySchema if schema_format == "pydantic" else MySchema.model_json_schema()
    )

    # Unwrap the RunnableSequence produced by with_structured_output to reach
    # the bound ChatOpenAI and the kwargs it will be invoked with.
    chain = cast(RunnableSequence, llm.with_structured_output(schema))
    bound_step = cast(RunnableBinding, chain.first)
    inner_llm = cast(ChatOpenAI, bound_step.bound)

    payload = inner_llm._get_request_payload(
        [HumanMessage(content="Hello")], **bound_step.kwargs
    )

    # Verbosity must be present in the `text` param.
    assert "text" in payload
    assert "verbosity" in payload["text"]
    assert payload["text"]["verbosity"] == "high"

    # Schema routing: pydantic + non-streaming goes through responses.parse
    # (`text_format`); every other combination embeds a json_schema format.
    if schema_format == "pydantic" and not streaming:
        assert payload["text_format"] == schema
    else:
        assert "format" in payload["text"]
        assert payload["text"]["format"]["type"] == "json_schema"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("use_responses_api", [False, True])
|
||||
def test_gpt_5_temperature(use_responses_api: bool) -> None:
|
||||
llm = ChatOpenAI(
|
||||
|
Reference in New Issue
Block a user