openai[patch]: accept json schema response format directly (#27623)

fix #25460

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
This commit is contained in:
Bagatur
2024-10-24 11:19:15 -07:00
committed by GitHub
parent 20b56a0233
commit 655ced84d7
2 changed files with 64 additions and 3 deletions

View File

@@ -1458,7 +1458,6 @@ class BaseChatOpenAI(BaseChatModel):
                     "schema must be specified when method is not 'json_mode'. "
                     "Received None."
                 )
-            strict = strict if strict is not None else True
             response_format = _convert_to_openai_response_format(schema, strict=strict)
             llm = self.bind(response_format=response_format)
             if is_pydantic_schema:
@@ -2139,14 +2138,32 @@ def _resize(width: int, height: int) -> Tuple[int, int]:
def _convert_to_openai_response_format( def _convert_to_openai_response_format(
schema: Union[Dict[str, Any], Type], strict: bool schema: Union[Dict[str, Any], Type], *, strict: Optional[bool] = None
) -> Union[Dict, TypeBaseModel]: ) -> Union[Dict, TypeBaseModel]:
if isinstance(schema, type) and is_basemodel_subclass(schema): if isinstance(schema, type) and is_basemodel_subclass(schema):
return schema return schema
if "json_schema" in schema and schema.get("type") == "json_schema":
response_format = schema
elif "name" in schema and "schema" in schema:
response_format = {"type": "json_schema", "json_schema": schema}
else: else:
strict = strict if strict is not None else True
function = convert_to_openai_function(schema, strict=strict) function = convert_to_openai_function(schema, strict=strict)
function["schema"] = function.pop("parameters") function["schema"] = function.pop("parameters")
return {"type": "json_schema", "json_schema": function} response_format = {"type": "json_schema", "json_schema": function}
if strict is not None and strict is not response_format["json_schema"].get(
"strict"
):
msg = (
f"Output schema already has 'strict' value set to "
f"{schema['json_schema']['strict']} but 'strict' also passed in to "
f"with_structured_output as {strict}. Please make sure that "
f"'strict' is only specified in one place."
)
raise ValueError(msg)
return response_format
@chain @chain

View File

@@ -23,6 +23,7 @@ from langchain_openai import ChatOpenAI
 from langchain_openai.chat_models.base import (
     _convert_dict_to_message,
     _convert_message_to_dict,
+    _convert_to_openai_response_format,
     _create_usage_metadata,
     _format_message_content,
 )
@@ -761,3 +762,46 @@ def test__create_usage_metadata() -> None:
         input_token_details={},
         output_token_details={},
     )
def test__convert_to_openai_response_format() -> None:
    """Response formats that aren't tool-like are accepted directly."""
    # The raw JSON schema describing the expected model output.
    inner_schema = {
        "type": "object",
        "properties": {
            "steps": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "explanation": {"type": "string"},
                        "output": {"type": "string"},
                    },
                    "required": ["explanation", "output"],
                    "additionalProperties": False,
                },
            },
            "final_answer": {"type": "string"},
        },
        "required": ["steps", "final_answer"],
        "additionalProperties": False,
    }
    # A complete OpenAI ``json_schema`` response format payload.
    response_format: dict = {
        "type": "json_schema",
        "json_schema": {
            "name": "math_reasoning",
            "schema": inner_schema,
            "strict": True,
        },
    }

    # Full response format passes through unchanged.
    assert _convert_to_openai_response_format(response_format) == response_format
    # A bare json_schema payload is wrapped into the full format.
    assert (
        _convert_to_openai_response_format(response_format["json_schema"])
        == response_format
    )
    # An explicit ``strict`` that agrees with the schema is accepted.
    assert (
        _convert_to_openai_response_format(response_format, strict=True)
        == response_format
    )
    # A conflicting ``strict`` must be rejected.
    with pytest.raises(ValueError):
        _convert_to_openai_response_format(response_format, strict=False)