diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index a1b7e989e71..52cb1e1912d 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -1458,7 +1458,6 @@ class BaseChatOpenAI(BaseChatModel):
                     "schema must be specified when method is not 'json_mode'. "
                     "Received None."
                 )
-            strict = strict if strict is not None else True
             response_format = _convert_to_openai_response_format(schema, strict=strict)
             llm = self.bind(response_format=response_format)
             if is_pydantic_schema:
@@ -2139,14 +2138,33 @@ def _resize(width: int, height: int) -> Tuple[int, int]:
 
 
 def _convert_to_openai_response_format(
-    schema: Union[Dict[str, Any], Type], strict: bool
+    schema: Union[Dict[str, Any], Type], *, strict: Optional[bool] = None
 ) -> Union[Dict, TypeBaseModel]:
     if isinstance(schema, type) and is_basemodel_subclass(schema):
         return schema
+
+    if "json_schema" in schema and schema.get("type") == "json_schema":
+        response_format = schema
+    elif "name" in schema and "schema" in schema:
+        response_format = {"type": "json_schema", "json_schema": schema}
     else:
+        strict = strict if strict is not None else True
         function = convert_to_openai_function(schema, strict=strict)
         function["schema"] = function.pop("parameters")
-        return {"type": "json_schema", "json_schema": function}
+        response_format = {"type": "json_schema", "json_schema": function}
+
+    # An explicit ``strict`` that conflicts with a value already set in the
+    # schema is ambiguous, so raise. Read the existing value off
+    # ``response_format`` (present on every branch), not ``schema``, which may
+    # be the bare inner json_schema dict and lack a "json_schema" key.
+    if strict is not None and strict is not response_format["json_schema"].get(
+        "strict"
+    ):
+        msg = (
+            f"Output schema already has 'strict' value set to "
+            f"{response_format['json_schema']['strict']} but 'strict' also passed "
+            f"in to with_structured_output as {strict}. Please make sure that "
+            f"'strict' is only specified in one place."
+        )
+        raise ValueError(msg)
+    return response_format
 
 
 @chain
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index c6118a414a4..8b96b89e083 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -23,6 +23,7 @@ from langchain_openai import ChatOpenAI
 from langchain_openai.chat_models.base import (
     _convert_dict_to_message,
     _convert_message_to_dict,
+    _convert_to_openai_response_format,
     _create_usage_metadata,
     _format_message_content,
 )
@@ -761,3 +762,46 @@ def test__create_usage_metadata() -> None:
         input_token_details={},
         output_token_details={},
     )
+
+
+def test__convert_to_openai_response_format() -> None:
+    # Test response formats that aren't tool-like.
+    response_format: dict = {
+        "type": "json_schema",
+        "json_schema": {
+            "name": "math_reasoning",
+            "schema": {
+                "type": "object",
+                "properties": {
+                    "steps": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "explanation": {"type": "string"},
+                                "output": {"type": "string"},
+                            },
+                            "required": ["explanation", "output"],
+                            "additionalProperties": False,
+                        },
+                    },
+                    "final_answer": {"type": "string"},
+                },
+                "required": ["steps", "final_answer"],
+                "additionalProperties": False,
+            },
+            "strict": True,
+        },
+    }
+
+    actual = _convert_to_openai_response_format(response_format)
+    assert actual == response_format
+
+    actual = _convert_to_openai_response_format(response_format["json_schema"])
+    assert actual == response_format
+
+    actual = _convert_to_openai_response_format(response_format, strict=True)
+    assert actual == response_format
+
+    with pytest.raises(ValueError):
+        _convert_to_openai_response_format(response_format, strict=False)