diff --git a/libs/partners/fireworks/tests/integration_tests/test_standard.py b/libs/partners/fireworks/tests/integration_tests/test_standard.py index 7a595e4c7da..b40b8476d88 100644 --- a/libs/partners/fireworks/tests/integration_tests/test_standard.py +++ b/libs/partners/fireworks/tests/integration_tests/test_standard.py @@ -29,3 +29,7 @@ class TestFireworksStandard(ChatModelIntegrationTests): self, model: BaseChatModel, my_adder_tool: BaseTool ) -> None: super().test_tool_message_histories_list_content(model, my_adder_tool) + + @property + def supports_json_mode(self) -> bool: + return True diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py index b97b8c10422..b62982e4ec9 100644 --- a/libs/partners/groq/tests/integration_tests/test_standard.py +++ b/libs/partners/groq/tests/integration_tests/test_standard.py @@ -26,6 +26,10 @@ class BaseTestGroq(ChatModelIntegrationTests): ) -> None: super().test_tool_message_histories_list_content(model, my_adder_tool) + @property + def supports_json_mode(self) -> bool: + return True + class TestGroqLlama(BaseTestGroq): @property @@ -41,6 +45,10 @@ class TestGroqLlama(BaseTestGroq): """Value to use for tool choice when used in tests.""" return "any" + @property + def supports_json_mode(self) -> bool: + return False # Not supported in streaming mode + @pytest.mark.xfail( reason=("Fails with 'Failed to call a function. 
Please adjust your prompt.'") ) diff --git a/libs/partners/mistralai/tests/integration_tests/test_standard.py b/libs/partners/mistralai/tests/integration_tests/test_standard.py index 822f2284abc..b95e36735a9 100644 --- a/libs/partners/mistralai/tests/integration_tests/test_standard.py +++ b/libs/partners/mistralai/tests/integration_tests/test_standard.py @@ -19,6 +19,10 @@ class TestMistralStandard(ChatModelIntegrationTests): def chat_model_params(self) -> dict: return {"model": "mistral-large-latest", "temperature": 0} + @property + def supports_json_mode(self) -> bool: + return True + @property def tool_choice_value(self) -> Optional[str]: """Value to use for tool choice when used in tests.""" diff --git a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py index 476640ddd79..4b8feccc8af 100644 --- a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py +++ b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py @@ -22,6 +22,10 @@ class TestChatOllama(ChatModelIntegrationTests): def supports_image_inputs(self) -> bool: return True + @property + def supports_json_mode(self) -> bool: + return True + @pytest.mark.xfail( reason=( "Fails with 'AssertionError'. Ollama does not support 'tool_choice' yet." 
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py b/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py index fff0599963c..acf44a5ac0b 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py @@ -31,6 +31,10 @@ class TestAzureOpenAIStandard(ChatModelIntegrationTests): def supports_image_inputs(self) -> bool: return True + @property + def supports_json_mode(self) -> bool: + return True + @pytest.mark.xfail(reason="Not yet supported.") def test_usage_metadata_streaming(self, model: BaseChatModel) -> None: super().test_usage_metadata_streaming(model) diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py index 890e7026808..f131dbaa2bb 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py @@ -25,6 +25,10 @@ class TestOpenAIStandard(ChatModelIntegrationTests): def supports_image_inputs(self) -> bool: return True + @property + def supports_json_mode(self) -> bool: + return True + @property def supported_usage_metadata_details( self, diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py index 7a787563b26..f5691350044 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py @@ -178,7 +178,7 @@ class ChatModelIntegrationTests(ChatModelTests): output. By default, this is determined by whether the chat model's - `with_structured_output` method is overridden. If the base implementation is + ``with_structured_output`` method is overridden. 
If the base implementation is intended to be used, this method should be overridden. See: https://python.langchain.com/docs/concepts/structured_outputs/ @@ -191,6 +191,21 @@ class ChatModelIntegrationTests(ChatModelTests): def has_structured_output(self) -> bool: return True + .. dropdown:: supports_json_mode + + Boolean property indicating whether the chat model supports JSON mode in + ``with_structured_output``. + + See: https://python.langchain.com/docs/concepts/structured_outputs/#json-mode + + Example: + + .. code-block:: python + + @property + def supports_json_mode(self) -> bool: + return True + .. dropdown:: supports_image_inputs Boolean property indicating whether the chat model supports image inputs. @@ -1295,6 +1310,68 @@ class ChatModelIntegrationTests(ChatModelTests): joke_result = chat.invoke("Give me a joke about cats, include the punchline.") assert isinstance(joke_result, Joke) + def test_json_mode(self, model: BaseChatModel) -> None: + """Test structured output via `JSON mode. <https://python.langchain.com/docs/concepts/structured_outputs/#json-mode>`_ + + This test is optional and should be skipped if the model does not support + the JSON mode feature (see Configuration below). + + .. dropdown:: Configuration + + To disable this test, set ``supports_json_mode`` to False in your + test class: + + .. code-block:: python + + class TestMyChatModelIntegration(ChatModelIntegrationTests): + @property + def supports_json_mode(self) -> bool: + return False + + .. 
dropdown:: Troubleshooting + + See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ # noqa: E501 + if not self.supports_json_mode: + pytest.skip("Test requires json mode support.") + + from pydantic import BaseModel as BaseModelProper + from pydantic import Field as FieldProper + + class Joke(BaseModelProper): + """Joke to tell user.""" + + setup: str = FieldProper(description="question to set up a joke") + punchline: str = FieldProper(description="answer to resolve the joke") + + # Pydantic class + # Type ignoring since the interface only officially supports pydantic 1 + # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2. + # We'll need to do a pass updating the type signatures. + chat = model.with_structured_output(Joke, method="json_mode") # type: ignore[arg-type] + msg = ( + "Tell me a joke about cats. Return the result as a JSON with 'setup' and " + "'punchline' keys. Return nothing other than JSON." 
+ ) + result = chat.invoke(msg) + assert isinstance(result, Joke) + + for chunk in chat.stream(msg): + assert isinstance(chunk, Joke) + + # Schema + chat = model.with_structured_output( + Joke.model_json_schema(), method="json_mode" + ) + result = chat.invoke(msg) + assert isinstance(result, dict) + assert set(result.keys()) == {"setup", "punchline"} + + for chunk in chat.stream(msg): + assert isinstance(chunk, dict) + assert isinstance(chunk, dict) # for mypy + assert set(chunk.keys()) == {"setup", "punchline"} + def test_tool_message_histories_string_content( self, model: BaseChatModel, my_adder_tool: BaseTool ) -> None: diff --git a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py index 9d3b20de4ce..766367f7359 100644 --- a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py @@ -132,6 +132,11 @@ class ChatModelTests(BaseStandardTests): is not BaseChatModel.with_structured_output ) + @property + def supports_json_mode(self) -> bool: + """(bool) whether the chat model supports JSON mode.""" + return False + @property def supports_image_inputs(self) -> bool: """(bool) whether the chat model supports image inputs, defaults to @@ -281,7 +286,7 @@ class ChatModelUnitTests(ChatModelTests): output. By default, this is determined by whether the chat model's - `with_structured_output` method is overridden. If the base implementation is + ``with_structured_output`` method is overridden. If the base implementation is intended to be used, this method should be overridden. See: https://python.langchain.com/docs/concepts/structured_outputs/ @@ -294,6 +299,21 @@ class ChatModelUnitTests(ChatModelTests): def has_structured_output(self) -> bool: return True + .. dropdown:: supports_json_mode + + Boolean property indicating whether the chat model supports JSON mode in + ``with_structured_output``. 
+ + See: https://python.langchain.com/docs/concepts/structured_outputs/#json-mode + + Example: + + .. code-block:: python + + @property + def supports_json_mode(self) -> bool: + return True + .. dropdown:: supports_image_inputs Boolean property indicating whether the chat model supports image inputs.