diff --git a/libs/partners/openai/tests/cassettes/test_parsed_pydantic_schema.yaml.gz b/libs/partners/openai/tests/cassettes/test_parsed_pydantic_schema.yaml.gz
new file mode 100644
index 00000000000..13c0b8896de
Binary files /dev/null and b/libs/partners/openai/tests/cassettes/test_parsed_pydantic_schema.yaml.gz differ
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
index 3568b3dd136..6d9602e1702 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
@@ -181,8 +181,13 @@ class FooDict(TypedDict):
     response: str
 
 
-def test_parsed_pydantic_schema() -> None:
-    llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)
+@pytest.mark.default_cassette("test_parsed_pydantic_schema.yaml.gz")
+@pytest.mark.vcr
+@pytest.mark.parametrize("output_version", ["v0", "responses/v1", "v1"])
+def test_parsed_pydantic_schema(output_version: Literal["v0", "responses/v1", "v1"]) -> None:
+    llm = ChatOpenAI(
+        model=MODEL_NAME, use_responses_api=True, output_version=output_version
+    )
     response = llm.invoke("how are ya", response_format=Foo)
     parsed = Foo(**json.loads(response.text()))
     assert parsed == response.additional_kwargs["parsed"]