From 12daba63ff3f3fab77a872e0228183ae4195ff82 Mon Sep 17 00:00:00 2001
From: Mason Daugherty
Date: Thu, 25 Sep 2025 12:57:33 -0400
Subject: [PATCH] test(openai): raise token limit for o1 test (#33118)

`test_o1[False-False]` was sometimes failing because the OpenAI o1 model
was hitting the token limit when capped at only 100 tokens.
---
 .../openai/tests/integration_tests/chat_models/test_base.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index 67177a2ef5b..e0064e73e21 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -1017,10 +1017,12 @@ async def test_astream_response_format() -> None:
 @pytest.mark.parametrize("use_responses_api", [False, True])
 @pytest.mark.parametrize("use_max_completion_tokens", [True, False])
 def test_o1(use_max_completion_tokens: bool, use_responses_api: bool) -> None:
+    # o1 models need higher token limits for reasoning
+    o1_token_limit = 1000
     if use_max_completion_tokens:
-        kwargs: dict = {"max_completion_tokens": MAX_TOKEN_COUNT}
+        kwargs: dict = {"max_completion_tokens": o1_token_limit}
     else:
-        kwargs = {"max_tokens": MAX_TOKEN_COUNT}
+        kwargs = {"max_tokens": o1_token_limit}
     response = ChatOpenAI(
         model="o1",
         reasoning_effort="low",
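
Note (not part of the patch): reasoning models such as o1 spend hidden
reasoning tokens that count against the completion budget, so a small
`max_completion_tokens` can be exhausted before any visible answer is
produced. Below is a minimal standalone sketch of the call the test
exercises, assuming `langchain-openai` is installed and `OPENAI_API_KEY`
is set in the environment; the model name and 1000-token limit mirror
the patch.

    # Minimal sketch, not part of the patch. Assumes langchain-openai is
    # installed and OPENAI_API_KEY is set in the environment.
    from langchain_openai import ChatOpenAI

    # Hidden reasoning tokens count against max_completion_tokens, so a
    # 100-token cap can be spent before any visible output is emitted.
    llm = ChatOpenAI(
        model="o1",
        reasoning_effort="low",
        max_completion_tokens=1000,  # mirrors the raised limit in the patch
    )

    print(llm.invoke("What is 3^3?").content)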