mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-28 15:00:23 +00:00
test(openai): raise token limit for o1 test (#33118)
`test_o1[False-False]` was flaky because the OpenAI o1 model was hitting the token limit: only 100 tokens were allowed, which is too few for the model's reasoning output.
This commit is contained in:
@@ -1017,10 +1017,12 @@ async def test_astream_response_format() -> None:
 @pytest.mark.parametrize("use_responses_api", [False, True])
 @pytest.mark.parametrize("use_max_completion_tokens", [True, False])
 def test_o1(use_max_completion_tokens: bool, use_responses_api: bool) -> None:
+    # o1 models need higher token limits for reasoning
+    o1_token_limit = 1000
     if use_max_completion_tokens:
-        kwargs: dict = {"max_completion_tokens": MAX_TOKEN_COUNT}
+        kwargs: dict = {"max_completion_tokens": o1_token_limit}
     else:
-        kwargs = {"max_tokens": MAX_TOKEN_COUNT}
+        kwargs = {"max_tokens": o1_token_limit}
     response = ChatOpenAI(
         model="o1",
         reasoning_effort="low",
|
Reference in New Issue
Block a user