From 851fd438cf068ff78aac7348ff4b5dcddd9e1a2c Mon Sep 17 00:00:00 2001
From: ccurme
Date: Thu, 22 May 2025 12:14:53 -0400
Subject: [PATCH] openai[patch]: relax Azure llm streaming callback test
 (#31319)

Effectively reverts https://github.com/langchain-ai/langchain/pull/29302,
but checks that counts are "less than" an expected count instead of equal
to it.
---
 .../openai/tests/integration_tests/llms/test_azure.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/partners/openai/tests/integration_tests/llms/test_azure.py b/libs/partners/openai/tests/integration_tests/llms/test_azure.py
index 3af71e31d55..4d601c0f2ae 100644
--- a/libs/partners/openai/tests/integration_tests/llms/test_azure.py
+++ b/libs/partners/openai/tests/integration_tests/llms/test_azure.py
@@ -149,7 +149,7 @@ def test_openai_streaming_callback() -> None:
         verbose=True,
     )
     llm.invoke("Write me a sentence with 100 words.")
-    assert callback_handler.llm_streams == 12
+    assert callback_handler.llm_streams < 15
 
 
 @pytest.mark.scheduled
@@ -172,5 +172,5 @@ async def test_openai_async_streaming_callback() -> None:
         verbose=True,
     )
     result = await llm.agenerate(["Write me a sentence with 100 words."])
-    assert callback_handler.llm_streams == 12
+    assert callback_handler.llm_streams < 15
     assert isinstance(result, LLMResult)
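
Context for the relaxed assertion: llm_streams on the test's fake handler counts on_llm_new_token callbacks, and the number of streamed chunks depends on how the Azure service splits the response, so it is not deterministic across runs; the patch therefore bounds the count rather than pinning it to 12. Below is a minimal, hypothetical sketch of such a counting handler (CountingStreamHandler is an illustrative stand-in, not the repo's FakeCallbackHandler):

from typing import Any

from langchain_core.callbacks.base import BaseCallbackHandler


class CountingStreamHandler(BaseCallbackHandler):
    """Illustrative stand-in for the test's FakeCallbackHandler."""

    def __init__(self) -> None:
        self.llm_streams = 0

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Invoked once per streamed chunk; chunk boundaries are chosen by the
        # service, so the total varies from run to run.
        self.llm_streams += 1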