fix(core): implement sleep delay in FakeMessagesListChatModel _generate (#32014)

Implement the sleep delay in `FakeMessagesListChatModel._generate` so that the
`sleep` parameter is respected, matching the documented behavior. This adds
artificial latency between responses for testing purposes.
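
For illustration, a minimal usage sketch of the behavior this enables. `FakeMessagesListChatModel` and its `sleep` field are taken from the linked API reference; the message contents and the 0.05s delay are arbitrary values chosen for the example:

```python
import time

from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import AIMessage, HumanMessage

# With this change, each invoke should pause for `sleep` seconds
# before returning the next canned response.
model = FakeMessagesListChatModel(
    responses=[AIMessage(content="first"), AIMessage(content="second")],
    sleep=0.05,
)

start = time.time()
result = model.invoke([HumanMessage(content="hi")])
print(result.content, f"(elapsed: {time.time() - start:.3f}s)")
```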

Issue: closes
[#31974](https://github.com/langchain-ai/langchain/issues/31974), following the
documented [`sleep` parameter](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.fake_chat_models.FakeMessagesListChatModel.html#langchain_core.language_models.fake_chat_models.FakeMessagesListChatModel.sleep).

Dependencies: none

Twitter handle: [@siddarthreddyg2](https://x.com/siddarthreddyg2)

---------

Signed-off-by: Siddarthreddygsr <siddarthreddygsr@gmail.com>
2 changed files with 20 additions and 1 deletion


@@ -36,6 +36,8 @@ class FakeMessagesListChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
+        if self.sleep is not None:
+            time.sleep(self.sleep)
         response = self.responses[self.i]
         if self.i < len(self.responses) - 1:
             self.i += 1
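
For context, a sketch of how the patched `_generate` plausibly reads in full. The lines outside the hunk (the index reset and the `ChatResult` construction) are reconstructed from how the module's fake models cycle through their canned responses, so treat them as an approximation rather than a quote of the file:

```python
    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        # New in this PR: honor the documented `sleep` delay before responding.
        if self.sleep is not None:
            time.sleep(self.sleep)
        # Existing behavior: return the next canned response, advancing the
        # index and wrapping around after the last response.
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
        else:
            self.i = 0
        generation = ChatGeneration(message=response)
        return ChatResult(generations=[generation])
```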


@@ -1,5 +1,6 @@
 """Tests for verifying that testing utility code works as expected."""
 
+import time
 from itertools import cycle
 from typing import Any, Optional, Union
 from uuid import UUID
@@ -9,10 +10,11 @@ from typing_extensions import override
 from langchain_core.callbacks.base import AsyncCallbackHandler
 from langchain_core.language_models import (
     FakeListChatModel,
+    FakeMessagesListChatModel,
     GenericFakeChatModel,
     ParrotFakeChatModel,
 )
-from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
 from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
 from tests.unit_tests.stubs import (
     _any_id_ai_message,
@@ -230,3 +232,18 @@ def test_fake_list_chat_model_batch() -> None:
     fake = FakeListChatModel(responses=["a", "b", "c"])
     resp = fake.batch(["1", "2", "3"])
     assert resp == expected
+
+
+def test_fake_messages_list_chat_model_sleep_delay() -> None:
+    sleep_time = 0.1
+    model = FakeMessagesListChatModel(
+        responses=[AIMessage(content="A"), AIMessage(content="B")],
+        sleep=sleep_time,
+    )
+    messages = [HumanMessage(content="C")]
+
+    start = time.time()
+    model.invoke(messages)
+    elapsed = time.time() - start
+
+    assert elapsed >= sleep_time
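
A side note on the timing assertion, offered as a sketch rather than a change to the diff: `time.time()` reads the wall clock, which can jump under clock adjustments, so an equivalent check with a monotonic clock could look like the following (the helper name is hypothetical):

```python
import time

def assert_invoke_takes_at_least(model, messages, min_seconds: float) -> None:
    # Hypothetical helper: measure elapsed time with a monotonic clock,
    # which never goes backwards even if the system clock is adjusted.
    start = time.monotonic()
    model.invoke(messages)
    elapsed = time.monotonic() - start
    assert elapsed >= min_seconds
```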