mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-03 20:16:52 +00:00
core[minor]: Add InMemoryRateLimiter (#21992)
This PR introduces the following Runnables: 1. BaseRateLimiter: an abstraction for specifying a time based rate limiter as a Runnable 2. InMemoryRateLimiter: Provides an in-memory implementation of a rate limiter ## Example ```python from langchain_core.runnables import InMemoryRateLimiter, RunnableLambda from datetime import datetime foo = InMemoryRateLimiter(requests_per_second=0.5) def meow(x): print(datetime.now().strftime("%H:%M:%S.%f")) return x chain = foo | meow for _ in range(10): print(chain.invoke('hello')) ``` Produces: ``` 17:12:07.530151 hello 17:12:09.537932 hello 17:12:11.548375 hello 17:12:13.558383 hello 17:12:15.568348 hello 17:12:17.578171 hello 17:12:19.587508 hello 17:12:21.597877 hello 17:12:23.607707 hello 17:12:25.617978 hello ```  ## Interface The rate limiter uses the following interface for acquiring a token: ```python class BaseRateLimiter(Runnable[Input, Output], abc.ABC): @abc.abstractmethod def acquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire the necessary tokens for the rate limiter.``` ``` The flag `blocking` has been added to the abstraction to allow supporting streaming (which is easier if blocking=False). ## Limitations - The rate limiter is not designed to work across different processes. It is an in-memory rate limiter, but it is thread safe. - The rate limiter only supports time-based rate limiting. It does not take into account the size of the request or any other factors. - The current implementation does not handle streaming inputs well and will consume all inputs even if the rate limit has been reached. Better support for streaming inputs will be added in the future. - When the rate limiter is combined with another runnable via a RunnableSequence, usage of .batch() or .abatch() will only respect the average rate limit. There will be bursty behavior as .batch() and .abatch() wait for each step to complete before starting the next step. 
One way to mitigate this is to use batch_as_completed() or abatch_as_completed(). ## Bursty behavior in `batch` and `abatch` When the rate limiter is combined with another runnable via a RunnableSequence, usage of .batch() or .abatch() will only respect the average rate limit. There will be bursty behavior as .batch() and .abatch() wait for each step to complete before starting the next step. This becomes a problem if users are using `batch` and `abatch` with many inputs (e.g., 100). In this case, there will be a burst of 100 inputs into the batch of the rate limited runnable. 1. Using a RunnableBinding The API would look like: ```python from langchain_core.runnables import InMemoryRateLimiter, RunnableLambda rate_limiter = InMemoryRateLimiter(requests_per_second=0.5) def meow(x): return x rate_limited_meow = RunnableLambda(meow).with_rate_limiter(rate_limiter) ``` 2. Another option is to add some init option to RunnableSequence that changes `.batch()` to be depth first (e.g., by delegating to `batch_as_completed`) ```python RunnableSequence(first=rate_limiter, last=model, how='batch-depth-first') ``` Pros: Does not require Runnable Binding Cons: Feels over-complicated
This commit is contained in:
@@ -11,6 +11,7 @@ EXPECTED_ALL = [
|
||||
"run_in_executor",
|
||||
"patch_config",
|
||||
"RouterInput",
|
||||
"InMemoryRateLimiter",
|
||||
"RouterRunnable",
|
||||
"Runnable",
|
||||
"RunnableSerializable",
|
||||
|
145
libs/core/tests/unit_tests/runnables/test_rate_limiter.py
Normal file
145
libs/core/tests/unit_tests/runnables/test_rate_limiter.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""Test rate limiter."""
|
||||
|
||||
import time
|
||||
|
||||
import pytest
|
||||
from freezegun import freeze_time
|
||||
|
||||
from langchain_core.runnables import RunnableLambda
|
||||
from langchain_core.runnables.rate_limiter import InMemoryRateLimiter
|
||||
|
||||
|
||||
@pytest.fixture
def rate_limiter() -> InMemoryRateLimiter:
    """Provide a small, fast-polling limiter for the tests.

    Two requests per second, polled every 0.1 s, with at most two
    tokens bankable at any one time.
    """
    return InMemoryRateLimiter(
        requests_per_second=2,
        check_every_n_seconds=0.1,
        max_bucket_size=2,
    )
|
||||
|
||||
|
||||
def test_initial_state(rate_limiter: InMemoryRateLimiter) -> None:
    """A freshly constructed limiter starts with an empty token bucket."""
    assert rate_limiter.available_tokens == 0.0
|
||||
|
||||
|
||||
def test_sync_wait(rate_limiter: InMemoryRateLimiter) -> None:
    """Step a frozen clock and check token refill/consumption timing."""
    with freeze_time("2023-01-01 00:00:00") as clock:
        rate_limiter.last = time.time()
        # No time has elapsed, so a non-blocking acquire must fail.
        assert not rate_limiter.acquire(blocking=False)

        # 0.1 s at 2 req/s is well under one full token each step.
        for _ in range(2):
            clock.tick(0.1)
            assert rate_limiter.available_tokens == 0
            assert not rate_limiter.acquire(blocking=False)

        # After 2 s total, two tokens are available; drain them both.
        clock.tick(1.8)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1.0
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 0

        # 2.1 s refills two tokens; one acquire leaves one.
        clock.tick(2.1)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1

        # Roughly one more token arrives in 0.9 s; still one left after acquire.
        clock.tick(0.9)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1

        # A long idle period is capped by max_bucket_size=2: after one
        # acquire exactly one token remains, not ~199.
        clock.tick(100)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1
|
||||
|
||||
|
||||
async def test_async_wait(rate_limiter: InMemoryRateLimiter) -> None:
    """Async mirror of the sync timing test, driving ``aacquire``."""
    with freeze_time("2023-01-01 00:00:00") as clock:
        rate_limiter.last = time.time()
        # No time has elapsed, so a non-blocking acquire must fail.
        assert not await rate_limiter.aacquire(blocking=False)

        # 0.1 s at 2 req/s is well under one full token each step.
        for _ in range(2):
            clock.tick(0.1)
            assert rate_limiter.available_tokens == 0
            assert not await rate_limiter.aacquire(blocking=False)

        # After 2 s total, two tokens are available; drain them both.
        clock.tick(1.8)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 1.0
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 0

        # 2.1 s refills two tokens; one acquire leaves one.
        clock.tick(2.1)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 1

        # Roughly one more token arrives in 0.9 s; still one left after acquire.
        clock.tick(0.9)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 1
|
||||
|
||||
|
||||
def test_sync_wait_max_bucket_size() -> None:
    """The bucket never holds more than ``max_bucket_size`` tokens."""
    with freeze_time("2023-01-01 00:00:00") as clock:
        limiter = InMemoryRateLimiter(
            requests_per_second=2,
            check_every_n_seconds=0.1,
            max_bucket_size=500,
        )
        limiter.last = time.time()

        # 100 s at 2 req/s refills 200 tokens; one acquire leaves 199.
        clock.tick(100)
        assert limiter.acquire(blocking=False)
        assert limiter.available_tokens == 199.0

        # 10000 s would refill 20000 tokens, but the bucket caps at
        # 500; after one acquire, 499 remain.
        clock.tick(10000)
        assert limiter.acquire(blocking=False)
        assert limiter.available_tokens == 499.0

        # With plenty of tokens banked, a blocking acquire returns
        # immediately instead of waiting.
        limiter.acquire(blocking=True)
|
||||
|
||||
|
||||
async def test_async_wait_max_bucket_size() -> None:
    """Async mirror: the bucket caps at ``max_bucket_size`` tokens."""
    with freeze_time("2023-01-01 00:00:00") as clock:
        limiter = InMemoryRateLimiter(
            requests_per_second=2,
            check_every_n_seconds=0.1,
            max_bucket_size=500,
        )
        limiter.last = time.time()

        # 100 s at 2 req/s refills 200 tokens; one acquire leaves 199.
        clock.tick(100)
        assert await limiter.aacquire(blocking=False)
        assert limiter.available_tokens == 199.0

        # 10000 s would refill 20000 tokens, but the bucket caps at
        # 500; after one acquire, 499 remain.
        clock.tick(10000)
        assert await limiter.aacquire(blocking=False)
        assert limiter.available_tokens == 499.0

        # With plenty of tokens banked, a blocking async acquire
        # returns immediately instead of waiting.
        await limiter.aacquire(blocking=True)
|
||||
|
||||
|
||||
def test_add_rate_limiter() -> None:
    """A rate limiter composes with another runnable via ``|``."""

    def identity(value: int) -> int:
        """Pass the input through unchanged."""
        return value

    limiter = InMemoryRateLimiter(
        requests_per_second=100,
        check_every_n_seconds=0.1,
        max_bucket_size=10,
    )

    pipeline = limiter | RunnableLambda(identity)
    assert pipeline.invoke(1) == 1
|
||||
|
||||
|
||||
async def test_async_add_rate_limiter() -> None:
    """A rate limiter composes with an async runnable via ``|``."""

    async def identity(value: int) -> int:
        """Pass the input through unchanged."""
        return value

    limiter = InMemoryRateLimiter(
        requests_per_second=100,
        check_every_n_seconds=0.1,
        max_bucket_size=10,
    )

    # mypy cannot follow RunnableLambda's type when given an async function.
    pipeline = limiter | RunnableLambda(identity)  # type: ignore
    assert (await pipeline.ainvoke(1)) == 1
|
Reference in New Issue
Block a user