chat_models.openai: Set tenacity timeout to openai's recommendation (#2768)
[OpenAI's cookbook](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb) suggests a tenacity backoff of between 1 and 60 seconds. LangChain's backoff is currently between 4 and 10 seconds, which causes frequent timeout errors on my end. This PR changes the backoff bounds to the suggested values.
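For reference, the cookbook's pattern is approximately the following. The function name and the use of `ChatCompletion` (pre-1.0 `openai` client) are an adaptation for illustration, not code from this PR:

```python
import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential


# Retry with randomized exponential backoff between 1 and 60 seconds,
# giving up after 6 attempts (the bounds recommended in the cookbook).
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def chat_completion_with_backoff(**kwargs):
    return openai.ChatCompletion.create(**kwargs)
```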
Parent: 705596b46a
Commit: 1cc7ea333c
langchain/chat_models/openai.py

@@ -32,8 +32,8 @@ logger = logging.getLogger(__file__)
 def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
     import openai
 
-    min_seconds = 4
-    max_seconds = 10
+    min_seconds = 1
+    max_seconds = 60
     # Wait 2^x * 1 second between each retry starting with
     # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
     return retry(
@@ -199,8 +199,8 @@ class ChatOpenAI(BaseChatModel):
     def _create_retry_decorator(self) -> Callable[[Any], Any]:
         import openai
 
-        min_seconds = 4
-        max_seconds = 10
+        min_seconds = 1
+        max_seconds = 60
         # Wait 2^x * 1 second between each retry starting with
         # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
         return retry(
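Both hunks cut off at `return retry(`, so the body of the call is not visible here. As a rough sketch only, assuming tenacity's `wait_exponential` and the pre-1.0 `openai` client's `openai.error.RateLimitError`, the new bounds would plug in roughly like this; the real helper likely retries additional exception types and uses the model's configured `max_retries`:

```python
import logging

from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__file__)


def _create_retry_decorator_sketch(max_retries: int = 6):
    # Hedged sketch, not the file's actual code: build a tenacity decorator
    # with the new 1-60 second exponential backoff bounds.
    import openai

    min_seconds = 1
    max_seconds = 60
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        # Exponential backoff: roughly 2^x seconds per retry, clamped to
        # [min_seconds, max_seconds], so waits cap at 60 seconds.
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        # Assumed exception type; the real decorator may cover more errors.
        retry=retry_if_exception_type(openai.error.RateLimitError),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
```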