From 1cc7ea333c0a9899241eb9e3247d5e333eb8948d Mon Sep 17 00:00:00 2001
From: rafael
Date: Fri, 14 Apr 2023 07:08:46 +0200
Subject: [PATCH] chat_models.openai: Set tenacity timeout to openai's
 recommendation (#2768)

[OpenAI's cookbook](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb)
suggests a tenacity backoff between 1 and 60 seconds. Currently langchain's
backoff is between 4 and 10 seconds, which causes frequent timeout errors on
my end. This PR changes the timeout to the suggested values.
---
 langchain/chat_models/openai.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py
index 69e3f9fd2a6..644ed261dcb 100644
--- a/langchain/chat_models/openai.py
+++ b/langchain/chat_models/openai.py
@@ -32,8 +32,8 @@ logger = logging.getLogger(__file__)
 def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
     import openai
 
-    min_seconds = 4
-    max_seconds = 10
+    min_seconds = 1
+    max_seconds = 60
     # Wait 2^x * 1 second between each retry starting with
     # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
     return retry(
@@ -199,8 +199,8 @@ class ChatOpenAI(BaseChatModel):
     def _create_retry_decorator(self) -> Callable[[Any], Any]:
         import openai
 
-        min_seconds = 4
-        max_seconds = 10
+        min_seconds = 1
+        max_seconds = 60
         # Wait 2^x * 1 second between each retry starting with
         # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
         return retry(