From f3aa988e5c7fbab8e1d30c29b294adb60a807a88 Mon Sep 17 00:00:00 2001
From: Stephen Gresham
Date: Fri, 15 Mar 2024 15:57:13 +1100
Subject: [PATCH] added request_timeout to ollama, default set to 30.0 in
 settings.yaml and settings-ollama.yaml

---
 private_gpt/components/llm/llm_component.py | 1 +
 settings-ollama.yaml                        | 1 +
 settings.yaml                               | 1 +
 3 files changed, 3 insertions(+)

diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index d4e13a58..a61d285d 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -131,6 +131,7 @@ class LLMComponent:
                     temperature=settings.llm.temperature,
                     context_window=settings.llm.context_window,
                     additional_kwargs=settings_kwargs,
+                    request_timeout=ollama_settings.request_timeout
                 )
             case "mock":
                 self.llm = MockLLM()
diff --git a/settings-ollama.yaml b/settings-ollama.yaml
index 9a0aaed0..19a84070 100644
--- a/settings-ollama.yaml
+++ b/settings-ollama.yaml
@@ -19,6 +19,7 @@ ollama:
   top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
   repeat_last_n: 64 # Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)
   repeat_penalty: 1.2 # Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)
+  request_timeout: 30.0 # Time elapsed until ollama times out the request. Default is 30s. Format is float.

 vectorstore:
   database: qdrant
diff --git a/settings.yaml b/settings.yaml
index a9a676bd..7ada0159 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -86,3 +86,4 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  request_timeout: 30.0