From 2123b82a8424ae61ae1283607f923622777e9a80 Mon Sep 17 00:00:00 2001 From: rboone Date: Wed, 27 Mar 2024 10:36:37 +0100 Subject: [PATCH] ollama settings: ability to keep LLM in memory for a longer time + ability to run ollama embedding on another instance --- private_gpt/components/embedding/embedding_component.py | 2 +- private_gpt/components/llm/llm_component.py | 1 + private_gpt/settings/settings.py | 9 ++++++++- settings-ollama.yaml | 2 ++ settings.yaml | 2 ++ 5 files changed, 14 insertions(+), 2 deletions(-) diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index 2967c38b..77e8c3d4 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -70,7 +70,7 @@ class EmbeddingComponent: ollama_settings = settings.ollama self.embedding_model = OllamaEmbedding( model_name=ollama_settings.embedding_model, - base_url=ollama_settings.api_base, + base_url=ollama_settings.embedding_api_base, ) case "azopenai": try: diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 4e46c250..40c33c2d 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -132,6 +132,7 @@ class LLMComponent: context_window=settings.llm.context_window, additional_kwargs=settings_kwargs, request_timeout=ollama_settings.request_timeout, + keep_alive=ollama_settings.keep_alive, ) case "azopenai": try: diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 5896f00d..8f036910 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -209,6 +209,10 @@ class OllamaSettings(BaseModel): "http://localhost:11434", description="Base URL of Ollama API. 
Example: 'https://localhost:11434'.", ) + embedding_api_base: str = Field( + "http://localhost:11434", # same default as api_base, unless specified differently + description="Base URL of Ollama embedding API. Defaults to the same value as api_base", + ) llm_model: str = Field( None, description="Model to use. Example: 'llama2-uncensored'.", @@ -217,6 +221,10 @@ class OllamaSettings(BaseModel): None, description="Model to use. Example: 'nomic-embed-text'.", ) + keep_alive: str = Field( + "5m", + description="Time the model will stay loaded in memory after a request. Examples: '5m', '5h', '-1' (keep loaded indefinitely).", + ) tfs_z: float = Field( 1.0, description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.", diff --git a/settings-ollama.yaml b/settings-ollama.yaml index d7e1a12c..4f0be4ff 100644 --- a/settings-ollama.yaml +++ b/settings-ollama.yaml @@ -14,6 +14,8 @@ ollama: llm_model: mistral embedding_model: nomic-embed-text api_base: http://localhost:11434 + keep_alive: 5m + # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: 0.9 # Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) diff --git a/settings.yaml b/settings.yaml index 87a63ef4..c2207d46 100644 --- a/settings.yaml +++ b/settings.yaml @@ -95,6 +95,8 @@ ollama: llm_model: llama2 embedding_model: nomic-embed-text api_base: http://localhost:11434 + keep_alive: 5m + # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama request_timeout: 120.0 azopenai: