ollama settings: ability to keep LLM in memory for a longer time + ability to run ollama embedding on another instance

rboone 2024-03-27 10:36:37 +01:00
parent 087cb0b7b7
commit 2123b82a84
5 changed files with 14 additions and 2 deletions


@@ -70,7 +70,7 @@ class EmbeddingComponent:
                 ollama_settings = settings.ollama
                 self.embedding_model = OllamaEmbedding(
                     model_name=ollama_settings.embedding_model,
-                    base_url=ollama_settings.api_base,
+                    base_url=ollama_settings.embedding_api_base,
                 )
             case "azopenai":
                 try:
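The change above routes embedding requests to their own base URL, so embeddings can be served by a second Ollama instance while the LLM stays on the default one. A minimal sketch of what this enables, assuming the same llama-index `OllamaEmbedding` class used above; the import path and the `ollama_embedding` hostname are illustrative:

```python
from llama_index.embeddings.ollama import OllamaEmbedding

# LLM traffic keeps going to the default instance; embeddings are
# served by a separate Ollama host (hostname is a placeholder).
embedding_model = OllamaEmbedding(
    model_name="nomic-embed-text",
    base_url="http://ollama_embedding:11434",  # embedding_api_base
)
print(len(embedding_model.get_text_embedding("hello")))  # e.g. 768 dims
```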


@@ -132,6 +132,7 @@ class LLMComponent:
                     context_window=settings.llm.context_window,
                     additional_kwargs=settings_kwargs,
                     request_timeout=ollama_settings.request_timeout,
+                    keep_alive=ollama_settings.keep_alive,
                 )
             case "azopenai":
                 try:
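For reference, `keep_alive` follows Ollama's duration syntax: values such as "5m" or "5h" keep the model loaded for that long after each request, "-1" (or any negative duration) keeps it loaded indefinitely, and "0" unloads it immediately. A minimal sketch, assuming the same llama-index `Ollama` class used above (model and values are placeholders):

```python
from llama_index.llms.ollama import Ollama

# Keep the model resident between requests so follow-up prompts skip
# the model load; "-1" would pin it in memory indefinitely.
llm = Ollama(
    model="mistral",
    base_url="http://localhost:11434",
    request_timeout=120.0,
    keep_alive="5h",
)
print(llm.complete("Say hi in five words."))
```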


@@ -209,6 +209,10 @@ class OllamaSettings(BaseModel):
         "http://localhost:11434",
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
+    embedding_api_base: str = Field(
+        api_base,  # default is same as api_base, unless specified differently
+        description="Base URL of the Ollama embedding API. Defaults to the same value as api_base.",
+    )
     llm_model: str = Field(
         None,
         description="Model to use. Example: 'llama2-uncensored'.",
@@ -217,6 +221,10 @@ class OllamaSettings(BaseModel):
         None,
         description="Model to use. Example: 'nomic-embed-text'.",
     )
+    keep_alive: str = Field(
+        "5m",
+        description="Time the model will stay loaded in memory after a request. Examples: '5m', '5h', '-1'.",
+    )
     tfs_z: float = Field(
         1.0,
         description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",
@@ -246,7 +254,6 @@ class OllamaSettings(BaseModel):
         description="Time elapsed until ollama times out the request. Default is 120s. Format is float.",
     )
 
-
 class AzureOpenAISettings(BaseModel):
     api_key: str
     azure_endpoint: str
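Note that the default passed to the `embedding_api_base` field is the `api_base` field object itself; the intent is that the embedding URL falls back to `api_base` when no separate value is configured. A self-contained sketch of that fallback semantics (not privateGPT's actual loader, just an illustration using a pydantic v2 validator):

```python
from pydantic import BaseModel, model_validator

class OllamaSettingsSketch(BaseModel):
    api_base: str = "http://localhost:11434"
    embedding_api_base: str | None = None  # falls back to api_base
    keep_alive: str = "5m"

    @model_validator(mode="after")
    def _default_embedding_api_base(self) -> "OllamaSettingsSketch":
        # If no dedicated embedding URL was given, reuse the LLM one.
        if self.embedding_api_base is None:
            self.embedding_api_base = self.api_base
        return self

s = OllamaSettingsSketch(api_base="http://ollama_llm:11434")
assert s.embedding_api_base == "http://ollama_llm:11434"
```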


@@ -14,6 +14,8 @@ ollama:
   llm_model: mistral
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another Ollama instance
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
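With the split setup, it is worth confirming both Ollama instances are reachable before starting the app. A small check, assuming the placeholder hosts from the commented example above and Ollama's standard /api/tags endpoint (which lists the models available on an instance):

```python
import requests

# Hosts are placeholders matching the example config above.
for name, base in {
    "llm": "http://localhost:11434",
    "embedding": "http://ollama_embedding:11434",
}.items():
    resp = requests.get(f"{base}/api/tags", timeout=5)
    resp.raise_for_status()
    models = [m["name"] for m in resp.json().get("models", [])]
    print(f"{name} instance at {base}: {models}")
```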


@@ -95,6 +95,8 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another Ollama instance
   request_timeout: 120.0
 azopenai: