feat(llm): Ollama LLM-Embeddings decouple + longer keep_alive settings (#1800)

Robin Boone authored 2024-04-02 16:23:10 +02:00, committed by GitHub
parent 83adc12a8e
commit b3b0140e24
5 changed files with 33 additions and 1 deletion


@@ -209,6 +209,10 @@ class OllamaSettings(BaseModel):
         "http://localhost:11434",
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
+    embedding_api_base: str = Field(
+        api_base,  # defaults to api_base unless specified otherwise
+        description="Base URL of the Ollama embedding API. Defaults to the same value as api_base.",
+    )
     llm_model: str = Field(
         None,
         description="Model to use. Example: 'llama2-uncensored'.",
@@ -217,6 +221,10 @@ class OllamaSettings(BaseModel):
         None,
         description="Model to use. Example: 'nomic-embed-text'.",
     )
+    keep_alive: str = Field(
+        "5m",
+        description="Time the model will stay loaded in memory after a request. Examples: '5m', '5h', '-1'.",
+    )
     tfs_z: float = Field(
         1.0,
         description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",