Mirror of https://github.com/imartinez/privateGPT.git
feat(llm): Ollama LLM-Embeddings decouple + longer keep_alive settings (#1800)
This commit is contained in:
parent 83adc12a8e
commit b3b0140e24
@@ -70,7 +70,7 @@ class EmbeddingComponent:
                 ollama_settings = settings.ollama
                 self.embedding_model = OllamaEmbedding(
                     model_name=ollama_settings.embedding_model,
-                    base_url=ollama_settings.api_base,
+                    base_url=ollama_settings.embedding_api_base,
                 )
             case "azopenai":
                 try:
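In effect, the chat LLM and the embedding model can now point at two different Ollama servers. A minimal sketch of the resulting wiring, assuming the llama-index Ollama/OllamaEmbedding classes used by this commit and two hypothetical host URLs:

# Sketch only: illustrates the decoupled base URLs, not PrivateGPT's exact wiring.
# The two host URLs and model names below are examples, not fixed values.
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.ollama import OllamaEmbedding

llm = Ollama(
    model="mistral",
    base_url="http://localhost:11434",          # ollama_settings.api_base
    request_timeout=120.0,
)
embed_model = OllamaEmbedding(
    model_name="nomic-embed-text",
    base_url="http://ollama_embedding:11434",   # ollama_settings.embedding_api_base
)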
@@ -1,4 +1,6 @@
 import logging
+from collections.abc import Callable
+from typing import Any

 from injector import inject, singleton
 from llama_index.core.llms import LLM, MockLLM
@@ -133,6 +135,24 @@ class LLMComponent:
                     additional_kwargs=settings_kwargs,
                     request_timeout=ollama_settings.request_timeout,
                 )
+
+                if (
+                    ollama_settings.keep_alive
+                    != ollama_settings.model_fields["keep_alive"].default
+                ):
+                    # Modify Ollama methods to use the "keep_alive" field.
+                    def add_keep_alive(func: Callable[..., Any]) -> Callable[..., Any]:
+                        def wrapper(*args: Any, **kwargs: Any) -> Any:
+                            kwargs["keep_alive"] = ollama_settings.keep_alive
+                            return func(*args, **kwargs)
+
+                        return wrapper
+
+                    Ollama.chat = add_keep_alive(Ollama.chat)
+                    Ollama.stream_chat = add_keep_alive(Ollama.stream_chat)
+                    Ollama.complete = add_keep_alive(Ollama.complete)
+                    Ollama.stream_complete = add_keep_alive(Ollama.stream_complete)
+
             case "azopenai":
                 try:
                     from llama_index.llms.azure_openai import (  # type: ignore
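The block above monkey-patches the llama-index Ollama client class-wide, so every chat/stream_chat/complete/stream_complete call carries the configured keep_alive; it only does so when the setting differs from the field default, leaving stock behavior untouched otherwise. A self-contained sketch of the same wrapping idea, using a stand-in client class rather than the real one:

# Self-contained sketch of the keep_alive wrapping idea.
# FakeOllama is a hypothetical stand-in, not the real llama-index client.
from collections.abc import Callable
from typing import Any


class FakeOllama:
    def chat(self, prompt: str, **kwargs: Any) -> str:
        return f"chat({prompt!r}, keep_alive={kwargs.get('keep_alive')!r})"


def add_keep_alive(func: Callable[..., Any], keep_alive: str) -> Callable[..., Any]:
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Force the configured keep_alive into every call.
        kwargs["keep_alive"] = keep_alive
        return func(*args, **kwargs)

    return wrapper


# Patch the method on the class, as the commit does for the chat/complete variants.
FakeOllama.chat = add_keep_alive(FakeOllama.chat, keep_alive="1h")

print(FakeOllama().chat("hello"))  # -> chat('hello', keep_alive='1h')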
@@ -209,6 +209,10 @@ class OllamaSettings(BaseModel):
         "http://localhost:11434",
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
+    embedding_api_base: str = Field(
+        api_base,  # default is same as api_base, unless specified differently
+        description="Base URL of Ollama embedding API. Defaults to the same value as api_base",
+    )
     llm_model: str = Field(
         None,
         description="Model to use. Example: 'llama2-uncensored'.",
@@ -217,6 +221,10 @@ class OllamaSettings(BaseModel):
         None,
         description="Model to use. Example: 'nomic-embed-text'.",
     )
+    keep_alive: str = Field(
+        "5m",
+        description="Time the model will stay loaded in memory after a request. examples: 5m, 5h, '-1' ",
+    )
     tfs_z: float = Field(
         1.0,
         description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",
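The keep_alive string is ultimately forwarded to Ollama's HTTP API, which accepts durations such as 5m or 5h, a negative value (e.g. '-1') to keep the model loaded indefinitely, and 0 to unload it right after the request. A rough sketch of the equivalent raw call, assuming a local Ollama server and the requests library (the model name is just an example):

# Sketch: what the patched calls ultimately send to Ollama. Assumes an Ollama
# server on localhost:11434 with the "mistral" model already pulled.
import requests

resp = requests.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "mistral",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
        "keep_alive": "5h",  # keep the model loaded for 5 hours after this request
    },
    timeout=120,
)
print(resp.json()["message"]["content"])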
@@ -14,6 +14,8 @@ ollama:
   llm_model: mistral
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
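When embedding_api_base points at a second Ollama instance, a quick sanity check is to hit each server's /api/tags route (Ollama's model-listing endpoint). The hostnames below mirror the commented example above and are assumptions about the deployment:

# Sketch: confirm that both Ollama endpoints from the config are reachable.
# Hostnames are hypothetical and should match your api_base / embedding_api_base.
import requests

for name, base in [
    ("llm", "http://localhost:11434"),
    ("embeddings", "http://ollama_embedding:11434"),
]:
    models = requests.get(f"{base}/api/tags", timeout=5).json().get("models", [])
    print(f"{name}: {base} serves {[m['name'] for m in models]}")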
@@ -99,6 +99,8 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama
   request_timeout: 120.0

 azopenai: