Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-07-13 15:14:09 +00:00)
Changed similarity value to work with RAG Settings instead of LLM Settings
This commit is contained in:
parent 89a8e2795e
commit b78a68f40f
@@ -118,7 +118,7 @@ class ChatService:
             node_postprocessors=[
                 MetadataReplacementPostProcessor(target_metadata_key="window"),
                 SimilarityPostprocessor(
-                    similarity_cutoff=settings.llm.similarity_value
+                    similarity_cutoff=settings.rag.similarity_value
                 ),
             ],
         )
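A minimal sketch of the behaviour this hunk configures: llama-index's SimilarityPostprocessor drops retrieved nodes whose score falls below the cutoff, which ChatService now reads from settings.rag.similarity_value. Import paths assume llama-index 0.10+ (llama_index.core); the node texts and scores are made up for illustration.

from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.schema import NodeWithScore, TextNode

# Two fake retrieval results: one strong match, one weak one.
nodes = [
    NodeWithScore(node=TextNode(text="relevant chunk"), score=0.82),
    NodeWithScore(node=TextNode(text="barely related chunk"), score=0.31),
]

# With the cutoff set (e.g. rag.similarity_value: 0.4 in settings.yaml),
# the weak node is filtered out before the context reaches the LLM.
postprocessor = SimilarityPostprocessor(similarity_cutoff=0.4)
kept = postprocessor.postprocess_nodes(nodes)
print([n.score for n in kept])  # [0.82]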
@@ -107,6 +107,12 @@ class LLMSettings(BaseModel):
         description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
     )
 
+
+class RagSettings(BaseModel):
+    similarity_value: float = Field(
+        None,
+        description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
+    )
 
 class VectorstoreSettings(BaseModel):
     database: Literal["chroma", "qdrant", "pgvector"]
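A standalone sketch of the new model (assuming the same pydantic BaseModel/Field pattern that LLMSettings already uses): the field defaults to None when the setting is omitted, so the cutoff is only enforced when a value is supplied.

from pydantic import BaseModel, Field

class RagSettings(BaseModel):
    similarity_value: float = Field(
        None,
        description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
    )

print(RagSettings().similarity_value)                      # None -> no filtering applied
print(RagSettings(similarity_value=0.4).similarity_value)  # 0.4  -> drop nodes scoring below 0.4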
@@ -346,6 +352,7 @@ class Settings(BaseModel):
     data: DataSettings
     ui: UISettings
     llm: LLMSettings
+    rag: RagSettings
     embedding: EmbeddingSettings
     llamacpp: LlamaCPPSettings
     huggingface: HuggingFaceSettings
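A reduced sketch (hypothetical, with only the fields relevant here) of what wiring RagSettings into the top-level Settings model provides: the value becomes reachable as settings.rag.similarity_value, which is exactly what the ChatService hunk above reads.

from pydantic import BaseModel, Field

class LLMSettings(BaseModel):
    temperature: float = 0.1

class RagSettings(BaseModel):
    similarity_value: float = Field(None)

class Settings(BaseModel):
    llm: LLMSettings
    rag: RagSettings

settings = Settings(llm={}, rag={"similarity_value": 0.4})
print(settings.rag.similarity_value)  # 0.4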
@@ -40,7 +40,9 @@ llm:
   max_new_tokens: 512
   context_window: 3900
   temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
-  #similarity_value: 0.4 #If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.
+
+rag:
+  #similarity_value: 0.4 #Optional - If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.
 
 llamacpp:
   prompt_style: "mistral"
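A sketch of how the new rag: block in settings.yaml reaches the model (PyYAML is used here for brevity; the project's own settings loader is more involved). Uncommenting similarity_value is what actually turns the cutoff on; leaving it commented keeps the default of None.

import yaml
from pydantic import BaseModel, Field

class RagSettings(BaseModel):
    similarity_value: float = Field(None)

raw = yaml.safe_load("""
rag:
  similarity_value: 0.4
""")

print(RagSettings(**raw["rag"]).similarity_value)  # 0.4

# With the key left commented out, the rag section is empty and the default applies.
print(RagSettings(**yaml.safe_load("rag: {}")["rag"]).similarity_value)  # None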