Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-07-02 02:03:01 +00:00)
Removed keyword include and exclude functionality per request from Ivan
parent eab8799174
commit 846414720e
private_gpt/server/chat/chat_service.py
@@ -9,7 +9,6 @@ from llama_index.core.indices import VectorStoreIndex
 from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core.postprocessor import (
-    KeywordNodePostprocessor,
     SimilarityPostprocessor,
 )
 from llama_index.core.storage import StorageContext
@@ -107,38 +106,17 @@ class ChatService:
             vector_index_retriever = self.vector_store_component.get_retriever(
                 index=self.index, context_filter=context_filter
             )
-
-            # To use the keyword search, you must install spacy:
-            # pip install spacy
-            if settings().llm.keywords_include or settings().llm.keywords_exclude:
-                return ContextChatEngine.from_defaults(
-                    system_prompt=system_prompt,
-                    retriever=vector_index_retriever,
-                    llm=self.llm_component.llm,  # Takes no effect at the moment
-                    node_postprocessors=[
-                        MetadataReplacementPostProcessor(target_metadata_key="window"),
-                        SimilarityPostprocessor(
-                            similarity_cutoff=settings().llm.similarity_value
-                        ),
-                        KeywordNodePostprocessor(
-                            required_keywords=settings().llm.keywords_include,
-                            exclude_keywords=settings().llm.keywords_exclude,
-                        ),
-                    ],
-                )
-            else:
-                return ContextChatEngine.from_defaults(
-                    system_prompt=system_prompt,
-                    retriever=vector_index_retriever,
-                    llm=self.llm_component.llm,  # Takes no effect at the moment
-                    node_postprocessors=[
-                        MetadataReplacementPostProcessor(target_metadata_key="window"),
-                        SimilarityPostprocessor(
-                            similarity_cutoff=settings().llm.similarity_value
-                        ),
-                    ],
-                )
-
+            return ContextChatEngine.from_defaults(
+                system_prompt=system_prompt,
+                retriever=vector_index_retriever,
+                llm=self.llm_component.llm,  # Takes no effect at the moment
+                node_postprocessors=[
+                    MetadataReplacementPostProcessor(target_metadata_key="window"),
+                    SimilarityPostprocessor(
+                        similarity_cutoff=settings().llm.similarity_value
+                    ),
+                ],
+            )
         else:
             return SimpleChatEngine.from_defaults(
                 system_prompt=system_prompt,
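
Note: anyone who still wants the removed keyword filtering can reproduce it outside ChatService. A minimal standalone sketch of the KeywordNodePostprocessor behavior this commit drops; the node texts and keyword lists are illustrative placeholders, and spacy must be installed (pip install spacy):

from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.schema import NodeWithScore, TextNode

# Illustrative retrieved nodes; in ChatService these would come from
# vector_index_retriever rather than being built by hand.
nodes = [
    NodeWithScore(node=TextNode(text="Apples are in season."), score=0.9),
    NodeWithScore(node=TextNode(text="Pears are in season."), score=0.8),
]

keyword_filter = KeywordNodePostprocessor(
    required_keywords=["Apples"],  # keep only nodes mentioning this keyword
    exclude_keywords=["Pears"],    # drop nodes mentioning this keyword
)
filtered = keyword_filter.postprocess_nodes(nodes)  # only the "Apples" node survives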
private_gpt/settings/settings.py
@@ -106,14 +106,6 @@ class LLMSettings(BaseModel):
         None,
         description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
     )
-    keywords_include: list[str] = Field(
-        [],
-        description="If set, any documents retrieved from the RAG Must include this keyword",
-    )
-    keywords_exclude: list[str] = Field(
-        [],
-        description="If set, any documents retrieved from the RAG Must exclude this keyword",
-    )
 
 
 class VectorstoreSettings(BaseModel):
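
Note: the similarity cutoff is unaffected by this commit. A minimal sketch of what the surviving similarity_value setting does, assuming illustrative scores and the 0.4 cutoff from the commented settings.yaml example below:

from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.schema import NodeWithScore, TextNode

nodes = [
    NodeWithScore(node=TextNode(text="strong match"), score=0.72),
    NodeWithScore(node=TextNode(text="weak match"), score=0.21),
]

# Mirrors similarity_cutoff=settings().llm.similarity_value in the diff above;
# 0.4 is the illustrative value from the settings.yaml example.
cutoff = SimilarityPostprocessor(similarity_cutoff=0.4)
kept = cutoff.postprocess_nodes(nodes)  # the 0.21-scored node is dropped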
settings.yaml
@@ -41,8 +41,6 @@ llm:
   context_window: 3900
   temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
   #similarity_value: 0.4 #If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.
-  #keywords_include: ["Apples","Bananas"] #Optional - requires spacy package: pip install spacy
-  #keywords_exclude: ["Pears","Mangos"] #Optional - requires spacy package: pip install spacy
 
 llamacpp:
   prompt_style: "mistral"
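
Note: a hedged sketch of how the surviving llm keys map onto the Pydantic settings model, trimmed to the fields visible in this diff (the real LLMSettings declares more fields than shown here):

from pydantic import BaseModel, Field

class LLMSettings(BaseModel):
    # Only the fields this diff touches; defaults taken from the yaml above.
    context_window: int = 3900
    temperature: float = 0.1
    similarity_value: float | None = Field(
        None,
        description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
    )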