diff --git a/private_gpt/server/chat/chat_service.py b/private_gpt/server/chat/chat_service.py
index 16e464dc..6ae771d4 100644
--- a/private_gpt/server/chat/chat_service.py
+++ b/private_gpt/server/chat/chat_service.py
@@ -9,7 +9,6 @@ from llama_index.core.indices import VectorStoreIndex
 from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core.postprocessor import (
-    KeywordNodePostprocessor,
     SimilarityPostprocessor,
 )
 from llama_index.core.storage import StorageContext
@@ -107,38 +106,17 @@ class ChatService:
             vector_index_retriever = self.vector_store_component.get_retriever(
                 index=self.index, context_filter=context_filter
             )
-
-            # To use the keyword search, you must install spacy:
-            # pip install spacy
-            if settings().llm.keywords_include or settings().llm.keywords_exclude:
-                return ContextChatEngine.from_defaults(
-                    system_prompt=system_prompt,
-                    retriever=vector_index_retriever,
-                    llm=self.llm_component.llm,  # Takes no effect at the moment
-                    node_postprocessors=[
-                        MetadataReplacementPostProcessor(target_metadata_key="window"),
-                        SimilarityPostprocessor(
-                            similarity_cutoff=settings().llm.similarity_value
-                        ),
-                        KeywordNodePostprocessor(
-                            required_keywords=settings().llm.keywords_include,
-                            exclude_keywords=settings().llm.keywords_exclude,
-                        ),
-                    ],
-                )
-            else:
-                return ContextChatEngine.from_defaults(
-                    system_prompt=system_prompt,
-                    retriever=vector_index_retriever,
-                    llm=self.llm_component.llm,  # Takes no effect at the moment
-                    node_postprocessors=[
-                        MetadataReplacementPostProcessor(target_metadata_key="window"),
-                        SimilarityPostprocessor(
-                            similarity_cutoff=settings().llm.similarity_value
-                        ),
-                    ],
-                )
-
+            return ContextChatEngine.from_defaults(
+                system_prompt=system_prompt,
+                retriever=vector_index_retriever,
+                llm=self.llm_component.llm,  # Takes no effect at the moment
+                node_postprocessors=[
+                    MetadataReplacementPostProcessor(target_metadata_key="window"),
+                    SimilarityPostprocessor(
+                        similarity_cutoff=settings().llm.similarity_value
+                    ),
+                ],
+            )
         else:
             return SimpleChatEngine.from_defaults(
                 system_prompt=system_prompt,
diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py
index b345c738..6f8d2809 100644
--- a/private_gpt/settings/settings.py
+++ b/private_gpt/settings/settings.py
@@ -106,14 +106,6 @@ class LLMSettings(BaseModel):
         None,
         description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
     )
-    keywords_include: list[str] = Field(
-        [],
-        description="If set, any documents retrieved from the RAG Must include this keyword",
-    )
-    keywords_exclude: list[str] = Field(
-        [],
-        description="If set, any documents retrieved from the RAG Must exclude this keyword",
-    )


 class VectorstoreSettings(BaseModel):
diff --git a/settings.yaml b/settings.yaml
index 6ee5d35c..8eb5c839 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -41,8 +41,6 @@ llm:
   context_window: 3900
   temperature: 0.1      # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
   #similarity_value: 0.4 #If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.
-  #keywords_include: ["Apples","Bananas"] #Optional - requires spacy package: pip install spacy
-  #keywords_exclude: ["Pears","Mangos"] #Optional - requires spacy package: pip install spacy

 llamacpp:
   prompt_style: "mistral"