Removed keyword include and exclude functionality per request from Ivan

This commit is contained in:
Wesley Stewart 2024-03-12 20:07:07 +00:00
parent eab8799174
commit 846414720e
3 changed files with 11 additions and 43 deletions

View File

@@ -9,7 +9,6 @@ from llama_index.core.indices import VectorStoreIndex
from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.postprocessor import (
KeywordNodePostprocessor,
SimilarityPostprocessor,
)
from llama_index.core.storage import StorageContext
@@ -107,26 +106,6 @@ class ChatService:
vector_index_retriever = self.vector_store_component.get_retriever(
index=self.index, context_filter=context_filter
)
# To use the keyword search, you must install spacy:
# pip install spacy
if settings().llm.keywords_include or settings().llm.keywords_exclude:
return ContextChatEngine.from_defaults(
system_prompt=system_prompt,
retriever=vector_index_retriever,
llm=self.llm_component.llm, # Takes no effect at the moment
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window"),
SimilarityPostprocessor(
similarity_cutoff=settings().llm.similarity_value
),
KeywordNodePostprocessor(
required_keywords=settings().llm.keywords_include,
exclude_keywords=settings().llm.keywords_exclude,
),
],
)
else:
return ContextChatEngine.from_defaults(
system_prompt=system_prompt,
retriever=vector_index_retriever,
@@ -138,7 +117,6 @@ class ChatService:
),
],
)
else:
return SimpleChatEngine.from_defaults(
system_prompt=system_prompt,

View File

@@ -106,14 +106,6 @@ class LLMSettings(BaseModel):
None,
description="If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.",
)
keywords_include: list[str] = Field(
[],
description="If set, any documents retrieved from the RAG Must include this keyword",
)
keywords_exclude: list[str] = Field(
[],
description="If set, any documents retrieved from the RAG Must exclude this keyword",
)
class VectorstoreSettings(BaseModel):

View File

@@ -41,8 +41,6 @@ llm:
context_window: 3900
temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
#similarity_value: 0.4 #If set, any documents retrieved from the RAG must meet a certain score. Acceptable values are between 0 and 1.
#keywords_include: ["Apples","Bananas"] #Optional - requires spacy package: pip install spacy
#keywords_exclude: ["Pears","Mangos"] #Optional - requires spacy package: pip install spacy
llamacpp:
prompt_style: "mistral"