mirror of https://github.com/imartinez/privateGPT.git
synced 2025-06-29 16:58:00 +00:00

commit 6bb4d18ade
parent 4ebf8e8814

    Fixed erroneous file to the correct file
@@ -7,11 +7,9 @@ from llama_index.core.chat_engine.types import (
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.postprocessor import (
    KeywordNodePostprocessor,
    SimilarityPostprocessor,
)
from llama_index.core.storage import StorageContext
from llama_index.core.types import TokenGen
from pydantic import BaseModel
@@ -26,7 +24,6 @@ from private_gpt.open_ai.extensions.context_filter import ContextFilter
from private_gpt.server.chunks.chunks_service import Chunk
from private_gpt.settings.settings import settings


class Completion(BaseModel):
    response: str
    sources: list[Chunk] | None = None
@@ -107,45 +104,31 @@ class ChatService:
            vector_index_retriever = self.vector_store_component.get_retriever(
                index=self.index, context_filter=context_filter
            )
            # Initialize node_postprocessors
            node_postprocessors_tmp: list[
                MetadataReplacementPostProcessor
                | SimilarityPostprocessor
                | KeywordNodePostprocessor
            ]
            node_postprocessors_tmp = [
                MetadataReplacementPostProcessor(target_metadata_key="window"),
            ]
            # If a similarity value is set, use it; if not, don't add it
            if settings().llm.similarity_value is not None:
                node_postprocessors_tmp.append(
                    SimilarityPostprocessor(
                        similarity_cutoff=settings().llm.similarity_value
                    )
                )

            # If keyword filters are set, use them; if not, don't add them
            # (i.e. if settings().llm.keywords_include is not empty or
            # settings().llm.keywords_exclude is not empty).
            # To use the keyword search, you must install spacy:
            #   pip install spacy
            if settings().llm.keywords_include or settings().llm.keywords_exclude:
                node_postprocessors_tmp.append(
                    KeywordNodePostprocessor(
                        required_keywords=settings().llm.keywords_include,
                        exclude_keywords=settings().llm.keywords_exclude,
                    )
                )
                return ContextChatEngine.from_defaults(
                    system_prompt=system_prompt,
                    retriever=vector_index_retriever,
                    llm=self.llm_component.llm,  # Takes no effect at the moment
                    node_postprocessors=[
                        MetadataReplacementPostProcessor(target_metadata_key="window"),
                        SimilarityPostprocessor(similarity_cutoff=settings().llm.similarity_value),
                        KeywordNodePostprocessor(required_keywords=settings().llm.keywords_include, exclude_keywords=settings().llm.keywords_exclude),
                    ],
                )
            else:
                return ContextChatEngine.from_defaults(
                    system_prompt=system_prompt,
                    retriever=vector_index_retriever,
                    llm=self.llm_component.llm,  # Takes no effect at the moment
                    node_postprocessors=[
                        MetadataReplacementPostProcessor(target_metadata_key="window"),
                        SimilarityPostprocessor(similarity_cutoff=settings().llm.similarity_value),
                    ],
                )

            return ContextChatEngine.from_defaults(
                system_prompt=system_prompt,
                retriever=vector_index_retriever,
                llm=self.llm_component.llm,  # Takes no effect at the moment
                node_postprocessors=[
                    MetadataReplacementPostProcessor(target_metadata_key="window"),
                    SimilarityPostprocessor(
                        similarity_cutoff=settings().llm.similarity_value
                    ),
                ],
            )
        else:
            return SimpleChatEngine.from_defaults(
                system_prompt=system_prompt,
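
For context, a minimal self-contained sketch of the pattern this diff revolves around: conditionally building a list of node postprocessors and handing it to ContextChatEngine.from_defaults. This is not privateGPT's actual code; the function name build_chat_engine, the retriever/llm arguments, and the literal cutoff and keyword values are illustrative stand-ins for what privateGPT reads from its settings.

    # Sketch only: assemble node postprocessors, then build a ContextChatEngine.
    # Assumes an existing `retriever` and `llm`; literal values are placeholders.
    from llama_index.core.chat_engine import ContextChatEngine
    from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
    from llama_index.core.postprocessor import (
        KeywordNodePostprocessor,
        SimilarityPostprocessor,
    )

    def build_chat_engine(retriever, llm, system_prompt: str | None = None):
        similarity_cutoff = 0.75      # illustrative value
        keywords_include = ["llama"]  # illustrative; KeywordNodePostprocessor needs spacy
        keywords_exclude: list[str] = []

        # Always replace node text with the sentence-window metadata.
        node_postprocessors = [
            MetadataReplacementPostProcessor(target_metadata_key="window"),
        ]
        # Only add the similarity filter when a cutoff is configured.
        if similarity_cutoff is not None:
            node_postprocessors.append(
                SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)
            )
        # Only add the keyword filter when keyword lists are configured.
        if keywords_include or keywords_exclude:
            node_postprocessors.append(
                KeywordNodePostprocessor(
                    required_keywords=keywords_include,
                    exclude_keywords=keywords_exclude,
                )
            )
        return ContextChatEngine.from_defaults(
            system_prompt=system_prompt,
            retriever=retriever,
            llm=llm,
            node_postprocessors=node_postprocessors,
        )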
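The removed branch also reads settings().llm.similarity_value, settings().llm.keywords_include and settings().llm.keywords_exclude; for that code to run, those fields have to exist on the LLM settings model. A hypothetical sketch of just those extra fields (names taken from the diff, defaults assumed):

    # Hypothetical: only the extra fields the removed code reads, not the full model.
    from pydantic import BaseModel, Field

    class LLMSettings(BaseModel):
        similarity_value: float | None = Field(
            None, description="Optional similarity cutoff applied to retrieved nodes."
        )
        keywords_include: list[str] = Field(
            default_factory=list, description="Keywords a retrieved node must contain."
        )
        keywords_exclude: list[str] = Field(
            default_factory=list, description="Keywords that exclude a retrieved node."
        )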