Mirror of https://github.com/imartinez/privateGPT.git
Updated with semantic node parser, CondensePlusContextChatEngine, and a new system prompt
poetry.lock (generated): diff suppressed because it is too large.
private_gpt/server/chat/chat_service.py:

@@ -1,7 +1,7 @@
 from dataclasses import dataclass

 from injector import inject, singleton
-from llama_index.core.chat_engine import ContextChatEngine, SimpleChatEngine
+from llama_index.core.chat_engine import ContextChatEngine, SimpleChatEngine, CondensePlusContextChatEngine
 from llama_index.core.chat_engine.types import (
     BaseChatEngine,
 )
@@ -126,7 +126,7 @@ class ChatService:
                 )
                 node_postprocessors.append(rerank_postprocessor)

-            return ContextChatEngine.from_defaults(
+            return CondensePlusContextChatEngine.from_defaults(
                 system_prompt=system_prompt,
                 retriever=vector_index_retriever,
                 llm=self.llm_component.llm, # Takes no effect at the moment
@@ -209,6 +209,7 @@ class ChatService:
             use_context=use_context,
             context_filter=context_filter,
         )
+        # chat_engine = chat_engine.as_chat_engine(chat_mode="react", llm=self.llm_component.llm, verbose=True) # configuring ReAct Chat engine
         wrapped_response = chat_engine.chat(
             message=last_message if last_message is not None else "",
             chat_history=chat_history,
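Note on this switch: where ContextChatEngine retrieves against the latest user message directly, CondensePlusContextChatEngine first condenses the chat history plus the new message into a standalone question, then retrieves context for that question before answering. Below is a minimal self-contained sketch of the same wiring; it substitutes llama-index's MockLLM and MockEmbedding for privateGPT's injected components so it runs without a model, and the document text and system prompt are placeholders, not taken from the repo.

from llama_index.core import Document, MockEmbedding, VectorStoreIndex
from llama_index.core.chat_engine import CondensePlusContextChatEngine
from llama_index.core.llms import MockLLM

# Stand-ins for privateGPT's injected LLM and embedding components (sketch only).
llm = MockLLM()
embed_model = MockEmbedding(embed_dim=8)

# A tiny index whose retriever plays the role of vector_index_retriever above.
index = VectorStoreIndex.from_documents(
    [Document(text="privateGPT answers questions about local documents.")],
    embed_model=embed_model,
)

# Same call pattern as the changed line: condense history into a standalone
# question, retrieve context for it, then generate the answer.
chat_engine = CondensePlusContextChatEngine.from_defaults(
    retriever=index.as_retriever(),
    llm=llm,
    system_prompt="Answer strictly from the retrieved context.",
)

print(chat_engine.chat("What does privateGPT do?"))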
private_gpt/server/ingest/ingest_service.py:

@@ -4,7 +4,7 @@ from pathlib import Path
 from typing import TYPE_CHECKING, AnyStr, BinaryIO

 from injector import inject, singleton
-from llama_index.core.node_parser import SentenceWindowNodeParser
+from llama_index.core.node_parser import SentenceWindowNodeParser, SemanticSplitterNodeParser
 from llama_index.core.storage import StorageContext

 from private_gpt.components.embedding.embedding_component import EmbeddingComponent
@@ -39,7 +39,9 @@ class IngestService:
             docstore=node_store_component.doc_store,
             index_store=node_store_component.index_store,
         )
-        node_parser = SentenceWindowNodeParser.from_defaults()
+        node_parser = SemanticSplitterNodeParser.from_defaults(
+            embed_model=embedding_component.embedding_model,
+        )

         self.ingest_component = get_ingestion_component(
             self.storage_context,
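Note on the parser change: SentenceWindowNodeParser chunks on fixed sentence windows, while SemanticSplitterNodeParser places chunk boundaries where the embedding similarity between adjacent sentence groups drops, which is why the embedding model must now be passed in. A small sketch under the same assumptions follows; MockEmbedding stands in for embedding_component.embedding_model, and since a mock model scores every boundary identically, a real embedding model is needed to observe semantic splits.

from llama_index.core import Document, MockEmbedding
from llama_index.core.node_parser import SemanticSplitterNodeParser

# MockEmbedding is a placeholder; privateGPT passes its real embedding model.
node_parser = SemanticSplitterNodeParser.from_defaults(
    embed_model=MockEmbedding(embed_dim=8),
)

docs = [Document(text=(
    "Cats purr when content. Cats often nap in the sun. "
    "GPUs excel at matrix math. GPUs need serious cooling."
))]

# Boundaries fall where adjacent-sentence embedding similarity dips.
for node in node_parser.get_nodes_from_documents(docs):
    print(repr(node.get_content()))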