Updated with semantic node parser and CondensePlusContextChatEngine and new system prompt

This commit is contained in:
Saurab-Shrestha
2024-06-02 14:49:49 +05:45
parent 175b4e29ac
commit ebe43082cd
3 changed files with 485 additions and 303 deletions

777
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
from dataclasses import dataclass
from injector import inject, singleton
from llama_index.core.chat_engine import ContextChatEngine, SimpleChatEngine
from llama_index.core.chat_engine import ContextChatEngine, SimpleChatEngine, CondensePlusContextChatEngine
from llama_index.core.chat_engine.types import (
BaseChatEngine,
)
@@ -126,7 +126,7 @@ class ChatService:
)
node_postprocessors.append(rerank_postprocessor)
return ContextChatEngine.from_defaults(
return CondensePlusContextChatEngine.from_defaults(
system_prompt=system_prompt,
retriever=vector_index_retriever,
llm=self.llm_component.llm, # Takes no effect at the moment
@@ -209,6 +209,7 @@ class ChatService:
use_context=use_context,
context_filter=context_filter,
)
# chat_engine = chat_engine.as_chat_engine(chat_mode="react", llm=self.llm_component.llm, verbose=True) # configuring ReAct Chat engine
wrapped_response = chat_engine.chat(
message=last_message if last_message is not None else "",
chat_history=chat_history,

View File

@@ -4,7 +4,7 @@ from pathlib import Path
from typing import TYPE_CHECKING, AnyStr, BinaryIO
from injector import inject, singleton
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.node_parser import SentenceWindowNodeParser, SemanticSplitterNodeParser
from llama_index.core.storage import StorageContext
from private_gpt.components.embedding.embedding_component import EmbeddingComponent
@@ -39,7 +39,9 @@ class IngestService:
docstore=node_store_component.doc_store,
index_store=node_store_component.index_store,
)
node_parser = SentenceWindowNodeParser.from_defaults()
node_parser = SemanticSplitterNodeParser.from_defaults(
embed_model=embedding_component.embedding_model,
)
self.ingest_component = get_ingestion_component(
self.storage_context,