Add sources to completions APIs and UI (#1206)

Iván Martínez
2023-11-11 21:39:15 +01:00
committed by GitHub
parent dbd99e7b4b
commit a22969ad1f
7 changed files with 159 additions and 70 deletions

View File

@@ -20,6 +20,7 @@ class ChatBody(BaseModel):
messages: list[OpenAIMessage]
use_context: bool = False
context_filter: ContextFilter | None = None
+ include_sources: bool = True
stream: bool = False
model_config = {
@@ -34,6 +35,7 @@ class ChatBody(BaseModel):
],
"stream": False,
"use_context": True,
"include_sources": True,
"context_filter": {
"docs_ids": ["c202d5e6-7b69-4869-81cc-dd574ee8ee11"]
},
@@ -58,6 +60,9 @@ def chat_completion(body: ChatBody) -> OpenAICompletion | StreamingResponse:
Ingested documents IDs can be found using `/ingest/list` endpoint. If you want
all ingested documents to be used, remove `context_filter` altogether.
+ When using `'include_sources': true`, the API will return the source Chunks used
+ to create the response, which come from the context provided.
When using `'stream': true`, the API will return data chunks following [OpenAI's
streaming model](https://platform.openai.com/docs/api-reference/chat/streaming):
```
@@ -71,12 +76,18 @@ def chat_completion(body: ChatBody) -> OpenAICompletion | StreamingResponse:
ChatMessage(content=m.content, role=MessageRole(m.role)) for m in body.messages
]
if body.stream:
- stream = service.stream_chat(
+ completion_gen = service.stream_chat(
all_messages, body.use_context, body.context_filter
)
return StreamingResponse(
- to_openai_sse_stream(stream), media_type="text/event-stream"
+ to_openai_sse_stream(
+ completion_gen.response,
+ completion_gen.sources if body.include_sources else None,
+ ),
+ media_type="text/event-stream",
)
else:
- response = service.chat(all_messages, body.use_context, body.context_filter)
- return to_openai_response(response)
+ completion = service.chat(all_messages, body.use_context, body.context_filter)
+ return to_openai_response(
+ completion.response, completion.sources if body.include_sources else None
+ )
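
Taken together, the router changes above let a client opt in to receiving the source chunks alongside the completion. Below is a minimal sketch of a non-streaming request exercising the new `include_sources` flag; the base URL/port and the exact place where `sources` appear in the response JSON are illustrative assumptions, not something this diff specifies.

```python
import requests

# Assumed local PrivateGPT instance; adjust host/port to your deployment.
URL = "http://localhost:8001/v1/chat/completions"

body = {
    "messages": [{"role": "user", "content": "How does the termination clause work?"}],
    "use_context": True,
    "include_sources": True,  # flag introduced by this commit
    "stream": False,
}

resp = requests.post(URL, json=body, timeout=60)
resp.raise_for_status()
completion = resp.json()

# Illustrative only: the router passes sources into to_openai_response, so the
# completion payload is expected to carry them (e.g. per choice).
for choice in completion.get("choices", []):
    for source in choice.get("sources") or []:
        print(source)
```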

View File

@@ -1,13 +1,14 @@
- from collections.abc import Sequence
- from typing import TYPE_CHECKING, Any
from injector import inject, singleton
from llama_index import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.chat_engine import ContextChatEngine
+ from llama_index.chat_engine.types import (
+ BaseChatEngine,
+ )
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.llm_predictor.utils import stream_chat_response_to_tokens
from llama_index.llms import ChatMessage
from llama_index.types import TokenGen
+ from pydantic import BaseModel
from private_gpt.components.embedding.embedding_component import EmbeddingComponent
from private_gpt.components.llm.llm_component import LLMComponent
@@ -16,12 +17,17 @@ from private_gpt.components.vector_store.vector_store_component import (
VectorStoreComponent,
)
from private_gpt.open_ai.extensions.context_filter import ContextFilter
+ from private_gpt.server.chunks.chunks_service import Chunk
- if TYPE_CHECKING:
- from llama_index.chat_engine.types import (
- AgentChatResponse,
- StreamingAgentChatResponse,
- )
+ class Completion(BaseModel):
+ response: str
+ sources: list[Chunk] | None = None
+ class CompletionGen(BaseModel):
+ response: TokenGen
+ sources: list[Chunk] | None = None
@singleton
@@ -51,66 +57,64 @@ class ChatService:
show_progress=True,
)
- def _chat_with_contex(
- self,
- message: str,
- context_filter: ContextFilter | None = None,
- chat_history: Sequence[ChatMessage] | None = None,
- streaming: bool = False,
- ) -> Any:
+ def _chat_engine(
+ self, context_filter: ContextFilter | None = None
+ ) -> BaseChatEngine:
vector_index_retriever = self.vector_store_component.get_retriever(
index=self.index, context_filter=context_filter
)
- chat_engine = ContextChatEngine.from_defaults(
+ return ContextChatEngine.from_defaults(
retriever=vector_index_retriever,
service_context=self.service_context,
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window"),
],
)
- if streaming:
- result = chat_engine.stream_chat(message, chat_history)
- else:
- result = chat_engine.chat(message, chat_history)
- return result
def stream_chat(
self,
messages: list[ChatMessage],
use_context: bool = False,
context_filter: ContextFilter | None = None,
- ) -> TokenGen:
+ ) -> CompletionGen:
if use_context:
last_message = messages[-1].content
- response: StreamingAgentChatResponse = self._chat_with_contex(
+ chat_engine = self._chat_engine(context_filter=context_filter)
+ streaming_response = chat_engine.stream_chat(
message=last_message if last_message is not None else "",
chat_history=messages[:-1],
- context_filter=context_filter,
- streaming=True,
)
- response_gen = response.response_gen
+ sources = [
+ Chunk.from_node(node) for node in streaming_response.source_nodes
+ ]
+ completion_gen = CompletionGen(
+ response=streaming_response.response_gen, sources=sources
+ )
else:
stream = self.llm_service.llm.stream_chat(messages)
- response_gen = stream_chat_response_to_tokens(stream)
- return response_gen
+ completion_gen = CompletionGen(
+ response=stream_chat_response_to_tokens(stream)
+ )
+ return completion_gen
def chat(
self,
messages: list[ChatMessage],
use_context: bool = False,
context_filter: ContextFilter | None = None,
- ) -> str:
+ ) -> Completion:
if use_context:
last_message = messages[-1].content
- wrapped_response: AgentChatResponse = self._chat_with_contex(
+ chat_engine = self._chat_engine(context_filter=context_filter)
+ wrapped_response = chat_engine.chat(
message=last_message if last_message is not None else "",
chat_history=messages[:-1],
- context_filter=context_filter,
- streaming=False,
)
- response = wrapped_response.response
+ sources = [Chunk.from_node(node) for node in wrapped_response.source_nodes]
+ completion = Completion(response=wrapped_response.response, sources=sources)
else:
chat_response = self.llm_service.llm.chat(messages)
response_content = chat_response.message.content
response = response_content if response_content is not None else ""
- return response
+ completion = Completion(response=response)
+ return completion
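
With the service now returning `Completion` and `CompletionGen` instead of bare strings and token generators, callers get the answer and its supporting chunks from a single object. A rough sketch of consuming both methods directly follows, assuming `chat_service` is an injected `ChatService` instance and `history` is a `list[ChatMessage]`; both names are placeholders for this illustration.

```python
from llama_index.llms import ChatMessage, MessageRole

history = [ChatMessage(role=MessageRole.USER, content="Summarize the ingested report.")]

# Blocking call: the full answer plus the chunks it was grounded on.
completion = chat_service.chat(history, use_context=True)
print(completion.response)
for chunk in completion.sources or []:   # None when use_context is False
    print("source:", chunk)

# Streaming call: sources are available up front, tokens arrive lazily.
completion_gen = chat_service.stream_chat(history, use_context=True)
for chunk in completion_gen.sources or []:
    print("source:", chunk)
for token in completion_gen.response:    # TokenGen yields text deltas
    print(token, end="", flush=True)
```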