Changed response_style from bool to str; replaced checkbox with dropdown menu

parent 955f86b396
commit c0f238024f
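In short, the commit replaces a True/False streaming flag with a str-backed Enum (Styles) and dispatches on it with match/case, so a new response style later means adding a case rather than re-interpreting a boolean. Before the full diff, here is a minimal self-contained sketch of that pattern; FakeChatService is an illustrative stand-in, not part of the commit:

# Minimal sketch of the bool-to-Enum dispatch pattern used in this commit.
# FakeChatService is a hypothetical stand-in for the real chat service.
from enum import Enum
from typing import Iterator


class Styles(str, Enum):
    STREAMING = "Streaming"
    NON_STREAMING = "Non-Streaming"


class FakeChatService:
    def stream_chat(self, message: str) -> Iterator[str]:
        yield from message.split()  # emit the reply token by token

    def chat(self, message: str) -> str:
        return message  # emit the whole reply at once


def respond(service: FakeChatService, message: str, style: Styles) -> Iterator[str]:
    # Dispatching on the Enum keeps both branches named and extensible.
    match style:
        case Styles.STREAMING:
            yield from service.stream_chat(message)
        case Styles.NON_STREAMING:
            yield from [service.chat(message)]


print(list(respond(FakeChatService(), "hello world", Styles.STREAMING)))

The full diff follows.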
@@ -51,6 +51,17 @@ MODES: list[Modes] = [
 ]
 
 
+class Styles(str, Enum):
+    STREAMING = "Streaming"
+    NON_STREAMING = "Non-Streaming"
+
+
+STYLES: list[Styles] = [
+    Styles.STREAMING,
+    Styles.NON_STREAMING
+]
+
+
 class Source(BaseModel):
     file: str
     page: str
@@ -98,12 +109,13 @@ class PrivateGptUi:
 
         self._selected_filename = None
 
-        self._response_style = True
-
         # Initialize system prompt based on default mode
         self.mode = MODES[0]
        self._system_prompt = self._get_default_system_prompt(self.mode)
 
+        # Initialize default response style: Streaming
+        self.response_style = STYLES[0]
+
     def _chat(
         self, message: str, history: list[list[str]], mode: Modes, *_: Any
     ) -> Any:
@@ -184,28 +196,30 @@ class PrivateGptUi:
                            docs_ids.append(ingested_document.doc_id)
                    context_filter = ContextFilter(docs_ids=docs_ids)
 
-                if self._response_style:
-                    query_stream = self._chat_service.stream_chat(
-                        all_messages, use_context=False
-                    )
-                    yield from yield_deltas(query_stream)
-                else:
-                    query_response = self._chat_service.chat(
-                        all_messages, use_context=False
-                    ).response
-                    yield from [query_response]
+                match self.response_style:
+                    case Styles.STREAMING:
+                        query_stream = self._chat_service.stream_chat(
+                            all_messages, use_context=False
+                        )
+                        yield from yield_deltas(query_stream)
+                    case Styles.NON_STREAMING:
+                        query_response = self._chat_service.chat(
+                            all_messages, use_context=False
+                        ).response
+                        yield from [query_response]
 
             case Modes.BASIC_CHAT_MODE:
-                if self._response_style:
-                    llm_stream = self._chat_service.stream_chat(
-                        all_messages, use_context=False
-                    )
-                    yield from yield_deltas(llm_stream)
-                else:
-                    llm_response = self._chat_service.chat(
-                        all_messages, use_context=False
-                    ).response
-                    yield from [llm_response]
+                match self.response_style:
+                    case Styles.STREAMING:
+                        llm_stream = self._chat_service.stream_chat(
+                            all_messages, use_context=False
+                        )
+                        yield from yield_deltas(llm_stream)
+                    case Styles.NON_STREAMING:
+                        llm_response = self._chat_service.chat(
+                            all_messages, use_context=False
+                        ).response
+                        yield from [llm_response]
 
             case Modes.SEARCH_MODE:
                 response = self._chunks_service.retrieve_relevant(
@@ -233,20 +247,21 @@ class PrivateGptUi:
                            docs_ids.append(ingested_document.doc_id)
                    context_filter = ContextFilter(docs_ids=docs_ids)
 
-                if self._response_style:
-                    summary_stream = self._summarize_service.stream_summarize(
-                        use_context=True,
-                        context_filter=context_filter,
-                        instructions=message,
-                    )
-                    yield from yield_tokens(summary_stream)
-                else:
-                    summary_response = self._summarize_service.summarize(
-                        use_context=True,
-                        context_filter=context_filter,
-                        instructions=message,
-                    )
-                    yield from summary_response
+                match self.response_style:
+                    case Styles.STREAMING:
+                        summary_stream = self._summarize_service.stream_summarize(
+                            use_context=True,
+                            context_filter=context_filter,
+                            instructions=message,
+                        )
+                        yield from yield_tokens(summary_stream)
+                    case Styles.NON_STREAMING:
+                        summary_response = self._summarize_service.summarize(
+                            use_context=True,
+                            context_filter=context_filter,
+                            instructions=message,
+                        )
+                        yield from summary_response
 
         # On initialization and on mode change, this function set the system prompt
         # to the default prompt based on the mode (and user settings).
@@ -299,8 +314,8 @@ class PrivateGptUi:
            gr.update(value=self._explanation_mode),
        ]
 
-    def _set_response_style(self, response_style: bool) -> None:
-        self._response_style = response_style
+    def _set_current_response_style(self, response_style: Styles) -> Any:
+        self.response_style = response_style
 
     def _list_ingested_files(self) -> list[list[str]]:
         files = set()
@@ -425,11 +440,14 @@ class PrivateGptUi:
                    max_lines=3,
                    interactive=False,
                )
-                response_style = gr.Checkbox(
-                    label="Response Style: Streaming", value=self._response_style
-                )
-                response_style.input(
-                    self._set_response_style, inputs=response_style
+                default_response_style = STYLES[0]
+                response_style = (
+                    gr.Dropdown(
+                        [response_style.value for response_style in STYLES],
+                        label="Response Style",
+                        value=default_response_style,
+                        interactive=True,
+                    ),
                )
                upload_button = gr.components.UploadButton(
                    "Upload File(s)",
@@ -524,6 +542,10 @@ class PrivateGptUi:
                    self._set_system_prompt,
                    inputs=system_prompt_input,
                )
+                # When response style changes
+                response_style[0].change(
+                    self._set_current_response_style, inputs=response_style
+                )
 
        def get_model_label() -> str | None:
            """Get model label from llm mode setting YAML.
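Note that the commit assigns the Dropdown inside a one-element tuple (response_style = (gr.Dropdown(...),)), which is why the change handler is attached via response_style[0].change(...). For reference, a standalone sketch of the same dropdown wiring, assuming a recent Gradio and dropping the tuple for simplicity; names outside the diff are illustrative:

# Standalone sketch of the Streaming/Non-Streaming dropdown, assuming Gradio
# is installed; set_style is a stand-in for _set_current_response_style.
from enum import Enum

import gradio as gr


class Styles(str, Enum):
    STREAMING = "Streaming"
    NON_STREAMING = "Non-Streaming"


def set_style(style: str) -> None:
    # Stand-in for PrivateGptUi._set_current_response_style
    print(f"Response style set to: {style}")


with gr.Blocks() as demo:
    response_style = gr.Dropdown(
        [s.value for s in Styles],
        label="Response Style",
        value=Styles.STREAMING.value,
        interactive=True,
    )
    response_style.change(set_style, inputs=response_style)

demo.launch()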