Changed response_style from bool to str; checkbox to dropdown menu

Jason 2024-09-23 14:15:37 -04:00
parent 955f86b396
commit c0f238024f


@@ -51,6 +51,17 @@ MODES: list[Modes] = [
 ]
 
+
+class Styles(str, Enum):
+    STREAMING = "Streaming"
+    NON_STREAMING = "Non-Streaming"
+
+
+STYLES: list[Styles] = [
+    Styles.STREAMING,
+    Styles.NON_STREAMING
+]
+
 
 class Source(BaseModel):
     file: str
     page: str
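Because Styles subclasses str, its members compare equal to their plain string values. That is what lets the raw strings that later come back from the UI dropdown satisfy the match statements below. A minimal standalone sketch (illustration only, not part of the commit):

    from enum import Enum

    class Styles(str, Enum):
        STREAMING = "Streaming"
        NON_STREAMING = "Non-Streaming"

    # str mixin: members compare equal to their raw string values,
    # and a raw value can be looked up back to its member.
    assert Styles.STREAMING == "Streaming"
    assert Styles("Non-Streaming") is Styles.NON_STREAMING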
@@ -98,12 +109,13 @@ class PrivateGptUi:
         self._selected_filename = None
-        self._response_style = True
 
         # Initialize system prompt based on default mode
         self.mode = MODES[0]
         self._system_prompt = self._get_default_system_prompt(self.mode)
+        # Initialize default response style: Streaming
+        self.response_style = STYLES[0]
 
     def _chat(
         self, message: str, history: list[list[str]], mode: Modes, *_: Any
     ) -> Any:
@@ -184,24 +196,26 @@ class PrivateGptUi:
                            docs_ids.append(ingested_document.doc_id)
                    context_filter = ContextFilter(docs_ids=docs_ids)
 
-                if self._response_style:
-                    query_stream = self._chat_service.stream_chat(
-                        all_messages, use_context=False
-                    )
-                    yield from yield_deltas(query_stream)
-                else:
-                    query_response = self._chat_service.chat(
-                        all_messages, use_context=False
-                    ).response
-                    yield from [query_response]
+                match self.response_style:
+                    case Styles.STREAMING:
+                        query_stream = self._chat_service.stream_chat(
+                            all_messages, use_context=False
+                        )
+                        yield from yield_deltas(query_stream)
+                    case Styles.NON_STREAMING:
+                        query_response = self._chat_service.chat(
+                            all_messages, use_context=False
+                        ).response
+                        yield from [query_response]
             case Modes.BASIC_CHAT_MODE:
-                if self._response_style:
-                    llm_stream = self._chat_service.stream_chat(
-                        all_messages, use_context=False
-                    )
-                    yield from yield_deltas(llm_stream)
-                else:
-                    llm_response = self._chat_service.chat(
-                        all_messages, use_context=False
-                    ).response
+                match self.response_style:
+                    case Styles.STREAMING:
+                        llm_stream = self._chat_service.stream_chat(
+                            all_messages, use_context=False
+                        )
+                        yield from yield_deltas(llm_stream)
+                    case Styles.NON_STREAMING:
+                        llm_response = self._chat_service.chat(
+                            all_messages, use_context=False
+                        ).response
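Stripped of the service calls, the refactor in each branch is a match over the enum selecting one of two generator paths. A self-contained sketch of the pattern; respond and tokens are stand-ins, not names from the commit:

    from collections.abc import Iterator
    from enum import Enum

    class Styles(str, Enum):
        STREAMING = "Streaming"
        NON_STREAMING = "Non-Streaming"

    def respond(style: Styles, tokens: list[str]) -> Iterator[str]:
        match style:
            case Styles.STREAMING:
                # Emit partial output as it arrives.
                yield from tokens
            case Styles.NON_STREAMING:
                # Emit the complete response in one piece.
                yield "".join(tokens)

    print(list(respond(Styles.STREAMING, ["Hel", "lo"])))  # ['Hel', 'lo']
    print(list(respond("Non-Streaming", ["Hel", "lo"])))   # ['Hello']

Note the second call passes a plain string: value patterns in match compare with ==, so the str-based enum still dispatches correctly.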
@@ -233,14 +247,15 @@ class PrivateGptUi:
                            docs_ids.append(ingested_document.doc_id)
                    context_filter = ContextFilter(docs_ids=docs_ids)
 
-                if self._response_style:
-                    summary_stream = self._summarize_service.stream_summarize(
-                        use_context=True,
-                        context_filter=context_filter,
-                        instructions=message,
-                    )
-                    yield from yield_tokens(summary_stream)
-                else:
-                    summary_response = self._summarize_service.summarize(
-                        use_context=True,
-                        context_filter=context_filter,
+                match self.response_style:
+                    case Styles.STREAMING:
+                        summary_stream = self._summarize_service.stream_summarize(
+                            use_context=True,
+                            context_filter=context_filter,
+                            instructions=message,
+                        )
+                        yield from yield_tokens(summary_stream)
+                    case Styles.NON_STREAMING:
+                        summary_response = self._summarize_service.summarize(
+                            use_context=True,
+                            context_filter=context_filter,
@@ -299,8 +314,8 @@ class PrivateGptUi:
             gr.update(value=self._explanation_mode),
         ]
 
-    def _set_response_style(self, response_style: bool) -> None:
-        self._response_style = response_style
+    def _set_current_response_style(self, response_style: Styles) -> Any:
+        self.response_style = response_style
 
     def _list_ingested_files(self) -> list[list[str]]:
         files = set()
@@ -425,11 +440,14 @@ class PrivateGptUi:
                             max_lines=3,
                             interactive=False,
                         )
-                        response_style = gr.Checkbox(
-                            label="Response Style: Streaming", value=self._response_style
-                        )
-                        response_style.input(
-                            self._set_response_style, inputs=response_style
+                        default_response_style = STYLES[0]
+                        response_style = (
+                            gr.Dropdown(
+                                [response_style.value for response_style in STYLES],
+                                label="Response Style",
+                                value=default_response_style,
+                                interactive=True,
+                            ),
                         )
                         upload_button = gr.components.UploadButton(
                             "Upload File(s)",
@@ -524,6 +542,10 @@ class PrivateGptUi:
                         self._set_system_prompt,
                         inputs=system_prompt_input,
                     )
+                    # When response style changes
+                    response_style[0].change(
+                        self._set_current_response_style, inputs=response_style
+                    )
 
 def get_model_label() -> str | None:
     """Get model label from llm mode setting YAML.