diff --git a/private_gpt/components/llm/prompt_helper.py b/private_gpt/components/llm/prompt_helper.py
index 512b02c2..58754119 100644
--- a/private_gpt/components/llm/prompt_helper.py
+++ b/private_gpt/components/llm/prompt_helper.py
@@ -286,8 +286,9 @@ class ChatMLPromptStyle(AbstractPromptStyle):
 
 
 def get_prompt_style(
-    prompt_style: Literal["default", "llama2", "llama3", "tag", "mistral", "chatml"]
-    | None
+    prompt_style: (
+        Literal["default", "llama2", "llama3", "tag", "mistral", "chatml"] | None
+    )
 ) -> AbstractPromptStyle:
     """Get the prompt style to use from the given string.
 
diff --git a/private_gpt/server/recipes/summarize/summarize_service.py b/private_gpt/server/recipes/summarize/summarize_service.py
index 4bfd18f5..d19657cf 100644
--- a/private_gpt/server/recipes/summarize/summarize_service.py
+++ b/private_gpt/server/recipes/summarize/summarize_service.py
@@ -90,9 +90,9 @@ class SummarizeService:
         # Add context documents to summarize
         if use_context:
             # 1. Recover all ref docs
-            ref_docs: dict[
-                str, RefDocInfo
-            ] | None = self.storage_context.docstore.get_all_ref_doc_info()
+            ref_docs: dict[str, RefDocInfo] | None = (
+                self.storage_context.docstore.get_all_ref_doc_info()
+            )
             if ref_docs is None:
                 raise ValueError("No documents have been ingested yet.")
 
diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py
index 4cf192a3..5781a386 100644
--- a/private_gpt/settings/settings.py
+++ b/private_gpt/settings/settings.py
@@ -136,19 +136,19 @@ class LLMSettings(BaseModel):
         0.1,
         description="The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual.",
     )
-    prompt_style: Literal[
-        "default", "llama2", "llama3", "tag", "mistral", "chatml"
-    ] = Field(
-        "llama2",
-        description=(
-            "The prompt style to use for the chat engine. "
-            "If `default` - use the default prompt style from the llama_index. It should look like `role: message`.\n"
-            "If `llama2` - use the llama2 prompt style from the llama_index. Based on `<s>`, `[INST]` and `<<SYS>>`.\n"
-            "If `llama3` - use the llama3 prompt style from the llama_index."
-            "If `tag` - use the `tag` prompt style. It should look like `<|role|>: message`. \n"
-            "If `mistral` - use the `mistral prompt style. It shoudl look like [INST] {System Prompt} [/INST][INST] { UserInstructions } [/INST]"
-            "`llama2` is the historic behaviour. `default` might work better with your custom models."
-        ),
+    prompt_style: Literal["default", "llama2", "llama3", "tag", "mistral", "chatml"] = (
+        Field(
+            "llama2",
+            description=(
+                "The prompt style to use for the chat engine. "
+                "If `default` - use the default prompt style from the llama_index. It should look like `role: message`.\n"
+                "If `llama2` - use the llama2 prompt style from the llama_index. Based on `<s>`, `[INST]` and `<<SYS>>`.\n"
+                "If `llama3` - use the llama3 prompt style from the llama_index."
+                "If `tag` - use the `tag` prompt style. It should look like `<|role|>: message`. \n"
+                "If `mistral` - use the `mistral prompt style. It shoudl look like [INST] {System Prompt} [/INST][INST] { UserInstructions } [/INST]"
+                "`llama2` is the historic behaviour. `default` might work better with your custom models."
+            ),
+        )
     )
 
 
diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py
index 2c1dcd3e..5b945904 100644
--- a/private_gpt/ui/ui.py
+++ b/private_gpt/ui/ui.py
@@ -1,4 +1,5 @@
 """This file should be imported if and only if you want to run the UI locally."""
+
 import base64
 import logging
 import time