Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-08-02 08:06:08 +00:00)
Update poetry lock, and fix run for template prompt format
This commit is contained in:
parent 5bc5054000
commit af1463637b
poetry.lock (generated, 1678 lines changed)
File diff suppressed because it is too large.
@@ -178,8 +178,10 @@ class AbstractPromptStyle(abc.ABC):
 class AbstractPromptStyleWithSystemPrompt(AbstractPromptStyle, abc.ABC):
     _DEFAULT_SYSTEM_PROMPT = DEFAULT_SYSTEM_PROMPT

-    def __init__(self, default_system_prompt: str | None) -> None:
-        super().__init__()
+    def __init__(
+        self, default_system_prompt: str | None, *args: Any, **kwargs: Any
+    ) -> None:
+        super().__init__(*args, **kwargs)
         logger.debug("Got default_system_prompt='%s'", default_system_prompt)
         self.default_system_prompt = default_system_prompt

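The recurring change in this and the following hunks is to make every prompt-style __init__ cooperative: each class consumes the arguments it owns and forwards the rest with super().__init__(*args, **kwargs). A minimal standalone sketch of the pattern (class names here are illustrative, not taken from the commit):

from typing import Any


class Base:
    def __init__(self, default_system_prompt: str | None, *args: Any, **kwargs: Any) -> None:
        # Consume the keyword this class owns, pass everything else up the MRO.
        super().__init__(*args, **kwargs)
        self.default_system_prompt = default_system_prompt


class Child(Base):
    def __init__(self, default_system_prompt: str | None = None, *args: Any, **kwargs: Any) -> None:
        # Forward the value through kwargs so intermediate classes can add their
        # own keyword arguments without breaking the chain.
        kwargs["default_system_prompt"] = default_system_prompt
        super().__init__(*args, **kwargs)


child = Child("You are a helpful assistant.")
assert child.default_system_prompt == "You are a helpful assistant."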
@@ -235,9 +237,13 @@ class LlamaIndexPromptStyle(AbstractPromptStyleWithSystemPrompt):
     ```
     """

-    def __init__(self, default_system_prompt: str | None = None) -> None:
+    def __init__(
+        self, default_system_prompt: str | None = None, *args: Any, **kwargs: Any
+    ) -> None:
         # If no system prompt is given, the default one of the implementation is used.
-        super().__init__(default_system_prompt=default_system_prompt)
+        # default_system_prompt can be None here
+        kwargs["default_system_prompt"] = default_system_prompt
+        super().__init__(*args, **kwargs)

     def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str:
         return messages_to_prompt(messages, self.default_system_prompt)
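For context, LlamaIndexPromptStyle still delegates formatting to llama_index's llama_utils helpers, which target the Llama-2 chat layout; only the way default_system_prompt reaches the base class changes. A rough, version-dependent illustration of the shape of that layout (approximate, not the helper's exact output):

# Approximate Llama-2 chat layout that llama_utils-style formatting targets;
# exact spacing and special tokens depend on the installed llama_index version.
system_prompt = "You are a helpful assistant."
user_message = "Hello!"
prompt = (
    f"<s> [INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
    f"{user_message} [/INST]"
)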
@@ -264,12 +270,14 @@ class VigognePromptStyle(AbstractPromptStyleWithSystemPrompt):
         self,
         default_system_prompt: str | None = None,
         add_generation_prompt: bool = True,
+        *args: Any,
+        **kwargs: Any,
     ) -> None:
         # We have to define a default system prompt here as the LLM will not
         # use the default llama_utils functions.
         default_system_prompt = default_system_prompt or self._DEFAULT_SYSTEM_PROMPT
-        super().__init__(default_system_prompt)
-        self.system_prompt: str = default_system_prompt
+        kwargs["default_system_prompt"] = default_system_prompt
+        super().__init__(*args, **kwargs)
         self.add_generation_prompt = add_generation_prompt

     def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str:
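VigognePromptStyle now forwards its arguments the same way and keeps the add_generation_prompt flag, which controls whether the rendered prompt ends with an open assistant turn. A small illustration of what that flag changes in the final string (role markers taken from the vigogne template further down; the helper itself is illustrative only):

def close_prompt(rendered: str, add_generation_prompt: bool = True) -> str:
    # When the flag is set, leave an open "<|assistant|>:" turn so the model
    # answers as the assistant instead of continuing the user's text.
    return rendered + "<|assistant|>:" if add_generation_prompt else rendered


print(close_prompt("<|user|>: Bonjour\n"))
# <|user|>: Bonjour
# <|assistant|>: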
@@ -300,7 +308,11 @@ class VigognePromptStyle(AbstractPromptStyleWithSystemPrompt):

 class LlamaCppPromptStyle(AbstractPromptStyleWithSystemPrompt):
     def __init__(
-        self, prompt_style: str, default_system_prompt: str | None = None
+        self,
+        prompt_style: str,
+        default_system_prompt: str | None = None,
+        *args: Any,
+        **kwargs: Any,
     ) -> None:
         """Wrapper for llama_cpp_python defined prompt format.

@@ -309,7 +321,8 @@ class LlamaCppPromptStyle(AbstractPromptStyleWithSystemPrompt):
         """
         assert prompt_style.startswith("llama_cpp.")
         default_system_prompt = default_system_prompt or self._DEFAULT_SYSTEM_PROMPT
-        super().__init__(default_system_prompt)
+        kwargs["default_system_prompt"] = default_system_prompt
+        super().__init__(*args, **kwargs)

         self.prompt_style = prompt_style[len("llama_cpp.") :]
         if self.prompt_style is None:
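LlamaCppPromptStyle only strips the llama_cpp. prefix and keeps the remainder as the name of a prompt format defined by llama_cpp_python, as the docstring above says. A short sketch of that string handling (the value "llama_cpp.chatml" is a hypothetical example; which names are accepted downstream depends on the installed llama-cpp-python version):

prompt_style = "llama_cpp.chatml"  # hypothetical value for illustration
assert prompt_style.startswith("llama_cpp.")
chat_format = prompt_style[len("llama_cpp."):]
print(chat_format)  # chatml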
@@ -339,6 +352,8 @@ class TemplatePromptStyle(AbstractPromptStyleWithSystemPrompt):
         template_dir: str | None = None,
         add_generation_prompt: bool = True,
         default_system_prompt: str | None = None,
+        *args: Any,
+        **kwargs: Any,
     ) -> None:
         """Prompt format using a Jinja template.

@@ -350,7 +365,8 @@ class TemplatePromptStyle(AbstractPromptStyleWithSystemPrompt):
            given in the messages.
         """
         default_system_prompt = default_system_prompt or DEFAULT_SYSTEM_PROMPT
-        super().__init__(default_system_prompt)
+        kwargs["default_system_prompt"] = default_system_prompt
+        super().__init__(*args, **kwargs)

         self._add_generation_prompt = add_generation_prompt

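TemplatePromptStyle renders a Jinja chat template such as the vigogne one below, which is why this commit also adds jinja2 to pyproject.toml. A hedged sketch of rendering such a template with jinja2 (the tiny template string here is a stand-in that reuses the same variables; it is not the project's loading code):

from jinja2 import Template

# Stand-in template using the same variables as the real file below.
template = Template(
    "{{ bos_token }}{% for m in messages %}<|{{ m['role'] }}|>: {{ m['content'] }}\n{% endfor %}"
    "{% if add_generation_prompt %}<|assistant|>:{% endif %}"
)
print(
    template.render(
        bos_token="<s>",
        messages=[{"role": "user", "content": "Bonjour"}],
        add_generation_prompt=True,
    )
)
# <s><|user|>: Bonjour
# <|assistant|>: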
@@ -1,2 +1,2 @@
 {# This template is coming from: https://huggingface.co/bofenghuang/vigogne-2-7b-chat/blob/main/tokenizer_config.json #}
-{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true %}{% set loop_messages = messages %}{% set system_message = 'Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|system|>: ' + system_message + '\\n' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>: ' + message['content'].strip() + '\\n' }}{% elif message['role'] == 'assistant' %}{{ '<|assistant|>: ' + message['content'].strip() + eos_token + '\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>:' }}{% endif %}
+{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true %}{% set loop_messages = messages %}{% set system_message = 'Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|system|>: ' + system_message + '\n' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>: ' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{ '<|assistant|>: ' + message['content'].strip() + eos_token + '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>:' }}{% endif %}
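The only change to the template itself is un-escaping the newlines: inside a Jinja string literal, '\\n' renders as a literal backslash followed by n, while '\n' becomes the real newline the chat format expects. A quick jinja2 illustration (raw strings are used so Python leaves the backslashes alone):

from jinja2 import Template

escaped = Template(r"{{ 'user' + '\\n' }}")  # old behaviour: literal backslash + n
fixed = Template(r"{{ 'user' + '\n' }}")     # new behaviour: a real newline
print(repr(escaped.render()))  # 'user\\n'
print(repr(fixed.render()))    # 'user\n'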
@@ -36,12 +36,13 @@ gradio = "^4.7.1"
 [tool.poetry.group.local]
 optional = true
 [tool.poetry.group.local.dependencies]
-llama-cpp-python = "^0.2.11"
-numpy = "1.26.0"
+llama-cpp-python = "^0.2.20"
+jinja2 = "^3.1.2"
+# numpy = "1.26.0"
 sentence-transformers = "^2.2.2"
 # https://stackoverflow.com/questions/76327419/valueerror-libcublas-so-0-9-not-found-in-the-system-path
 torch = ">=2.0.0, !=2.0.1, !=2.1.0"
-transformers = "^4.34.0"
+transformers = "^4.35.2"

 [tool.poetry.extras]
 chroma = ["chromadb"]