Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-04-27 19:28:38 +00:00)
feat: make llama3.1 as default (#2022)
* feat: change ollama default model to llama3.1
* chore: bump versions
* feat: Change default model in local mode to llama3.1
* chore: make sure last poetry version is used
* fix: mypy
* fix: do not add BOS (with last llamacpp-python version)
Commit: 9027d695c1
Parent: e54a8fe043
@@ -8,7 +8,7 @@ inputs:
   poetry_version:
     required: true
     type: string
-    default: "1.5.1"
+    default: "1.8.3"

 runs:
   using: composite

@@ -2,7 +2,8 @@ FROM python:3.11.6-slim-bookworm as base

 # Install poetry
 RUN pip install pipx
-RUN pipx install poetry
+RUN python3 -m pipx ensurepath
+RUN pipx install poetry==1.8.3
 ENV PATH="/root/.local/bin:$PATH"
 ENV PATH=".venv/bin/:$PATH"

@@ -4,7 +4,8 @@ FROM python:3.11.6-slim-bookworm as base

 # Install poetry
 RUN pip install pipx
-RUN pipx install poetry
+RUN python3 -m pipx ensurepath
+RUN pipx install poetry==1.8.3
 ENV PATH="/root/.local/bin:$PATH"
 ENV PATH=".venv/bin/:$PATH"

@@ -28,6 +28,11 @@ pyenv local 3.11
 Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management:
 Follow the instructions on the official Poetry website to install it.
+
+<Callout intent="warning">
+A bug exists in Poetry versions 1.7.0 and earlier. We strongly recommend upgrading to a tested version.
+To upgrade Poetry to latest tested version, run `poetry self update 1.8.3` after installing it.
+</Callout>

 ### 4. Optional: Install `make`
 To run various scripts, you need to install `make`. Follow the instructions for your operating system:
 #### macOS

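Alongside the callout above, here is a small sketch (not part of the commit) that checks the locally installed Poetry against the 1.8.3 version pinned in the CI action and Dockerfiles. It assumes `poetry` is on PATH and that `poetry --version` prints its usual `Poetry (version X.Y.Z)` line.

```python
# Hypothetical helper, not from the repository: warn if the local Poetry
# is not the version this commit pins (1.8.3).
import subprocess

PINNED = "1.8.3"

def poetry_version() -> str:
    out = subprocess.run(
        ["poetry", "--version"], capture_output=True, text=True, check=True
    ).stdout.strip()
    # "Poetry (version 1.8.3)" -> "1.8.3"
    return out.rstrip(")").split("version ")[-1]

if __name__ == "__main__":
    found = poetry_version()
    if found != PINNED:
        print(f"Found Poetry {found}; consider `poetry self update {PINNED}`.")
```
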
@@ -135,14 +140,14 @@ Now, start Ollama service (it will start a local inference server, serving both
 ollama serve
 ```

-Install the models to be used, the default settings-ollama.yaml is configured to user mistral 7b LLM (~4GB) and nomic-embed-text Embeddings (~275MB)
+Install the models to be used, the default settings-ollama.yaml is configured to user llama3.1 8b LLM (~4GB) and nomic-embed-text Embeddings (~275MB)

 By default, PGPT will automatically pull models as needed. This behavior can be changed by modifying the `ollama.autopull_models` property.

 In any case, if you want to manually pull models, run the following commands:

 ```bash
-ollama pull mistral
+ollama pull llama3.1
 ollama pull nomic-embed-text
 ```

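For readers who prefer scripting the manual pull, a minimal illustrative sketch (not from the repo) that asks a locally running Ollama server to pull the new defaults through its REST API. It assumes the `requests` package and the standard `/api/pull` endpoint on port 11434.

```python
# Illustrative sketch: pull the default models (llama3.1 + nomic-embed-text)
# via Ollama's REST API rather than the `ollama pull` CLI shown above.
import requests

OLLAMA = "http://localhost:11434"

def pull(model: str) -> None:
    # "stream": False makes the server answer once the pull has finished.
    resp = requests.post(
        f"{OLLAMA}/api/pull", json={"name": model, "stream": False}, timeout=None
    )
    resp.raise_for_status()
    print(model, "->", resp.json().get("status", "unknown"))

for model in ("llama3.1", "nomic-embed-text"):
    pull(model)
```
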
@@ -24,7 +24,7 @@ PrivateGPT uses the `AutoTokenizer` library to tokenize input text accurately. I
 In your `settings.yaml` file, specify the model you want to use:
 ```yaml
 llm:
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
 ```
 2. **Set Access Token for Gated Models:**
 If you are using a gated model, ensure the `access_token` is set as mentioned in the previous section.

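Since `meta-llama/Meta-Llama-3.1-8B-Instruct` is a gated repository, the tokenizer download needs the access token the docs refer to. A minimal sketch, assuming the `transformers` package and a Hugging Face token exposed as an `HF_TOKEN` environment variable (both assumptions, not project code):

```python
# Minimal sketch (not project code): load the tokenizer referenced in
# settings.yaml. The repo is gated, so an access token is required once the
# Llama 3.1 license has been accepted on Hugging Face.
import os

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    token=os.environ.get("HF_TOKEN"),  # the access_token mentioned in the docs above
)
print(tokenizer.tokenize("PrivateGPT now defaults to Llama 3.1"))
```
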
poetry.lock (generated, 4581 lines changed): file diff suppressed because it is too large.

@@ -169,7 +169,7 @@ class Llama3PromptStyle(AbstractPromptStyle):
     """

     def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str:
-        prompt = self.BOS
+        prompt = ""
         has_system_message = False

         for i, message in enumerate(messages):

@@ -189,8 +189,7 @@ class Llama3PromptStyle(AbstractPromptStyle):
         # Add default system prompt if no system message was provided
         if not has_system_message:
             prompt = (
-                f"{self.BOS}{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
-                + prompt[len(self.BOS) :]
+                f"{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}" + prompt
             )

         # TODO: Implement tool handling logic

@@ -199,7 +198,7 @@ class Llama3PromptStyle(AbstractPromptStyle):

     def _completion_to_prompt(self, completion: str) -> str:
         return (
-            f"{self.BOS}{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
+            f"{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
             f"{self.B_INST}user{self.E_INST}\n\n{completion.strip()}{self.EOT}"
             f"{self.ASSISTANT_INST}\n\n"
        )

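The three hunks above all drop the leading `self.BOS` from `Llama3PromptStyle`, matching the commit note "fix: do not add BOS (with last llamacpp-python version)": recent llama-cpp-python versions prepend the `<|begin_of_text|>` token themselves at tokenization time, so keeping it in the prompt string would duplicate it. A standalone sketch of the resulting prompt shape; the token strings mirror the test expectations further down, while the function name and constants are illustrative, not the project's API:

```python
# Standalone sketch of the prompt produced after this change: no explicit
# <|begin_of_text|>, since llama-cpp-python now adds BOS on its own.
B_SYS = "<|start_header_id|>system<|end_header_id|>"
B_INST, E_INST = "<|start_header_id|>", "<|end_header_id|>"
ASSISTANT_INST = "<|start_header_id|>assistant<|end_header_id|>"
EOT = "<|eot_id|>"
DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."  # illustrative value

def completion_to_prompt(completion: str) -> str:
    return (
        f"{B_SYS}\n\n{DEFAULT_SYSTEM_PROMPT}{EOT}"
        f"{B_INST}user{E_INST}\n\n{completion.strip()}{EOT}"
        f"{ASSISTANT_INST}\n\n"
    )

assert not completion_to_prompt("Hello!").startswith("<|begin_of_text|>")
```
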
@@ -37,7 +37,8 @@ def create_app(root_injector: Injector) -> FastAPI:

     # Add LlamaIndex simple observability
     global_handler = create_global_handler("simple")
-    LlamaIndexSettings.callback_manager = CallbackManager([global_handler])
+    if global_handler is not None:
+        LlamaIndexSettings.callback_manager = CallbackManager([global_handler])

     settings = root_injector.get(Settings)
     if settings.server.cors.enabled:

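The guard above simply avoids wrapping a missing handler in a `CallbackManager` (which is also what satisfies mypy, per the commit message). A toy sketch of the same pattern with stand-in types, since the real `create_global_handler` and `CallbackManager` come from llama-index:

```python
# Toy illustration of the None-guard added in the launcher; the classes here
# are stand-ins, not the llama-index ones used by PrivateGPT.
from typing import Any, Optional

class CallbackManager:
    def __init__(self, handlers: list[Any]) -> None:
        self.handlers = handlers

def create_global_handler(kind: str) -> Optional[object]:
    # May legitimately return None, e.g. when no matching handler exists.
    return object() if kind == "simple" else None

global_handler = create_global_handler("simple")
if global_handler is not None:  # previously CallbackManager([None]) could slip through
    callback_manager = CallbackManager([global_handler])
```
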
@@ -9,8 +9,8 @@ embedding:
   mode: ${PGPT_EMBED_MODE:mock}

 llamacpp:
-  llm_hf_repo_id: ${PGPT_HF_REPO_ID:TheBloke/Mistral-7B-Instruct-v0.1-GGUF}
-  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:mistral-7b-instruct-v0.1.Q4_K_M.gguf}
+  llm_hf_repo_id: ${PGPT_HF_REPO_ID:lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF}
+  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf}

 huggingface:
   embedding_hf_model_name: ${PGPT_EMBEDDING_HF_MODEL_NAME:BAAI/bge-small-en-v1.5}

@@ -20,7 +20,7 @@ sagemaker:
   embedding_endpoint_name: ${PGPT_SAGEMAKER_EMBEDDING_ENDPOINT_NAME:}

 ollama:
-  llm_model: ${PGPT_OLLAMA_LLM_MODEL:mistral}
+  llm_model: ${PGPT_OLLAMA_LLM_MODEL:llama3.1}
   embedding_model: ${PGPT_OLLAMA_EMBEDDING_MODEL:nomic-embed-text}
   api_base: ${PGPT_OLLAMA_API_BASE:http://ollama:11434}
   embedding_api_base: ${PGPT_OLLAMA_EMBEDDING_API_BASE:http://ollama:11434}

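The `${PGPT_…:default}` values in the Docker settings are resolved at load time: the environment variable wins, otherwise the text after the colon is used, which is how llama3.1 becomes the new default without breaking overrides. A rough sketch of that expansion rule (an illustration of the syntax, not PrivateGPT's actual settings loader):

```python
# Rough illustration of the "${VAR:default}" syntax used above: take the
# environment variable if set, otherwise fall back to the default after the
# colon. Not the project's real loader, just the idea.
import os
import re
from typing import Mapping

_PATTERN = re.compile(r"\$\{(\w+):([^}]*)\}")

def expand(value: str, env: Mapping[str, str] | None = None) -> str:
    source = os.environ if env is None else env
    return _PATTERN.sub(lambda m: source.get(m.group(1), m.group(2)), value)

# With no override, the new default model name is used:
print(expand("${PGPT_OLLAMA_LLM_MODEL:llama3.1}", env={}))  # llama3.1
# Setting the variable still wins:
print(expand("${PGPT_OLLAMA_LLM_MODEL:llama3.1}",
             env={"PGPT_OLLAMA_LLM_MODEL": "mistral"}))     # mistral
```
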
@@ -7,12 +7,12 @@ llm:
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
-  prompt_style: "mistral"
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
+  prompt_style: "llama3"

 llamacpp:
-  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
-  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
+  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
+  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf

 embedding:
   mode: huggingface

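Local (llamacpp) mode needs the GGUF file named above on disk. A hedged sketch of fetching it with `huggingface_hub`: the repo and file names come straight from the diff, while the `models/` target directory is an assumption and the project's own setup script normally handles this step.

```python
# Sketch only: download the GGUF that the local settings now point at.
# Repo/file names come from the diff; models/ as target dir is an assumption.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF",
    filename="Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
    local_dir="models",
)
print("GGUF stored at", path)
```
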
@@ -14,7 +14,7 @@ embedding:
   embed_dim: 768

 ollama:
-  llm_model: mistral
+  llm_model: llama3.1
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434

@@ -11,7 +11,7 @@ embedding:
   mode: ollama

 ollama:
-  llm_model: mistral
+  llm_model: llama3.1
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
   embedding_api_base: http://localhost:11434 # change if your embedding model runs on another ollama

@@ -4,7 +4,7 @@ server:
 llm:
   mode: openailike
   max_new_tokens: 512
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
   temperature: 0.1

 embedding:

@@ -39,12 +39,12 @@ ui:

 llm:
   mode: llamacpp
-  prompt_style: "mistral"
+  prompt_style: "llama3"
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
   # Select your tokenizer. Llama-index tokenizer is the default.
-  # tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  # tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
   temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)

 rag:

@@ -65,8 +65,8 @@ clickhouse:
   database: embeddings

 llamacpp:
-  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
-  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
+  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
+  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 1.0 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)

@@ -114,7 +114,7 @@ openai:
   embedding_api_key: ${OPENAI_API_KEY:}

 ollama:
-  llm_model: llama2
+  llm_model: llama3.1
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
   embedding_api_base: http://localhost:11434 # change if your embedding model runs on another ollama

@@ -150,7 +150,7 @@ def test_llama3_prompt_style_format():
     ]

     expected_prompt = (
-        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
+        "<|start_header_id|>system<|end_header_id|>\n\n"
         "You are a helpful assistant<|eot_id|>"
         "<|start_header_id|>user<|end_header_id|>\n\n"
         "Hello, how are you doing?<|eot_id|>"

@@ -166,7 +166,7 @@ def test_llama3_prompt_style_with_default_system():
         ChatMessage(content="Hello!", role=MessageRole.USER),
     ]
     expected = (
-        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
+        "<|start_header_id|>system<|end_header_id|>\n\n"
         f"{prompt_style.DEFAULT_SYSTEM_PROMPT}<|eot_id|>"
         "<|start_header_id|>user<|end_header_id|>\n\nHello!<|eot_id|>"
         "<|start_header_id|>assistant<|end_header_id|>\n\n"

@@ -185,7 +185,7 @@ def test_llama3_prompt_style_with_assistant_response():
     ]

     expected_prompt = (
-        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
+        "<|start_header_id|>system<|end_header_id|>\n\n"
         "You are a helpful assistant<|eot_id|>"
         "<|start_header_id|>user<|end_header_id|>\n\n"
         "What is the capital of France?<|eot_id|>"