Mirror of https://github.com/imartinez/privateGPT.git, synced 2025-05-14 11:19:26 +00:00.
* feat: change ollama default model to llama3.1
* chore: bump versions
* feat: change default model in local mode to llama3.1
* chore: make sure the latest poetry version is used
* fix: mypy
* fix: do not add BOS (with the latest llama-cpp-python version)
28 lines · 629 B · YAML
# Local-mode settings: llama.cpp LLM + HuggingFace embeddings + Qdrant vector store.
# Install the matching extras with:
# poetry install --extras "ui llms-llama-cpp vector-stores-qdrant embeddings-huggingface"

server:
  # APP_ENV env var selects the profile; defaults to "local".
  env_name: ${APP_ENV:local}

llm:
  mode: llamacpp
  # Should be matching the selected model
  max_new_tokens: 512
  context_window: 3900
  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
  prompt_style: "llama3"

llamacpp:
  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf

embedding:
  mode: huggingface

huggingface:
  embedding_hf_model_name: BAAI/bge-small-en-v1.5

vectorstore:
  database: qdrant

qdrant:
  # Local on-disk Qdrant storage path.
  path: local_data/private_gpt/qdrant