Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-05-12 18:34:21 +00:00)
* Extract optional dependencies
* Separate local mode into llms-llama-cpp and embeddings-huggingface for clarity
* Support Ollama embeddings
* Upgrade to llamaindex 0.10.14. Remove legacy use of ServiceContext in ContextChatEngine
* Fix vector retriever filters
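
The notes above mention Ollama embedding support. A minimal sketch of what an Ollama-backed embedding profile could look like under the new split, assuming the ollama settings section accepts embedding_model and api_base keys and that the nomic-embed-text model is available locally (none of which is taken from this file):

embedding:
  mode: ollama

ollama:
  embedding_model: nomic-embed-text  # assumed example embedding model served by Ollama
  api_base: http://localhost:11434   # assumed default local Ollama endpoint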

The local configuration file itself (26 lines, 522 B, YAML):
server:
  env_name: ${APP_ENV:local}

llm:
  mode: llamacpp
  # Should be matching the selected model
  max_new_tokens: 512
  context_window: 3900
  tokenizer: mistralai/Mistral-7B-Instruct-v0.2

llamacpp:
  prompt_style: "mistral"
  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf

embedding:
  mode: huggingface

huggingface:
  embedding_hf_model_name: BAAI/bge-small-en-v1.5

vectorstore:
  database: qdrant

qdrant:
  path: local_data/private_gpt/qdrant
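
The comment in the llm section notes that max_new_tokens, context_window, and tokenizer should match the selected model, and the llamacpp prompt_style has to agree with the model family as well. A hedged sketch of swapping in a different GGUF model, where the Llama 2 repo id, file name, tokenizer, and "llama2" prompt style are illustrative assumptions rather than values from this file:

llm:
  mode: llamacpp
  max_new_tokens: 512
  context_window: 3900                        # assumed: keep at or below the model's context length
  tokenizer: meta-llama/Llama-2-7b-chat-hf    # assumed tokenizer matching the swapped-in model

llamacpp:
  prompt_style: "llama2"                            # assumed prompt style for Llama 2 chat models
  llm_hf_repo_id: TheBloke/Llama-2-7B-Chat-GGUF     # assumed example GGUF repo
  llm_hf_model_file: llama-2-7b-chat.Q4_K_M.gguf    # assumed example quantized file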