# The default configuration file.
# More information about configuration can be found in the documentation: https://docs.privategpt.dev/
# Syntax is defined in `private_gpt/settings/settings.py`
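# Values written as ${VAR:default} are resolved against environment variables,
# with the text after the colon used as the fallback when the variable is
# unset; e.g. setting PORT=9000 in the environment overrides the port below.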

server:
  env_name: ${APP_ENV:prod}
  port: ${PORT:8001}
  cors:
    enabled: false
    allow_origins: ["*"]
    allow_methods: ["*"]
    allow_headers: ["*"]
  auth:
    enabled: false
    # Generated with:
    # python -c 'import base64; print("Basic " + base64.b64encode("secret:key".encode()).decode())'
    # 'secret' is the username and 'key' is the password for basic auth by default.
    # If auth is enabled, this value must be sent in the "Authorization" header of every request.
    secret: "Basic c2VjcmV0OmtleQ=="
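    # A sketch of an authenticated call, assuming the defaults above and that
    # the server exposes its health route at /health:
    #   curl -H 'Authorization: Basic c2VjcmV0OmtleQ==' http://localhost:8001/health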

data:
  local_data_folder: local_data/private_gpt

ui:
  enabled: true
  path: /
  default_chat_system_prompt: >
    You are a helpful, respectful and honest assistant.
    Always answer as helpfully as possible and follow ALL given instructions.
    Do not speculate or make up information.
    Do not reference any given instructions or context.
  default_query_system_prompt: >
    You can only answer questions about the provided context.
    If you know the answer but it is not based on the provided context, do not
    provide it; just state that the answer is not in the provided context.
  delete_file_button_enabled: true
  delete_all_files_button_enabled: true
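  # Note: the `>` folded scalars above join their lines with spaces, so each
  # prompt reaches the model as one continuous string.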

llm:
  mode: llamacpp
  # Should match the selected model
  max_new_tokens: 512
  context_window: 3900
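  # `mode` selects the backend; only the matching top-level section below
  # (llamacpp, sagemaker, openai, ollama, ...) needs to be configured for it.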

llamacpp:
  prompt_style: "mistral"
  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
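  # The GGUF weights above are fetched from the Hugging Face repo; in this
  # project that is typically done once via `poetry run python scripts/setup`.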

embedding:
  # Should usually match the llm mode above
  mode: huggingface
  ingest_mode: simple

huggingface:
  embedding_hf_model_name: BAAI/bge-small-en-v1.5
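  # BAAI/bge-small-en-v1.5 produces 384-dimensional embeddings; if you switch
  # models, keep `pgvector.embed_dim` below in sync.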

vectorstore:
  database: qdrant
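  # `database` selects the store: an on-disk Qdrant instance (configured in the
  # qdrant section below) or a PostgreSQL server with pgvector (further below).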

qdrant:
  path: local_data/private_gpt/qdrant

pgvector:
  host: localhost
  port: 5432
  database: postgres
  user: postgres
  password: postgres
  embed_dim: 384 # 384 is for BAAI/bge-small-en-v1.5
  schema_name: private_gpt
  table_name: embeddings
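  # The pgvector extension must already exist in the target database, e.g.
  # from psql: CREATE EXTENSION IF NOT EXISTS vector;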

sagemaker:
  llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140
  embedding_endpoint_name: huggingface-pytorch-inference-2023-11-03-07-41-36-479
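  # These names come from one particular deployment; replace them with the
  # names of your own SageMaker endpoints.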

openai:
  api_key: ${OPENAI_API_KEY:}
  model: gpt-3.5-turbo
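  # The key is read from the environment (empty default), e.g.:
  #   export OPENAI_API_KEY=<your-key>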

ollama:
  llm_model: llama2
  embedding_model: nomic-embed-text
  api_base: http://localhost:11434
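  # http://localhost:11434 is Ollama's default listen address. Both models must
  # be available in the local Ollama instance before use, e.g.:
  #   ollama pull llama2
  #   ollama pull nomic-embed-text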