Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-04-28 03:32:18 +00:00)
* Moved prompt_style into the main LLM settings, since all LLMs from llama_index can utilize it. Also included temperature, context window size, max_tokens, and max_new_tokens in the openailike implementation to help ensure its settings stay consistent with the other implementations.
* Removed prompt_style from llamacpp entirely.
* Fixed settings-local.yaml to include prompt_style in the LLM settings instead of llamacpp.
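As a minimal sketch of the relocation described above (the "before" layout is inferred from the commit description, not taken from an actual prior revision; field names are those used in the file below):

# before: prompt_style lived under the llamacpp-specific settings
llamacpp:
  prompt_style: "mistral"
  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF

# after: prompt_style is a generic llm setting, usable by any llama_index LLM
llm:
  mode: llamacpp
  prompt_style: "mistral"

llamacpp:
  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF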
settings-local.yaml · 28 lines · 613 B · YAML
# poetry install --extras "ui llms-llama-cpp vector-stores-qdrant embeddings-huggingface"
server:
  env_name: ${APP_ENV:local}

llm:
  mode: llamacpp
  # Should be matching the selected model
  max_new_tokens: 512
  context_window: 3900
  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
  prompt_style: "mistral"

llamacpp:
  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf

embedding:
  mode: huggingface

huggingface:
  embedding_hf_model_name: BAAI/bge-small-en-v1.5

vectorstore:
  database: qdrant

qdrant:
  path: local_data/private_gpt/qdrant
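As a usage note: the extras named in the file's first comment line are installed with poetry install --extras "ui llms-llama-cpp vector-stores-qdrant embeddings-huggingface", and this file is then picked up when the local settings profile is selected (for example via PGPT_PROFILES=local, assuming privateGPT's standard profile mechanism maps that value to settings-local.yaml).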