Mirror of https://github.com/imartinez/privateGPT.git, synced 2025-04-27 03:11:11 +00:00
* Extract optional dependencies
* Separate local mode into llms-llama-cpp and embeddings-huggingface for clarity
* Support Ollama embeddings
* Upgrade to llamaindex 0.10.14; remove legacy use of ServiceContext in ContextChatEngine
* Fix vector retriever filters
17 lines · 220 B · YAML
server:
  env_name: ${APP_ENV:sagemaker}
  port: ${PORT:8001}

ui:
  enabled: true
  path: /

llm:
  mode: sagemaker

embedding:
  mode: sagemaker

sagemaker:
  llm_endpoint_name: llm
  embedding_endpoint_name: embedding
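The `${APP_ENV:sagemaker}` and `${PORT:8001}` values follow an environment-variable-with-default pattern: the variable's value is used if set, otherwise the default after the colon. Below is a minimal sketch of how such `${NAME:default}` placeholders can be resolved; the `expand_placeholders` helper is purely illustrative and is not part of privateGPT's settings loader.

import os
import re

# Matches ${NAME:default} placeholders, e.g. ${PORT:8001}.
_PLACEHOLDER = re.compile(r"\$\{(\w+):([^}]*)\}")

def expand_placeholders(value: str) -> str:
    """Replace each ${NAME:default} with the NAME env var, or the default if unset."""
    def _sub(match: re.Match) -> str:
        name, default = match.group(1), match.group(2)
        return os.environ.get(name, default)
    return _PLACEHOLDER.sub(_sub, value)

# With PORT unset, the default from the settings file is used.
print(expand_placeholders("${PORT:8001}"))          # -> "8001"

# With APP_ENV set, the environment value wins over the default.
os.environ["APP_ENV"] = "sagemaker-staging"
print(expand_placeholders("${APP_ENV:sagemaker}"))  # -> "sagemaker-staging"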