diff --git a/settings.yaml b/settings.yaml
index e076a948..2084636b 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -44,14 +44,14 @@ llm:
   temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
 
 rag:
-  similarity_top_k: 2
+  similarity_top_k: 10
   #This value controls how many "top" documents the RAG returns to use in the context.
   #similarity_value: 0.45
   #This value is disabled by default. If you enable this settings, the RAG will only use articles that meet a certain percentage score.
   rerank:
-    enabled: false
+    enabled: true
     model: cross-encoder/ms-marco-MiniLM-L-2-v2
-    top_n: 1
+    top_n: 3
 
 llamacpp:
   prompt_style: "chatml"