Using the tokenizer in pgpt throws an error

This commit is contained in:
Taddeus Buica 2024-11-05 20:05:19 +02:00
parent ad4d2af591
commit e146422686
2 changed files with 0 additions and 2 deletions

View File

@@ -4,7 +4,6 @@ server:
llm:
mode: openailike
tokenizer: ${VLLM_TOKENIZER:lmsys/vicuna-7b-v1.5}
max_new_tokens: ${VLLM_MAX_NEW_TOKENS:5000}
context_window: ${VLLM_CONTEXT_WINDOW:4096}
temperature: ${VLLM_TEMPERATURE:0.1}

View File

@@ -39,7 +39,6 @@ llm:
# Should be matching the selected model
max_new_tokens: 512
context_window: 3900
tokenizer: mistralai/Mistral-7B-Instruct-v0.2
temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
rag: