Mirror of https://github.com/imartinez/privateGPT.git, synced 2025-07-05 11:36:44 +00:00
fix: Adding an LLM param to fix broken generator from llamacpp (#1519)
This commit is contained in:
parent: e326126d0d
commit: 869233f0e4
@@ -42,7 +42,7 @@ class LLMComponent:
                     context_window=settings.llm.context_window,
                     generate_kwargs={},
                     # All to GPU
-                    model_kwargs={"n_gpu_layers": -1},
+                    model_kwargs={"n_gpu_layers": -1, "offload_kqv": True},
                     # transform inputs into Llama2 format
                     messages_to_prompt=prompt_style.messages_to_prompt,
                     completion_to_prompt=prompt_style.completion_to_prompt,
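The model_kwargs dict is forwarded by the llama_index LlamaCPP wrapper to the underlying llama_cpp.Llama constructor, so the change is roughly equivalent to the sketch below. This is a minimal illustration, not the privateGPT code; the model path and context size are placeholder values, not taken from this commit.

import llama_cpp

# Offload every layer to the GPU (n_gpu_layers=-1, the existing "All to GPU"
# setting) and also keep the KV cache on the GPU (offload_kqv=True, the flag
# added by this commit to work around the broken generator from llamacpp).
llm = llama_cpp.Llama(
    model_path="models/model.gguf",  # placeholder path
    n_ctx=3900,                      # placeholder; taken from settings.llm.context_window in privateGPT
    n_gpu_layers=-1,
    offload_kqv=True,
)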