mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-21 19:39:00 +00:00)
Use the token cache to infer greater n_past and reuse results (#3073)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
```diff
@@ -116,10 +116,7 @@ llmodel = load_llmodel_library()
 class LLModelPromptContext(ctypes.Structure):
     _fields_ = [
-        ("tokens", ctypes.POINTER(ctypes.c_int32)),
-        ("tokens_size", ctypes.c_size_t),
-        ("n_past", ctypes.c_int32),
         ("n_ctx", ctypes.c_int32),
         ("n_predict", ctypes.c_int32),
         ("top_k", ctypes.c_int32),
         ("top_p", ctypes.c_float),
```
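For readers unfamiliar with the binding layer, here is a minimal, self-contained sketch of how a ctypes structure like this one is declared, filled with keyword arguments, and read back. The field list is the trimmed version visible in the hunk (the real class carries more fields), and `ref` only stands in for what the bindings would pass to the C entry point; the actual call is omitted.

```python
import ctypes

# Sketch only: the trimmed prompt-context structure from the hunk above.
# After this change the bindings no longer carry a token buffer, its
# size, or n_past in this struct.
class LLModelPromptContext(ctypes.Structure):
    _fields_ = [
        ("n_ctx",     ctypes.c_int32),
        ("n_predict", ctypes.c_int32),
        ("top_k",     ctypes.c_int32),
        ("top_p",     ctypes.c_float),
    ]

# ctypes.Structure accepts keyword arguments matching _fields_.
ctx = LLModelPromptContext(n_ctx=0, n_predict=128, top_k=40, top_p=0.9)

# The bindings hand this to C by reference; a C function declared as
# taking `struct llmodel_prompt_context *` would receive this pointer.
ref = ctypes.byref(ctx)

print(ctx.n_predict, ctx.top_k)  # 128 40
```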
```diff
@@ -393,9 +390,7 @@ class LLModel:
     ):
         if self.context is None:
             context = LLModelPromptContext(
-                tokens_size=0,
-                n_past=0,
                 n_ctx=0,
                 n_predict=n_predict,
                 top_k=top_k,
                 top_p=top_p,
```
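What makes these fields unnecessary is the behavior the commit title describes: the backend keeps its own cache of the tokens it has already evaluated, and when a new prompt shares a prefix with that cache it infers `n_past` as the length of the shared prefix, so the results for those tokens are reused and only the tail is re-evaluated. A minimal Python sketch of that idea, assuming a simple list-of-ints token cache (the real logic lives in the C++ backend and handles more cases):

```python
def infer_n_past(cached: list[int], prompt: list[int]) -> int:
    """Length of the longest shared prefix between the tokens already
    evaluated (the token cache) and the incoming prompt."""
    n_past = 0
    for old, new in zip(cached, prompt):
        if old != new:
            break
        n_past += 1
    # If the whole prompt is already cached, step back one token so the
    # model still evaluates something and produces fresh logits.
    if n_past == len(prompt) and n_past > 0:
        n_past -= 1
    return n_past

# Example: a 3-token shared prefix means only the suffix is evaluated.
cache  = [101, 15, 7, 22, 9]      # tokens the model has already seen
prompt = [101, 15, 7, 30, 4, 11]  # new request with the same opening
n_past = infer_n_past(cache, prompt)
assert n_past == 3
suffix = prompt[n_past:]          # [30, 4, 11] is all that runs
```

With this bookkeeping done backend-side, the Python caller no longer needs to thread `tokens`, `tokens_size`, or `n_past` through the prompt context, which is exactly what the two hunks above remove.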