mirror of https://github.com/nomic-ai/gpt4all.git
llmodel_c: improve quality of error messages (#1625)
@@ -42,10 +42,6 @@ def load_llmodel_library():
 llmodel = load_llmodel_library()
 
 
-class LLModelError(ctypes.Structure):
-    _fields_ = [("message", ctypes.c_char_p), ("code", ctypes.c_int32)]
-
-
 class LLModelPromptContext(ctypes.Structure):
     _fields_ = [
         ("logits", ctypes.POINTER(ctypes.c_float)),
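Note: the hunks below show what replaces this struct: the error out-parameter of the C API becomes a plain const char **. A rough before/after sketch of what callers see (illustrative, not itself part of the diff):

import ctypes

# before this commit: a struct, read back as err.code / err.message
#     err = LLModelError()
# after this commit: a bare C string pointer, read back as err.value
err = ctypes.c_char_p()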
@@ -77,7 +73,7 @@ class LLModelGPUDevice(ctypes.Structure):
 llmodel.llmodel_model_create.argtypes = [ctypes.c_char_p]
 llmodel.llmodel_model_create.restype = ctypes.c_void_p
 
-llmodel.llmodel_model_create2.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(LLModelError)]
+llmodel.llmodel_model_create2.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)]
 llmodel.llmodel_model_create2.restype = ctypes.c_void_p
 
 llmodel.llmodel_model_destroy.argtypes = [ctypes.c_void_p]
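With this change the third parameter is effectively a C const char ** that llmodel_model_create2 can point at an error string. A minimal runnable sketch of the same char ** out-parameter pattern, using libc's strtol as a stand-in since its endptr works the same way and needs no model files; this assumes a Unix-like libc that ctypes.util.find_library can locate, and nothing in it is gpt4all API:

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))  # assumed host libc
# C: long strtol(const char *nptr, char **endptr, int base);
libc.strtol.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int]
libc.strtol.restype = ctypes.c_long

end = ctypes.c_char_p()  # NULL char * for the callee to fill in
value = libc.strtol(b"42rest", ctypes.byref(end), 10)
print(value)      # 42
print(end.value)  # b'rest': the string the callee wrote through the pointer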
@@ -150,6 +146,14 @@ def empty_response_callback(token_id: int, response: str) -> bool:
     return True
 
 
+def _create_model(model_path: bytes) -> ctypes.c_void_p:
+    err = ctypes.c_char_p()
+    model = llmodel.llmodel_model_create2(model_path, b"auto", ctypes.byref(err))
+    if model is None:
+        raise ValueError(f"Unable to instantiate model: {err.value.decode()}")
+    return model
+
+
 class LLModel:
     """
     Base class and universal wrapper for GPT4All language models
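A hedged usage sketch of the new helper (the model path below is made up): when llmodel_model_create2 returns NULL it stores a message through the out-parameter, and _create_model turns that into the ValueError text:

try:
    model = _create_model(b"/models/example.gguf")  # hypothetical path
except ValueError as e:
    print(e)  # e.g. "Unable to instantiate model: <message from the C layer>"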
@@ -178,12 +182,8 @@ class LLModel:
 
     def memory_needed(self, model_path: str) -> int:
         model_path_enc = model_path.encode("utf-8")
-        self.model = llmodel.llmodel_model_create(model_path_enc)
-
-        if self.model is not None:
-            return llmodel.llmodel_required_mem(self.model, model_path_enc)
-        else:
-            raise ValueError("Unable to instantiate model")
+        self.model = _create_model(model_path_enc)
+        return llmodel.llmodel_required_mem(self.model, model_path_enc)
 
     def list_gpu(self, model_path: str) -> list:
         """
@@ -253,11 +253,7 @@ class LLModel:
         True if model loaded successfully, False otherwise
         """
         model_path_enc = model_path.encode("utf-8")
-        err = LLModelError()
-        self.model = llmodel.llmodel_model_create2(model_path_enc, b"auto", ctypes.byref(err))
-
-        if self.model is None:
-            raise ValueError(f"Unable to instantiate model: code={err.code}, {err.message.decode()}")
+        self.model = _create_model(model_path_enc)
 
         llmodel.llmodel_loadModel(self.model, model_path_enc)
 
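Note: load_model already raised ValueError on instantiation failure before this commit; what changes is where the message comes from. It is now the single C string filled in through the const char ** parameter, rather than the code and message fields of the removed LLModelError struct, and both load_model and memory_needed now share that logic via _create_model.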