Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-09-04 10:04:35 +00:00.
python: init_gpu fixes (#2368)
* python: tweak GPU init failure message
* llama.cpp: update submodule for use-after-free fix

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
@@ -274,11 +274,12 @@ class LLModel:
         all_gpus = self.list_gpus()
         available_gpus = self.list_gpus(mem_required)
-        unavailable_gpus = set(all_gpus).difference(available_gpus)
+        unavailable_gpus = [g for g in all_gpus if g not in available_gpus]

-        error_msg = "Unable to initialize model on GPU: {!r}".format(device)
-        error_msg += "\nAvailable GPUs: {}".format(available_gpus)
-        error_msg += "\nUnavailable GPUs due to insufficient memory or features: {}".format(unavailable_gpus)
+        error_msg = (f"Unable to initialize model on GPU: {device!r}" +
+                     f"\nAvailable GPUs: {available_gpus}")
+        if unavailable_gpus:
+            error_msg += f"\nUnavailable GPUs due to insufficient memory: {unavailable_gpus}"
         raise ValueError(error_msg)

     def load_model(self) -> bool:
Reference in New Issue
Block a user