mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-09-06 19:10:15 +00:00
python bindings should be quiet by default
* disable llama.cpp logging unless the GPT4ALL_VERBOSE_LLAMACPP env var is nonempty
* make the verbose flag for retrieve_model default to False (but also overridable via the GPT4All constructor)

You should be able to run a basic test:

```python
import gpt4all
model = gpt4all.GPT4All('/Users/aaron/Downloads/rift-coder-v0-7b-q4_0.gguf')
print(model.generate('def fib(n):'))
```

and see no non-model output when successful.
This commit is contained in:
@@ -67,6 +67,7 @@ class GPT4All:
         allow_download: bool = True,
         n_threads: Optional[int] = None,
         device: Optional[str] = "cpu",
+        verbose: bool = False,
     ):
         """
         Constructor
@@ -91,7 +92,7 @@ class GPT4All:
         self.model_type = model_type
         self.model = pyllmodel.LLModel()
         # Retrieve model and download if allowed
-        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download)
+        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose)
         if device is not None:
             if device != "cpu":
                 self.model.init_gpu(model_path=self.config["path"], device=device)
@@ -119,7 +120,7 @@ class GPT4All:
         model_name: str,
         model_path: Optional[Union[str, os.PathLike[str]]] = None,
         allow_download: bool = True,
-        verbose: bool = True,
+        verbose: bool = False,
     ) -> ConfigType:
         """
         Find model file, and if it doesn't exist, download the model.
|
Reference in New Issue
Block a user