python bindings should be quiet by default

* disable llama.cpp logging unless the GPT4ALL_VERBOSE_LLAMACPP envvar is
  nonempty (see the sketch below)
* make the verbose flag for retrieve_model default to False (but keep it
  overridable via the GPT4All constructor)

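As a rough illustration of the first point, here is a minimal sketch of the environment-variable gate. `_llamacpp_verbose` is a hypothetical helper name used only for this example; in the bindings the equivalent check sits around the llama.cpp log callback rather than in a standalone function.

```python
import os

# Hypothetical helper sketching the gating logic: llama.cpp log output is
# forwarded only when GPT4ALL_VERBOSE_LLAMACPP is set to a non-empty value.
def _llamacpp_verbose() -> bool:
    return bool(os.environ.get("GPT4ALL_VERBOSE_LLAMACPP", ""))
```
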
You should be able to run a basic test:

```python
import gpt4all
model = gpt4all.GPT4All('/Users/aaron/Downloads/rift-coder-v0-7b-q4_0.gguf')
print(model.generate('def fib(n):'))
```

and see no non-model output when the run succeeds.
Author: Aaron Miller
Date: 2023-10-10 11:10:25 -07:00
Commit: afaa291eab (parent 7b611b49f2)
2 changed files with 20 additions and 3 deletions

```diff
@@ -67,6 +67,7 @@ class GPT4All:
         allow_download: bool = True,
         n_threads: Optional[int] = None,
         device: Optional[str] = "cpu",
+        verbose: bool = False,
     ):
         """
         Constructor
@@ -91,7 +92,7 @@ class GPT4All:
         self.model_type = model_type
         self.model = pyllmodel.LLModel()
         # Retrieve model and download if allowed
-        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download)
+        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose)
         if device is not None:
             if device != "cpu":
                 self.model.init_gpu(model_path=self.config["path"], device=device)
@@ -119,7 +120,7 @@ class GPT4All:
         model_name: str,
         model_path: Optional[Union[str, os.PathLike[str]]] = None,
         allow_download: bool = True,
-        verbose: bool = True,
+        verbose: bool = False,
     ) -> ConfigType:
         """
         Find model file, and if it doesn't exist, download the model.
```
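With the retrieve_model default flipped, callers who still want the model-lookup/download messages can opt back in through the constructor. A short usage sketch, with an illustrative model file name:

```python
import gpt4all

# verbose=True is forwarded to retrieve_model, restoring the lookup/download
# output that is now suppressed by default.
model = gpt4all.GPT4All("rift-coder-v0-7b-q4_0.gguf", verbose=True)
```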