diff --git a/gpt4all-bindings/python/CHANGELOG.md b/gpt4all-bindings/python/CHANGELOG.md index 4753e648..e2be48e6 100644 --- a/gpt4all-bindings/python/CHANGELOG.md +++ b/gpt4all-bindings/python/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). +## [2.8.2] - 2024-08-14 + +### Fixed +- Fixed incompatibility with Python 3.8 since v2.7.0 and Python <=3.11 since v2.8.1 ([#2871](https://github.com/nomic-ai/gpt4all/pull/2871)) + ## [2.8.1] - 2024-08-13 ### Added diff --git a/gpt4all-bindings/python/gpt4all/_pyllmodel.py b/gpt4all-bindings/python/gpt4all/_pyllmodel.py index 88be2949..d623b528 100644 --- a/gpt4all-bindings/python/gpt4all/_pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/_pyllmodel.py @@ -45,7 +45,7 @@ def _load_cuda(rtver: str, blasver: str) -> None: cudalib = f"lib/libcudart.so.{rtver}" cublaslib = f"lib/libcublas.so.{blasver}" else: # Windows - cudalib = fr"bin\cudart64_{rtver.replace(".", "")}.dll" + cudalib = fr"bin\cudart64_{rtver.replace('.', '')}.dll" cublaslib = fr"bin\cublas64_{blasver}.dll" # preload the CUDA libs so the backend can find them diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index 4364d7d5..1711429d 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -209,27 +209,27 @@ class GPT4All: self._current_prompt_template: str = "{0}" device_init = None - if sys.platform == 'darwin': + if sys.platform == "darwin": if device is None: - backend = 'auto' # 'auto' is effectively 'metal' due to currently non-functional fallback - elif device == 'cpu': - backend = 'cpu' + backend = "auto" # "auto" is effectively "metal" due to currently non-functional fallback + elif device == "cpu": + backend = "cpu" else: - if platform.machine() != 'arm64' or device != 'gpu': - raise ValueError(f'Unknown device for this platform: {device}') - backend = 'metal' + if platform.machine() != "arm64" or device != "gpu": + raise ValueError(f"Unknown device for this platform: {device}") + backend = "metal" else: - backend = 'kompute' - if device is None or device == 'cpu': + backend = "kompute" + if device is None or device == "cpu": pass # use kompute with no device - elif device in ('cuda', 'kompute'): + elif device in ("cuda", "kompute"): backend = device - device_init = 'gpu' - elif device.startswith('cuda:'): - backend = 'cuda' - device_init = device.removeprefix('cuda:') + device_init = "gpu" + elif device.startswith("cuda:"): + backend = "cuda" + device_init = _remove_prefix(device, "cuda:") else: - device_init = device.removeprefix('kompute:') + device_init = _remove_prefix(device, "kompute:") # Retrieve model and download if allowed self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose) @@ -706,3 +706,7 @@ def _fsync(fd: int | _HasFileno) -> None: else: return os.fsync(fd) + + +def _remove_prefix(s: str, prefix: str) -> str: + return s[len(prefix):] if s.startswith(prefix) else s diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py index 63bd9140..e22cbe23 100644 --- a/gpt4all-bindings/python/setup.py +++ b/gpt4all-bindings/python/setup.py @@ -68,7 +68,7 @@ def get_long_description(): setup( name=package_name, - version="2.8.1", + version="2.8.2", description="Python bindings for GPT4All", long_description=get_long_description(), long_description_content_type="text/markdown",