python: add list_gpus to the GPT4All API (#2194)

Other changes:
* fix memory leak in llmodel_available_gpu_devices
* drop model argument from llmodel_available_gpu_devices
* breaking: make GPT4All/Embed4All arguments past model_name keyword-only

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel
2024-04-04 14:52:13 -04:00
committed by GitHub
parent 790320e170
commit 1b84a48c47
8 changed files with 91 additions and 58 deletions

View File

@@ -213,9 +213,9 @@ LLModel *LLModel::Implementation::constructDefaultLlama() {
     return llama.get();
 }
-std::vector<LLModel::GPUDevice> LLModel::Implementation::availableGPUDevices() {
+std::vector<LLModel::GPUDevice> LLModel::Implementation::availableGPUDevices(size_t memoryRequired) {
     auto *llama = constructDefaultLlama();
-    if (llama) { return llama->availableGPUDevices(0); }
+    if (llama) { return llama->availableGPUDevices(memoryRequired); }
     return {};
 }